Mirror of https://github.com/natelandau/ansible-homelab-config.git (synced 2025-11-17 17:33:41 -05:00)

Compare commits: v0.2.0 ... feb1fbedf4 (39 commits)
Commits in this range:

feb1fbedf4, 6b00bf557c, ea9678eba6, 1714dff877, 7bde47d43a, 394b34a5d1,
846fb2bc31, eff9059bac, 5e35cf4400, 7f94a62989, 95f4c129ba, 9a46bc9ebc,
5b426da3ae, 041649cc5e, ce0cb6c5f1, 98d9a5a86f, f7ba237d0d, e134616692,
9194190591, 2bb55f3d51, 7365e8b3d6, 87c2a4e1b4, edd9704258, cb4a0e9f8a,
57c1a42f66, 8499f5029b, 47288456a5, 0f35061a2c, 2842e27282, d36212b7d7,
76f4af703e, 9bb7eeb439, 5526024244, ec52175c5c, be56f2a308, a757ff0cf2,
d6c155bef1, 440d570c87, 049267cec7
32  .ansible-lint-ignore  (new file)
@@ -0,0 +1,32 @@
# This file contains ignores rule violations for ansible-lint
handlers/main.yml ignore-errors
handlers/main.yml name[casing]
main.yml name[casing]
main.yml name[missing]
tasks/backups.yml name[casing]
tasks/cluster_storage.yml name[casing]
tasks/consul.yml command-instead-of-module
tasks/consul.yml name[template]
tasks/consul.yml no-changed-when
tasks/debug.yml name[casing]
tasks/docker.yml name[casing]
tasks/docker.yml no-changed-when
tasks/interpolated_variables.yml name[casing]
tasks/logrotate.yml ignore-errors
tasks/logrotate.yml name[casing]
tasks/nomad.yml name[casing]
tasks/nomad.yml name[template]
tasks/orchestration_jobs.yml name[casing]
tasks/packages.yml ignore-errors
tasks/packages.yml name[casing]
tasks/pull_repositories.yml name[casing]
tasks/pull_repositories.yml no-changed-when
tasks/sanity.yml name[casing]
tasks/service_prometheus_nodeExporter.yml name[casing]
tasks/service_prometheus_nodeExporter.yml no-changed-when
tasks/tdarr.yml name[casing]
tasks/tdarr.yml no-changed-when
tasks/telegraf.yml name[casing]
tasks/telegraf.yml name[template]
tasks/telegraf.yml package-latest
vault.yml yaml[document-start]
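Each entry pairs a file path with a rule tag that ansible-lint should skip for that file. Rather than maintaining the list by hand, recent ansible-lint releases (6.x and later) can write it out themselves — a sketch, assuming the project's virtualenv:

    $ poetry run ansible-lint --generate-ignore

This regenerates .ansible-lint-ignore from the current set of violations.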
.ansible-lint.yml
@@ -10,9 +10,10 @@ exclude_paths:
     - galaxy-roles/
     - .cz.yaml
     - vault.yml
     - .venv/
     - ansible_collections/

 skip_list:
     - command-instead-of-shell
     - name[template]
     - ignore-errors
     - meta-incorrect
@@ -21,10 +22,11 @@ skip_list:
     - role-name
     - unnamed-task
     - var-naming
     - name[casing]
     - latest[git]

 warn_list:
     - experimental
     - risky-file-permissions
     - command-instead-of-module
     - no-changed-when
     - command-instead-of-shell
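These rules can be exercised locally with the same invocation the repository's poe lint task uses (see the pyproject.toml diff below):

    $ poetry run ansible-lint --force-color --config-file .ansible-lint.yml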
.pre-commit-config.yaml
@@ -1,12 +1,12 @@
 ---
 repos:
     - repo: "https://github.com/commitizen-tools/commitizen"
-      rev: v2.40.0
+      rev: v3.13.0
       hooks:
           - id: "commitizen"

     - repo: "https://github.com/pre-commit/pre-commit-hooks"
-      rev: v4.4.0
+      rev: v4.5.0
       hooks:
           - id: check-added-large-files
           - id: check-ast
@@ -31,7 +31,7 @@ repos:
           args: [--markdown-linebreak-ext=md]

     - repo: "https://github.com/adrienverge/yamllint.git"
-      rev: v1.29.0
+      rev: v1.33.0
       hooks:
           - id: yamllint
             files: \.(yaml|yml)$
@@ -43,6 +43,11 @@ repos:
             )\.(yaml|yml)$
           entry: yamllint --strict --config-file .yamllint.yml

+    - repo: "https://github.com/crate-ci/typos"
+      rev: v1.16.26
+      hooks:
+          - id: typos
+
     - repo: local
       hooks:
           - id: vault-pre-commit
@@ -50,10 +55,14 @@ repos:
           entry: scripts/ansible-vault-precommit.sh
           language: system

         # This calls a custom script. Remove if you don't need it.
         - id: stopwords
-          name: check stopwords
-          entry: scripts/stopwords.sh
+          name: stopwords
+          entry: git-stopwords
+          # args: ["-v"]
           language: system
           pass_filenames: true
           types: [text]

         - id: ansible-lint
           name: running ansible-lint
@@ -68,12 +77,6 @@ repos:
           files: \.sh\.j2$
           entry: shellcheck -x --exclude=1009,1054,1056,1072,1073,1083,2001,2148
-
-        - id: "run-shellscripts-bats-tests"
-          name: run bats unit tests
-          language: system
-          files: \.bats$
-          entry: bats -t

         - id: "ansible-encryption-check"
           name: Ansible Encryption Check
           language: system
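The rev bumps for commitizen, pre-commit-hooks, and yamllint are exactly what pre-commit's built-in updater produces; a typical local workflow would be:

    $ pre-commit autoupdate
    $ pre-commit run --all-files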
8  .typos.toml  (new file)
@@ -0,0 +1,8 @@
[default]
default.locale = "en_us"

[default.extend-words]
Hashi = "Hashi" # Hashicorp

[files]
extend-exclude = ["galaxy-roles/"]
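With .typos.toml in place, the typos CLI (from crate-ci/typos, pinned above as a hook) reads it automatically — a sketch of running it outside pre-commit:

    $ typos
    $ typos --write-changes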
15  .vscode/settings.json  (vendored)
@@ -1,5 +1,14 @@
 {
-    "yaml.schemas": {
-        "https://raw.githubusercontent.com/ansible-community/schemas/main/f/ansible.json#/$defs/playbook": "file:///Users/natelandau/repos/ansible-homelab-config/main.yml"
-    }
+    "yaml.schemas": {
+        "https://raw.githubusercontent.com/ansible-community/schemas/main/f/ansible.json#/$defs/playbook": "file:///Users/natelandau/repos/ansible-homelab-config/main.yml"
+    },
+    "ansible.python.interpreterPath": "${workspaceFolder}/.venv/bin/python",
+    "files.associations": {
+        "**/tasks/*.yml": "ansible",
+        "**/handlers/*.yml": "ansible",
+        "main.yml": "ansible",
+        "inventory.yml": "ansible",
+        "default_variables.yml": "ansible",
+        "vault.yml": "ansible"
+    }
 }
README.md
@@ -41,14 +41,13 @@ This playbook adds storage, services, applications, and configurations to a prev
     - Custom shell scripts for backups and housekeeping

 * **Syncs Nomad and Docker Compose job files** to servers:
     - [ASN-to-IP](https://hub.docker.com/r/ddimick/asn-to-ip) - Used by Opnsense to build firewall aliases
     - [Authelia](https://www.authelia.com/) - Open-source full-featured authentication server
     - [Changedetection.io](https://github.com/dgtlmoon/changedetection.io) - Website change detection monitoring and notification service
     - [Diun](https://crazymax.dev/diun/) - Docker Image Update Notifier is a CLI application
     - [FreshRSS](https://freshrss.org/) - A containerized RSS reader
+    - [Gitea](https://about.gitea.com/) - Self-hosted Git service
     - [Grafana](https://grafana.com/) - Operational dashboards
     - [Grafana Loki](https://grafana.com/oss/loki/) - Log aggregation system
     - [Headless Trunk](https://github.com/alpeware/chrome-headless-trunk) - Headless Chromium
     - [iCloud Drive Docker](https://github.com/mandarons/icloud-drive-docker) - Backup files and photos from Apple iCloud
     - [InfluxDB](https://www.influxdata.com/) - Time series database
     - [Lidarr](https://lidarr.audio/) - Music collection manager
ansible.cfg
@@ -1,7 +1,7 @@
 [defaults]
 nocows = True
 roles_path = ./galaxy-roles:./roles
-collections_paths = ./
+collections_path = ./
 inventory = ./inventory.yml
 stdout_callback = yaml
 any_errors_fatal = True
@@ -9,4 +9,4 @@ display_skipped_hosts = False
 vault_password_file = ./.password_file

 [ssh_connection]
-scp_if_ssh = True
+transfer_method = smart
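Both changes track option renames in newer ansible-core releases: collections_paths is deprecated in favor of the singular collections_path, and scp_if_ssh in favor of transfer_method. With collections_path = ./, collections install into ./ansible_collections — which is why that directory is now excluded in .ansible-lint.yml. A sketch (ansible.posix is assumed here because tasks/cluster_storage.yml uses ansible.posix.mount):

    $ ansible-galaxy collection install ansible.posix -p .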
default_variables.yml
@@ -1,17 +1,21 @@
 ---
 # ---------------------------------- SOFTWARE VERSIONS
-authelia_version: 4.37.3
-consul_version: 1.14.2
-influxdb_version: 1.8.10
-nomad_version: 1.4.3
-prometheus_verssion: 1.1.2
+authelia_version: 4.37.5
+backup_mongodb_version: 1.1.0
+consul_version: 1.16.1
+gitea_version: 1.21.3
+influxdb_version: 1.11.1
+nomad_version: 1.7.1
+prometheus_verssion: 2.46.0
 recyclarr_version: 6.0.2
 speedtest_cli_version: 1.2.0
 tdarr_installer_version: 2.00.13
-telegraf_version: 1.25.0
-traefik_version: "v2.9.6"
+telegraf_version: 1.28.4
+traefik_version: 2.10.7
+valentina_version: 2.1.0
+sabnzbd_version: 4.2.1

 # ---------------------------------- SERVICE STATIC PORT MAPPINGS
 authelia_port: "9091"
 influxdb_port: "8086"
 tdarr_node_port: "8267"
 tdarr_server_port: "8266"
@@ -20,6 +24,7 @@ tdarr_webui_port: "8265"
 # ---------------------------------- DIRECTORIES FOR SERVICE LOCAL STORAGE
 # These folders must be created, even if empty, to allow mounting nomad local storage end-points
 service_localfs_dirs:
+    - gitea
     - influxdb
     - lidarr
     - prowlarr
@@ -99,6 +104,7 @@ apt_packages_list:
     - logrotate
     - lsof
     - nano
     - netcat
     - net-tools
     - nmap
     - openssh-server
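Because the pins are ordinary variables, a candidate version can be trialed on a single run without editing this file — a sketch built from the playbook invocation defined in pyproject.toml (the version value is an arbitrary example):

    $ ansible-playbook --vault-password-file .password_file main.yml -i inventory.yml --tags nomad -e nomad_version=1.7.1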
handlers/main.yml
@@ -3,80 +3,96 @@
 - name: Mount shared storage on Mac
   become: true
   ansible.builtin.command:
-      cmd: automount -cv
+      cmd: automount -cv
+  register: automount_output
+  failed_when: automount_output.rc > 0
+  changed_when: automount_output.rc == 0
   when:
-      - "'macs' in group_names"
-      - not ansible_check_mode
+      - "'macs' in group_names"
+      - not ansible_check_mode
   listen: "mac_run_automount"

 - name: Mount and unmount shared storage on Mac
   become: true
   ansible.builtin.command:
-      cmd: automount -cvu
+      cmd: automount -cvu
+  register: automount_output
+  failed_when: automount_output.rc > 0
+  changed_when: automount_output.rc == 0
   when:
-      - "'macs' in group_names"
-      - not ansible_check_mode
+      - "'macs' in group_names"
+      - not ansible_check_mode
   listen: "mac_run_automount_unmount"

 ##################################### TELEGRAF
 - name: (Re)Start telegraf (Debian)
   become: true
   ansible.builtin.service:
-      name: telegraf
-      state: restarted
+      name: telegraf
+      state: restarted
+  register: telegraf_service
+  failed_when: telegraf_service.rc > 0
+  changed_when: telegraf_service.rc == 0
   when:
-      - ansible_os_family == 'Debian'
+      - ansible_os_family == 'Debian'
   listen: restart_telegraf

 - name: (Re)Start telegraf
   ansible.builtin.shell:
-      cmd: /usr/local/bin/brew services restart telegraf
-      executable: /usr/local/bin/bash
+      cmd: /usr/local/bin/brew services restart telegraf
+      executable: /usr/local/bin/bash
   ignore_errors: true
+  register: telegraf_service
+  failed_when: telegraf_service.rc > 0
+  changed_when: telegraf_service.rc == 0
   when:
-      - ansible_os_family == 'Darwin'
+      - ansible_os_family == 'Darwin'
   listen: restart_telegraf

 ##################################### NOMAD

-- name: restart nomad (Debian)
+- name: Restart nomad (Debian)
   become: true
   ansible.builtin.systemd:
-      name: nomad
-      enabled: true
-      state: restarted
+      name: nomad
+      enabled: true
+      state: restarted
+  register: nomad_service
+  failed_when: nomad_service.rc > 0
+  changed_when: nomad_service.rc == 0
   when:
-      - ansible_os_family == 'Debian'
-      - "'nostart' not in ansible_run_tags"
+      - ansible_os_family == 'Debian'
+      - "'nostart' not in ansible_run_tags"
   listen: "restart nomad"

-- name: "unload nomad agent (MacOSX)"
+- name: "Unload nomad agent (MacOSX)"
   ansible.builtin.command:
-      cmd: "launchctl unload -w {{ nomad_plist_macos }}"
-  failed_when: false
+      cmd: "launchctl unload -w {{ nomad_plist_macos }}"
+  register: nomad_service
+  changed_when: nomad_service.rc == 0
+  failed_when: nomad_service.rc > 0
   when:
-      - ansible_os_family == 'Darwin'
-      - "'nostart' not in ansible_run_tags"
+      - ansible_os_family == 'Darwin'
+      - "'nostart' not in ansible_run_tags"
   listen: "restart nomad"

-- name: "load the nomad agent (MacOSX)"
+- name: "Load the nomad agent (MacOSX)"
   ansible.builtin.command:
-      cmd: "launchctl load -w {{ nomad_plist_macos }}"
+      cmd: "launchctl load -w {{ nomad_plist_macos }}"
   register: nomad_service
   changed_when: nomad_service.rc == 0
   failed_when: nomad_service.rc > 0
   when:
-      - ansible_os_family == 'Darwin'
-      - "'nostart' not in ansible_run_tags"
+      - ansible_os_family == 'Darwin'
+      - "'nostart' not in ansible_run_tags"
   listen: "restart nomad"

-- name: "ensure nomad is really running"
+- name: "Ensure nomad is really running"
   ansible.builtin.shell:
-      cmd: "sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
+      cmd: "set -o pipefail && sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
   register: node_status_response
   failed_when: node_status_response.rc > 0
-  changed_when: false
+  changed_when: node_status_response.rc == 0
   when: "'nostart' not in ansible_run_tags"
   listen: "restart nomad"
 # - name: "Ensure sure Nomad service is really running"
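All three nomad handlers listen on the same topic, so a single notify of "restart nomad" unloads the agent, reloads it, and then verifies it with the same command the final handler wraps — which can also be run by hand on a node:

    $ /usr/local/bin/nomad node status -self -short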
inventory.yml
@@ -42,7 +42,7 @@ all:
     pis:
         hosts:
             rpi1:
-                ansible_host: 10.0.30.91
+                ansible_host: "{{ rpi1_ip_address }}"
                 ansible_user: "{{ pi_username }}"
                 ansible_become_pass: "{{ pi_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi1"
@@ -58,7 +58,7 @@ all:
                 manage_apt_packages_list: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
             rpi2:
-                ansible_host: 10.0.30.92
+                ansible_host: "{{ rpi2_ip_address }}"
                 ansible_user: "{{ pi_username }}"
                 ansible_become_pass: "{{ pi_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi2"
@@ -72,7 +72,7 @@ all:
                 manage_apt_packages_list: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
             rpi3:
-                ansible_host: 10.0.30.93
+                ansible_host: "{{ rpi3_ip_address }}"
                 ansible_user: "{{ pi_username }}"
                 ansible_become_pass: "{{ pi_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi3"
@@ -86,7 +86,7 @@ all:
                 manage_apt_packages_list: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
             rpi4:
-                ansible_host: 10.0.30.94
+                ansible_host: "{{ rpi4_ip_address }}"
                 ansible_user: "{{ pi_username }}"
                 ansible_become_pass: "{{ pi_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi4"
@@ -102,7 +102,7 @@ all:
     macs:
         hosts:
             macmini:
-                ansible_host: 10.0.0.4
+                ansible_host: "{{ macmini_ip_address }}"
                 ansible_user: "{{ my_username }}"
                 ansible_become_pass: "{{ mac_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/macMini"
@@ -117,7 +117,7 @@ all:
                 manage_homebrew_package_list: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
             imac:
-                ansible_host: 10.0.0.25
+                ansible_host: "{{ imac_ip_address }}"
                 ansible_user: "{{ my_username }}"
                 ansible_become_pass: "{{ mac_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/imac"
@@ -129,7 +129,7 @@ all:
                 is_shared_storage_client: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
             skimmbook:
-                ansible_host: 10.0.0.21
+                ansible_host: "{{ skimmbook_ip_address }}"
                 ansible_user: "{{ my_username }}"
                 ansible_become_pass: "{{ mac_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
@@ -140,22 +140,11 @@ all:
                 is_tdarr_node: true
                 is_shared_storage_client: true
                 ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
-            vpnmac:
-                ansible_host: 10.0.90.2
-                ansible_user: "{{ my_username }}"
-                ansible_become_pass: "{{ mac_become_pass }}"
-                ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
-                ansible_python_interpreter: "/Users/natelandau/.pyenv/shims/python"
-                ansible_port: 22
-                mac_arm: true
-                manage_homebrew_package_list: true
-                is_tdarr_node: true
-                ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
     nas:
         hosts:
             synology:
-                ansible_host: 10.0.0.6
-                synology_second_ip: 10.0.30.6
+                ansible_host: "{{ synology_ip_address_1 }}"
+                synology_second_ip: "{{ synology_ip_address_2 }}"
                 ansible_user: "{{ my_username }}"
                 ansible_become_pass: "{{ synology_become_pass }}"
                 ansible_ssh_private_key_file: "{{ ssh_key_location }}/synology"
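The literal addresses move into variables (rpi1_ip_address, macmini_ip_address, synology_ip_address_1, and so on), which — given the vars_files list in main.yml — presumably live in the encrypted vault.yml. They can be edited with:

    $ ansible-vault edit --vault-password-file .password_file vault.yml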
138  main.yml
@@ -1,79 +1,79 @@
 ---
-- hosts: all
-  name: "Running playbook"
+- name: "Running playbook"
+  hosts: all
   serial: 1

   vars_files:
-      - default_variables.yml
-      - vault.yml
+      - default_variables.yml
+      - vault.yml

   pre_tasks:
-      - name: Run sanity checks
-        ansible.builtin.import_tasks: tasks/sanity.yml
-        tags: ["always", "sanity"]
-      - name: populate service facts
-        ansible.builtin.service_facts:
-        tags: ["nomad", "consul"]
-      - name: Run debug tasks
-        ansible.builtin.import_tasks: tasks/debug.yml
-        tags: [never, debug]
-      - name: populate device specific variables
-        ansible.builtin.import_tasks: tasks/interpolated_variables.yml
-        tags: ["always"]
-      - name: Ensure we have up-to-date packages
-        ansible.builtin.import_tasks: tasks/packages.yml
-        tags: ["packages", "update"]
-      - name: Set clean nomad_jobs_dir variable
-        ansible.builtin.set_fact:
-            clean_nomad_jobs: true
-        tags: ["never", "clean"]
+      - name: Run sanity checks
+        ansible.builtin.import_tasks: tasks/sanity.yml
+        tags: ["always", "sanity"]
+      - name: Populate service facts
+        ansible.builtin.service_facts:
+        tags: ["nomad", "consul"]
+      - name: Run debug tasks
+        ansible.builtin.import_tasks: tasks/debug.yml
+        tags: [never, debug]
+      - name: Populate device specific variables
+        ansible.builtin.import_tasks: tasks/interpolated_variables.yml
+        tags: ["always"]
+      - name: Ensure we have up-to-date packages
+        ansible.builtin.import_tasks: tasks/packages.yml
+        tags: ["packages", "update"]
+      - name: Set clean nomad_jobs_dir variable
+        ansible.builtin.set_fact:
+            clean_nomad_jobs: true
+        tags: ["never", "clean"]

   tasks:
-      - name: Configure cluster NFS mounts
-        ansible.builtin.import_tasks: tasks/cluster_storage.yml
-        tags: ["storage"]
-        when:
-            - is_nomad_client or is_nomad_server or is_shared_storage_client
-      - name: Install Docker
-        ansible.builtin.import_tasks: tasks/docker.yml
-        tags: ["docker"]
-        when: "'nas' not in group_names"
-      - name: Install and Upgrade Consul
-        ansible.builtin.import_tasks: tasks/consul.yml
-        tags: ["consul"]
-        when: is_consul_client or is_consul_server
-      - name: Install and Upgrade Nomad
-        ansible.builtin.import_tasks: tasks/nomad.yml
-        tags: ["nomad"]
-        when: is_nomad_client or is_nomad_server
-      - name: Orchestration Jobs
-        ansible.builtin.import_tasks: tasks/orchestration_jobs.yml
-        tags: ["jobs", "update"]
-      - name: Prometheus Node Exporter
-        ansible.builtin.import_tasks: tasks/service_prometheus_nodeExporter.yml
-        tags: ["prometheus_exporter"]
-        when:
-            - is_prometheus_node
-            - "'pis' in group_names"
-      - name: Install backup scripts
-        ansible.builtin.import_tasks: tasks/backups.yml
-        tags: ["backup", "backups"]
-        when: is_nomad_client or is_nomad_server
-      - name: Install and configure Telegraf
-        ansible.builtin.import_tasks: tasks/telegraf.yml
-        tags: ["telegraf"]
-        when: is_telegraf_client
-      - name: Pull repositories
-        ansible.builtin.import_tasks: tasks/pull_repositories.yml
-        tags: ["never", "update", "repos"]
-      - name: Configure log rotate
-        ansible.builtin.import_tasks: tasks/logrotate.yml
-        tags: ["logrotate"]
-        when: is_cluster_leader
-      - name: Install and configure tdarr
-        ansible.builtin.import_tasks: tasks/tdarr.yml
-        tags: ["tdarr"]
-        when: is_tdarr_server or is_tdarr_node
+      - name: Configure cluster NFS mounts
+        ansible.builtin.import_tasks: tasks/cluster_storage.yml
+        tags: ["storage"]
+        when:
+            - is_nomad_client or is_nomad_server or is_shared_storage_client
+      - name: Install Docker
+        ansible.builtin.import_tasks: tasks/docker.yml
+        tags: ["docker"]
+        when: "'nas' not in group_names"
+      - name: Install and Upgrade Consul
+        ansible.builtin.import_tasks: tasks/consul.yml
+        tags: ["consul"]
+        when: is_consul_client or is_consul_server
+      - name: Install and Upgrade Nomad
+        ansible.builtin.import_tasks: tasks/nomad.yml
+        tags: ["nomad"]
+        when: is_nomad_client or is_nomad_server
+      - name: Orchestration Jobs
+        ansible.builtin.import_tasks: tasks/orchestration_jobs.yml
+        tags: ["jobs", "update"]
+      - name: Prometheus Node Exporter
+        ansible.builtin.import_tasks: tasks/service_prometheus_nodeExporter.yml
+        tags: ["prometheus_exporter"]
+        when:
+            - is_prometheus_node
+            - "'pis' in group_names"
+      - name: Install backup scripts
+        ansible.builtin.import_tasks: tasks/backups.yml
+        tags: ["backup", "backups"]
+        when: is_nomad_client or is_nomad_server
+      - name: Install and configure Telegraf
+        ansible.builtin.import_tasks: tasks/telegraf.yml
+        tags: ["telegraf"]
+        when: is_telegraf_client
+      - name: Pull repositories
+        ansible.builtin.import_tasks: tasks/pull_repositories.yml
+        tags: ["never", "update", "repos"]
+      - name: Configure log rotate
+        ansible.builtin.import_tasks: tasks/logrotate.yml
+        tags: ["logrotate"]
+        when: is_cluster_leader
+      - name: Install and configure tdarr
+        ansible.builtin.import_tasks: tasks/tdarr.yml
+        tags: ["tdarr"]
+        when: is_tdarr_server or is_tdarr_node

   handlers:
-      - ansible.builtin.import_tasks: handlers/main.yml
+      - ansible.builtin.import_tasks: handlers/main.yml
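The play runs with serial: 1, one host at a time. The usual entry point is the poe task defined in pyproject.toml, with ad-hoc narrowing done via standard ansible-playbook flags:

    $ poe pb
    $ ansible-playbook --vault-password-file .password_file main.yml -i inventory.yml --tags update --limit pis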
1175  poetry.lock  (generated)
File diff suppressed because it is too large
2  poetry.toml  (new file)
@@ -0,0 +1,2 @@
[virtualenvs]
in-project = true
pyproject.toml
@@ -7,18 +7,26 @@
 version = "0.2.0"

 [tool.poetry.dependencies]
-ansible = "^7.2.0"
-ansible-lint = { version = "^6.12.1", markers = "platform_system != 'Windows'" }
+ansible = "^8.6.0"
+ansible-lint = { version = "^6.18.0", markers = "platform_system != 'Windows'" }
 commitizen = "^2.40.0"
 poethepoet = "^0.18.1"
-pre-commit = "^3.0.4"
+pre-commit = "^3.3.3"
 python = "^3.9"
-yamllint = "^1.29.0"
+yamllint = "^1.32.0"

+[tool.poetry.group.dev.dependencies]
+black = "^23.11.0"
+sh = "^2.0.6"
+typos = "^1.16.23"
+
 [build-system]
 build-backend = "poetry.core.masonry.api"
 requires = ["poetry-core"]

+[tool.black]
+line-length = 100
+
 [tool.commitizen]
 bump_message = "bump(release): v$current_version → v$new_version"
 tag_format = "v$version"
@@ -27,11 +35,18 @@
 version_files = ["pyproject.toml:version"]

 [tool.poe.tasks]
+pb = """
+ansible-playbook
+    --vault-password-file .password_file
+    main.yml
+    -i inventory.yml
+"""

 [tool.poe.tasks.lint]
 help = "Run linters"

 [[tool.poe.tasks.lint.sequence]]
-shell = "yamllint --strict --config-file .yamllint.yml tasks/ handlers/ main.yml inventory.yml default_variables.yml"
+cmd = "yamllint --strict --config-file .yamllint.yml tasks/ handlers/ main.yml inventory.yml default_variables.yml"

 [[tool.poe.tasks.lint.sequence]]
-shell = "ansible-lint --force-color --config-file .ansible-lint.yml"
+cmd = "ansible-lint --force-color --config-file .ansible-lint.yml"
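After poetry install, the poe tasks give one-word entry points for the lint sequence and the playbook run:

    $ poe lint
    $ poe pb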
@@ -1,821 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2317

_mainScript_() {

    _customStopWords_() {
        # DESC: Check if any specified stop words are in the commit diff. If found, the pre-commit hook will exit with a non-zero exit code.
        # ARGS:
        #     $1 (Required): Path to file
        # OUTS:
        #     0: Success
        #     1: Failure
        # USAGE:
        #     _customStopWords_ "/path/to/file.sh"
        # NOTE:
        #     Requires a plaintext stopword file located at
        #     `~/.git_stop_words` containing one stopword per line.

        [[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"

        local _gitDiffTmp
        local FILE_TO_CHECK="${1}"

        _gitDiffTmp="${TMP_DIR}/${RANDOM}.${RANDOM}.${RANDOM}.diff.txt"

        if [ -f "${STOP_WORD_FILE}" ]; then

            if [[ $(basename "${STOP_WORD_FILE}") == "$(basename "${FILE_TO_CHECK}")" ]]; then
                debug "$(basename "${1}"): Don't check stop words file for stop words."
                return 0
            fi
            debug "$(basename "${FILE_TO_CHECK}"): Checking for stop words..."

            # remove blank lines from stopwords file
            sed '/^$/d' "${STOP_WORD_FILE}" >"${TMP_DIR}/pattern_file.txt"

            # Check for stopwords
            if git diff --cached -- "${FILE_TO_CHECK}" | grep -i -q "new file mode"; then
                if grep -i --file="${TMP_DIR}/pattern_file.txt" "${FILE_TO_CHECK}"; then
                    return 1
                else
                    return 0
                fi
            else
                # Add diff to a temporary file
                git diff --cached -- "${FILE_TO_CHECK}" | grep '^+' >"${_gitDiffTmp}"
                if grep -i --file="${TMP_DIR}/pattern_file.txt" "${_gitDiffTmp}"; then
                    return 1
                else
                    return 0
                fi
            fi

        else

            notice "Could not find git stopwords file expected at '${STOP_WORD_FILE}'. Continuing..."
            return 0
        fi
    }

    # Don't lint binary files
    if [[ ${ARGS[0]} =~ \.(jpg|jpeg|gif|png|exe|zip|gzip|tiff|tar|dmg|ttf|otf|m4a|mp3|mkv|mov|avi|eot|svg|woff2?|aac|wav|flac|pdf|doc|xls|ppt|7z|bin|dmg|dat|sql|ico|mpe?g)$ ]]; then
        _safeExit_ 0
    fi

    if ! _customStopWords_ "${ARGS[0]}"; then
        error "Stop words found in ${ARGS[0]}"
        _safeExit_ 1
    fi
}
# end _mainScript_

# ################################## Flags and defaults
# Required variables
LOGFILE="${HOME}/logs/$(basename "$0").log"
QUIET=false
LOGLEVEL=ERROR
VERBOSE=false
FORCE=false
DRYRUN=false
declare -a ARGS=()

# Script specific
LOGLEVEL=NONE
STOP_WORD_FILE="${HOME}/.git_stop_words"
shopt -s nocasematch
# ################################## Custom utility functions (Pasted from repository)

# ################################## Functions required for this template to work

_setColors_() {
    # DESC:
    #     Sets colors used for alerts.
    # ARGS:
    #     None
    # OUTS:
    #     None
    # USAGE:
    #     printf "%s\n" "${blue}Some text${reset}"

    if tput setaf 1 >/dev/null 2>&1; then
        bold=$(tput bold)
        underline=$(tput smul)
        reverse=$(tput rev)
        reset=$(tput sgr0)

        if [[ $(tput colors) -ge 256 ]] >/dev/null 2>&1; then
            white=$(tput setaf 231)
            blue=$(tput setaf 38)
            yellow=$(tput setaf 11)
            green=$(tput setaf 82)
            red=$(tput setaf 9)
            purple=$(tput setaf 171)
            gray=$(tput setaf 250)
        else
            white=$(tput setaf 7)
            blue=$(tput setaf 38)
            yellow=$(tput setaf 3)
            green=$(tput setaf 2)
            red=$(tput setaf 9)
            purple=$(tput setaf 13)
            gray=$(tput setaf 7)
        fi
    else
        bold="\033[4;37m"
        reset="\033[0m"
        underline="\033[4;37m"
        # shellcheck disable=SC2034
        reverse=""
        white="\033[0;37m"
        blue="\033[0;34m"
        yellow="\033[0;33m"
        green="\033[1;32m"
        red="\033[0;31m"
        purple="\033[0;35m"
        gray="\033[0;37m"
    fi
}
_alert_() {
    # DESC:
    #     Controls all printing of messages to log files and stdout.
    # ARGS:
    #     $1 (required) - The type of alert to print
    #                     (success, header, notice, dryrun, debug, warning, error,
    #                     fatal, info, input)
    #     $2 (required) - The message to be printed to stdout and/or a log file
    #     $3 (optional) - Pass '${LINENO}' to print the line number where the _alert_ was triggered
    # OUTS:
    #     stdout: The message is printed to stdout
    #     log file: The message is printed to a log file
    # USAGE:
    #     [_alertType] "[MESSAGE]" "${LINENO}"
    # NOTES:
    #     - The colors of each alert type are set in this function
    #     - For specified alert types, the funcstack will be printed

    local _color
    local _alertType="${1}"
    local _message="${2}"
    local _line="${3-}" # Optional line number

    [[ $# -lt 2 ]] && fatal 'Missing required argument to _alert_'

    if [[ -n ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
        _message="${_message} ${gray}(line: ${_line}) $(_printFuncStack_)"
    elif [[ -n ${_line} && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
        _message="${_message} ${gray}(line: ${_line})"
    elif [[ -z ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
        _message="${_message} ${gray}$(_printFuncStack_)"
    fi

    if [[ ${_alertType} =~ ^(error|fatal) ]]; then
        _color="${bold}${red}"
    elif [ "${_alertType}" == "info" ]; then
        _color="${gray}"
    elif [ "${_alertType}" == "warning" ]; then
        _color="${red}"
    elif [ "${_alertType}" == "success" ]; then
        _color="${green}"
    elif [ "${_alertType}" == "debug" ]; then
        _color="${purple}"
    elif [ "${_alertType}" == "header" ]; then
        _color="${bold}${white}${underline}"
    elif [ "${_alertType}" == "notice" ]; then
        _color="${bold}"
    elif [ "${_alertType}" == "input" ]; then
        _color="${bold}${underline}"
    elif [ "${_alertType}" = "dryrun" ]; then
        _color="${blue}"
    else
        _color=""
    fi

    _writeToScreen_() {
        ("${QUIET}") && return 0 # Print to console when script is not 'quiet'
        [[ ${VERBOSE} == false && ${_alertType} =~ ^(debug|verbose) ]] && return 0

        if ! [[ -t 1 || -z ${TERM-} ]]; then # Don't use colors on non-recognized terminals
            _color=""
            reset=""
        fi

        if [[ ${_alertType} == header ]]; then
            printf "${_color}%s${reset}\n" "${_message}"
        else
            printf "${_color}[%7s] %s${reset}\n" "${_alertType}" "${_message}"
        fi
    }
    _writeToScreen_

    _writeToLog_() {
        [[ ${_alertType} == "input" ]] && return 0
        [[ ${LOGLEVEL} =~ (off|OFF|Off) ]] && return 0
        if [ -z "${LOGFILE-}" ]; then
            LOGFILE="$(pwd)/$(basename "$0").log"
        fi
        [ ! -d "$(dirname "${LOGFILE}")" ] && mkdir -p "$(dirname "${LOGFILE}")"
        [[ ! -f ${LOGFILE} ]] && touch "${LOGFILE}"

        # Don't use colors in logs
        local _cleanmessage
        _cleanmessage="$(printf "%s" "${_message}" | sed -E 's/(\x1b)?\[(([0-9]{1,2})(;[0-9]{1,3}){0,2})?[mGK]//g')"
        # Print message to log file
        printf "%s [%7s] %s %s\n" "$(date +"%b %d %R:%S")" "${_alertType}" "[$(/bin/hostname)]" "${_cleanmessage}" >>"${LOGFILE}"
    }

    # Write specified log level data to logfile
    case "${LOGLEVEL:-ERROR}" in
        ALL | all | All)
            _writeToLog_
            ;;
        DEBUG | debug | Debug)
            _writeToLog_
            ;;
        INFO | info | Info)
            if [[ ${_alertType} =~ ^(error|fatal|warning|info|notice|success) ]]; then
                _writeToLog_
            fi
            ;;
        NOTICE | notice | Notice)
            if [[ ${_alertType} =~ ^(error|fatal|warning|notice|success) ]]; then
                _writeToLog_
            fi
            ;;
        WARN | warn | Warn)
            if [[ ${_alertType} =~ ^(error|fatal|warning) ]]; then
                _writeToLog_
            fi
            ;;
        ERROR | error | Error)
            if [[ ${_alertType} =~ ^(error|fatal) ]]; then
                _writeToLog_
            fi
            ;;
        FATAL | fatal | Fatal)
            if [[ ${_alertType} =~ ^fatal ]]; then
                _writeToLog_
            fi
            ;;
        OFF | off)
            return 0
            ;;
        *)
            if [[ ${_alertType} =~ ^(error|fatal) ]]; then
                _writeToLog_
            fi
            ;;
    esac

} # /_alert_
error() { _alert_ error "${1}" "${2-}"; }
warning() { _alert_ warning "${1}" "${2-}"; }
notice() { _alert_ notice "${1}" "${2-}"; }
info() { _alert_ info "${1}" "${2-}"; }
success() { _alert_ success "${1}" "${2-}"; }
dryrun() { _alert_ dryrun "${1}" "${2-}"; }
input() { _alert_ input "${1}" "${2-}"; }
header() { _alert_ header "${1}" "${2-}"; }
debug() { _alert_ debug "${1}" "${2-}"; }
fatal() {
    _alert_ fatal "${1}" "${2-}"
    _safeExit_ "1"
}

_printFuncStack_() {
    # DESC:
    #     Prints the function stack in use. Used for debugging, and error reporting.
    # ARGS:
    #     None
    # OUTS:
    #     stdout: Prints [function]:[file]:[line]
    # NOTE:
    #     Does not print functions from the alert class
    local _i
    declare -a _funcStackResponse=()
    for ((_i = 1; _i < ${#BASH_SOURCE[@]}; _i++)); do
        case "${FUNCNAME[${_i}]}" in
            _alert_ | _trapCleanup_ | fatal | error | warning | notice | info | debug | dryrun | header | success)
                continue
                ;;
            *)
                _funcStackResponse+=("${FUNCNAME[${_i}]}:$(basename "${BASH_SOURCE[${_i}]}"):${BASH_LINENO[_i - 1]}")
                ;;
        esac

    done
    printf "( "
    printf %s "${_funcStackResponse[0]}"
    printf ' < %s' "${_funcStackResponse[@]:1}"
    printf ' )\n'
}
_safeExit_() {
    # DESC:
    #     Cleanup and exit from a script
    # ARGS:
    #     $1 (optional) - Exit code (defaults to 0)
    # OUTS:
    #     None

    if [[ -d ${SCRIPT_LOCK-} ]]; then
        if command rm -rf "${SCRIPT_LOCK}"; then
            debug "Removing script lock"
        else
            warning "Script lock could not be removed. Try manually deleting ${yellow}'${SCRIPT_LOCK}'"
        fi
    fi

    if [[ -n ${TMP_DIR-} && -d ${TMP_DIR-} ]]; then
        if [[ ${1-} == 1 && -n "$(ls "${TMP_DIR}")" ]]; then
            command rm -r "${TMP_DIR}"
        else
            command rm -r "${TMP_DIR}"
            debug "Removing temp directory"
        fi
    fi

    trap - INT TERM EXIT
    exit "${1:-0}"
}

_trapCleanup_() {
    # DESC:
    #     Log errors and cleanup from script when an error is trapped. Called by 'trap'
    # ARGS:
    #     $1: Line number where error was trapped
    #     $2: Line number in function
    #     $3: Command executing at the time of the trap
    #     $4: Names of all shell functions currently in the execution call stack
    #     $5: Scriptname
    #     $6: $BASH_SOURCE
    # USAGE:
    #     trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM ERR
    # OUTS:
    #     Exits script with error code 1

    local _line=${1-} # LINENO
    local _linecallfunc=${2-}
    local _command="${3-}"
    local _funcstack="${4-}"
    local _script="${5-}"
    local _sourced="${6-}"

    # Replace the cursor in case 'tput civis' has been used
    tput cnorm

    if declare -f "fatal" &>/dev/null && declare -f "_printFuncStack_" &>/dev/null; then

        _funcstack="'$(printf "%s" "${_funcstack}" | sed -E 's/ / < /g')'"

        if [[ ${_script##*/} == "${_sourced##*/}" ]]; then
            fatal "${7-} command: '${_command}' (line: ${_line}) [func: $(_printFuncStack_)]"
        else
            fatal "${7-} command: '${_command}' (func: ${_funcstack} called at line ${_linecallfunc} of '${_script##*/}') (line: ${_line} of '${_sourced##*/}') "
        fi
    else
        printf "%s\n" "Fatal error trapped. Exiting..."
    fi

    if declare -f _safeExit_ &>/dev/null; then
        _safeExit_ 1
    else
        exit 1
    fi
}
_makeTempDir_() {
    # DESC:
    #     Creates a temp directory to house temporary files
    # ARGS:
    #     $1 (Optional) - First characters/word of directory name
    # OUTS:
    #     Sets $TMP_DIR variable to the path of the temp directory
    # USAGE:
    #     _makeTempDir_ "$(basename "$0")"

    [ -d "${TMP_DIR-}" ] && return 0

    if [ -n "${1-}" ]; then
        TMP_DIR="${TMPDIR:-/tmp/}${1}.${RANDOM}.${RANDOM}.$$"
    else
        TMP_DIR="${TMPDIR:-/tmp/}$(basename "$0").${RANDOM}.${RANDOM}.${RANDOM}.$$"
    fi
    (umask 077 && mkdir "${TMP_DIR}") || {
        fatal "Could not create temporary directory! Exiting."
    }
    debug "\$TMP_DIR=${TMP_DIR}"
}

# shellcheck disable=SC2120
_acquireScriptLock_() {
    # DESC:
    #     Acquire script lock to prevent running the same script a second time before the
    #     first instance exits
    # ARGS:
    #     $1 (optional) - Scope of script execution lock (system or user)
    # OUTS:
    #     exports $SCRIPT_LOCK - Path to the directory indicating we have the script lock
    #     Exits script if lock cannot be acquired
    # NOTE:
    #     If the lock was acquired it's automatically released in _safeExit_()

    local _lockDir
    if [[ ${1-} == 'system' ]]; then
        _lockDir="${TMPDIR:-/tmp/}$(basename "$0").lock"
    else
        _lockDir="${TMPDIR:-/tmp/}$(basename "$0").${UID}.lock"
    fi

    if command mkdir "${_lockDir}" 2>/dev/null; then
        readonly SCRIPT_LOCK="${_lockDir}"
        debug "Acquired script lock: ${yellow}${SCRIPT_LOCK}${purple}"
    else
        if declare -f "_safeExit_" &>/dev/null; then
            error "Unable to acquire script lock: ${yellow}${_lockDir}${red}"
            fatal "If you trust the script isn't running, delete the lock dir"
        else
            printf "%s\n" "ERROR: Could not acquire script lock. If you trust the script isn't running, delete: ${_lockDir}"
            exit 1
        fi

    fi
}
_setPATH_() {
    # DESC:
    #     Add directories to $PATH so script can find executables
    # ARGS:
    #     $@ - One or more paths
    # OPTS:
    #     -x - Fail if directories are not found
    # OUTS:
    #     0: Success
    #     1: Failure
    #     Adds items to $PATH
    # USAGE:
    #     _setPATH_ "/usr/local/bin" "${HOME}/bin" "$(npm bin)"

    [[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"

    local opt
    local OPTIND=1
    local _failIfNotFound=false

    while getopts ":xX" opt; do
        case ${opt} in
            x | X) _failIfNotFound=true ;;
            *)
                {
                    error "Unrecognized option '${1}' passed to _backupFile_" "${LINENO}"
                    return 1
                }
                ;;
        esac
    done
    shift $((OPTIND - 1))

    local _newPath

    for _newPath in "$@"; do
        if [ -d "${_newPath}" ]; then
            if ! printf "%s" "${PATH}" | grep -Eq "(^|:)${_newPath}($|:)"; then
                if PATH="${_newPath}:${PATH}"; then
                    debug "Added '${_newPath}' to PATH"
                else
                    debug "'${_newPath}' already in PATH"
                fi
            else
                debug "_setPATH_: '${_newPath}' already exists in PATH"
            fi
        else
            debug "_setPATH_: can not find: ${_newPath}"
            if [[ ${_failIfNotFound} == true ]]; then
                return 1
            fi
            continue
        fi
    done
    return 0
}
_useGNUutils_() {
    # DESC:
    #     Add GNU utilities to PATH to allow consistent use of sed/grep/tar/etc. on MacOS
    # ARGS:
    #     None
    # OUTS:
    #     0 if successful
    #     1 if unsuccessful
    #     PATH: Adds GNU utilities to the path
    # USAGE:
    #     # if ! _useGNUUtils_; then exit 1; fi
    # NOTES:
    #     GNU utilities can be added to MacOS using Homebrew

    ! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"

    if _setPATH_ \
        "/usr/local/opt/gnu-tar/libexec/gnubin" \
        "/usr/local/opt/coreutils/libexec/gnubin" \
        "/usr/local/opt/gnu-sed/libexec/gnubin" \
        "/usr/local/opt/grep/libexec/gnubin" \
        "/usr/local/opt/findutils/libexec/gnubin" \
        "/opt/homebrew/opt/findutils/libexec/gnubin" \
        "/opt/homebrew/opt/gnu-sed/libexec/gnubin" \
        "/opt/homebrew/opt/grep/libexec/gnubin" \
        "/opt/homebrew/opt/coreutils/libexec/gnubin" \
        "/opt/homebrew/opt/gnu-tar/libexec/gnubin"; then
        return 0
    else
        return 1
    fi

}

_homebrewPath_() {
    # DESC:
    #     Add homebrew bin dir to PATH
    # ARGS:
    #     None
    # OUTS:
    #     0 if successful
    #     1 if unsuccessful
    #     PATH: Adds homebrew bin directory to PATH
    # USAGE:
    #     # if ! _homebrewPath_; then exit 1; fi

    ! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"

    if _uname=$(command -v uname); then
        if "${_uname}" | tr '[:upper:]' '[:lower:]' | grep -q 'darwin'; then
            if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
                return 0
            else
                return 1
            fi
        fi
    else
        if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
            return 0
        else
            return 1
        fi
    fi
}
_parseOptions_() {
    # DESC:
    #     Iterates through options passed to script and sets variables. Will break -ab into -a -b
    #     when needed and --foo=bar into --foo bar
    # ARGS:
    #     $@ from command line
    # OUTS:
    #     Sets array 'ARGS' containing all arguments passed to script that were not parsed as options
    # USAGE:
    #     _parseOptions_ "$@"

    # Iterate over options
    local _optstring=h
    declare -a _options
    local _c
    local i
    while (($#)); do
        case $1 in
            # If option is of type -ab
            -[!-]?*)
                # Loop over each character starting with the second
                for ((i = 1; i < ${#1}; i++)); do
                    _c=${1:i:1}
                    _options+=("-${_c}") # Add current char to options
                    # If option takes a required argument, and it's not the last char make
                    # the rest of the string its argument
                    if [[ ${_optstring} == *"${_c}:"* && -n ${1:i+1} ]]; then
                        _options+=("${1:i+1}")
                        break
                    fi
                done
                ;;
            # If option is of type --foo=bar
            --?*=*) _options+=("${1%%=*}" "${1#*=}") ;;
            # add --endopts for --
            --) _options+=(--endopts) ;;
            # Otherwise, nothing special
            *) _options+=("$1") ;;
        esac
        shift
    done
    set -- "${_options[@]-}"
    unset _options

    # Read the options and set stuff
    # shellcheck disable=SC2034
    while [[ ${1-} == -?* ]]; do
        case $1 in
            # Custom options

            # Common options
            -h | --help)
                _usage_
                _safeExit_
                ;;
            --loglevel)
                shift
                LOGLEVEL=${1}
                ;;
            --logfile)
                shift
                LOGFILE="${1}"
                ;;
            -n | --dryrun) DRYRUN=true ;;
            -v | --verbose) VERBOSE=true ;;
            -q | --quiet) QUIET=true ;;
            --force) FORCE=true ;;
            --endopts)
                shift
                break
                ;;
            *)
                if declare -f _safeExit_ &>/dev/null; then
                    fatal "invalid option: $1"
                else
                    printf "%s\n" "ERROR: Invalid option: $1"
                    exit 1
                fi
                ;;
        esac
        shift
    done

    if [[ -z ${*} || ${*} == null ]]; then
        ARGS=()
    else
        ARGS+=("$@") # Store the remaining user input as arguments.
    fi
}
_columns_() {
    # DESC:
    #     Prints a two column output from a key/value pair.
    #     Optionally pass a number of 2 space tabs to indent the output.
    # ARGS:
    #     $1 (required): Key name (Left column text)
    #     $2 (required): Long value (Right column text. Wraps around if too long)
    #     $3 (optional): Number of 2 character tabs to indent the command (default 1)
    # OPTS:
    #     -b    Bold the left column
    #     -u    Underline the left column
    #     -r    Reverse background and foreground colors
    # OUTS:
    #     stdout: Prints the output in columns
    # NOTE:
    #     Long text or ANSI colors in the first column may create display issues
    # USAGE:
    #     _columns_ "Key" "Long value text" [tab level]

    [[ $# -lt 2 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"

    local opt
    local OPTIND=1
    local _style=""
    while getopts ":bBuUrR" opt; do
        case ${opt} in
            b | B) _style="${_style}${bold}" ;;
            u | U) _style="${_style}${underline}" ;;
            r | R) _style="${_style}${reverse}" ;;
            *) fatal "Unrecognized option '${1}' passed to ${FUNCNAME[0]}. Exiting." ;;
        esac
    done
    shift $((OPTIND - 1))

    local _key="${1}"
    local _value="${2}"
    local _tabLevel="${3-}"
    local _tabSize=2
    local _line
    local _rightIndent
    local _leftIndent
    if [[ -z ${3-} ]]; then
        _tabLevel=0
    fi

    _leftIndent="$((_tabLevel * _tabSize))"

    local _leftColumnWidth="$((30 + _leftIndent))"

    if [ "$(tput cols)" -gt 180 ]; then
        _rightIndent=110
    elif [ "$(tput cols)" -gt 160 ]; then
        _rightIndent=90
    elif [ "$(tput cols)" -gt 130 ]; then
        _rightIndent=60
    elif [ "$(tput cols)" -gt 120 ]; then
        _rightIndent=50
    elif [ "$(tput cols)" -gt 110 ]; then
        _rightIndent=40
    elif [ "$(tput cols)" -gt 100 ]; then
        _rightIndent=30
    elif [ "$(tput cols)" -gt 90 ]; then
        _rightIndent=20
    elif [ "$(tput cols)" -gt 80 ]; then
        _rightIndent=10
    else
        _rightIndent=0
    fi

    local _rightWrapLength=$(($(tput cols) - _leftColumnWidth - _leftIndent - _rightIndent))

    local _first_line=0
    while read -r _line; do
        if [[ ${_first_line} -eq 0 ]]; then
            _first_line=1
        else
            _key=" "
        fi
        printf "%-${_leftIndent}s${_style}%-${_leftColumnWidth}b${reset} %b\n" "" "${_key}${reset}" "${_line}"
    done <<<"$(fold -w${_rightWrapLength} -s <<<"${_value}")"
}
_usage_() {
    cat <<USAGE_TEXT

    ${bold}$(basename "$0") [OPTION]... [FILE]...${reset}

    Custom pre-commit hook script. This script is intended to be used as part of the pre-commit pipeline managed within .pre-commit-config.yaml.

    ${bold}${underline}Options:${reset}
$(_columns_ -b -- '-h, --help' "Display this help and exit" 2)
$(_columns_ -b -- "--loglevel [LEVEL]" "One of: FATAL, ERROR (default), WARN, INFO, NOTICE, DEBUG, ALL, OFF" 2)
$(_columns_ -b -- "--logfile [FILE]" "Full PATH to logfile. (Default is '\${HOME}/logs/$(basename "$0").log')" 2)
$(_columns_ -b -- "-n, --dryrun" "Non-destructive. Makes no permanent changes." 2)
$(_columns_ -b -- "-q, --quiet" "Quiet (no output)" 2)
$(_columns_ -b -- "-v, --verbose" "Output more information. (Items echoed to 'verbose')" 2)
$(_columns_ -b -- "--force" "Skip all user interaction. Implied 'Yes' to all actions." 2)

    ${bold}${underline}Example Usage:${reset}

        ${gray}# Run the script and specify log level and log file.${reset}
        $(basename "$0") -vn --logfile "/path/to/file.log" --loglevel 'WARN'
USAGE_TEXT
}
# ################################## INITIALIZE AND RUN THE SCRIPT
# (Comment or uncomment the lines below to customize script behavior)

trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM

# Trap errors in subshells and functions
set -o errtrace

# Exit on error. Append '||true' if you expect an error
set -o errexit

# Use last non-zero exit code in a pipeline
set -o pipefail

# Confirm we have BASH greater than v4
[ "${BASH_VERSINFO:-0}" -ge 4 ] || {
    printf "%s\n" "ERROR: BASH_VERSINFO is '${BASH_VERSINFO:-0}'. This script requires BASH v4 or greater."
    exit 1
}

# Make `for f in *.txt` work when `*.txt` matches zero files
shopt -s nullglob globstar

# Set IFS to preferred implementation
IFS=$' \n\t'

# Run in debug mode
# set -o xtrace

# Initialize color constants
_setColors_

# Disallow expansion of unset variables
set -o nounset

# Force arguments when invoking the script
# [[ $# -eq 0 ]] && _parseOptions_ "-h"

# Parse arguments passed to script
_parseOptions_ "$@"

# Create a temp directory '$TMP_DIR'
_makeTempDir_ "$(basename "$0")"

# Acquire script lock
# _acquireScriptLock_

# Add Homebrew bin directory to PATH (MacOS)
# _homebrewPath_

# Source GNU utilities from Homebrew (MacOS)
# _useGNUutils_

# Run the main logic script
_mainScript_

# Exit cleanly
_safeExit_
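The deleted script was the in-repo stopwords hook (invoked as scripts/stopwords.sh in the old pre-commit entry); the new config delegates to an external git-stopwords command instead. The hook can still be exercised in isolation through pre-commit itself — a sketch, with README.md as an arbitrary example file:

    $ pre-commit run stopwords --files README.md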
150  scripts/update_dependencies.py  (new executable file)
@@ -0,0 +1,150 @@
#!/usr/bin/env python
"""Script to update the pyproject.toml file with the latest versions of the dependencies."""
from pathlib import Path
from textwrap import wrap

try:
    import tomllib
except ModuleNotFoundError:  # pragma: no cover
    import tomli as tomllib  # type: ignore [no-redef]

import sh
from rich.console import Console

console = Console()


def dryrun(msg: str) -> None:
    """Print a message if the dry run flag is set.

    Args:
        msg: Message to print
    """
    console.print(f"[cyan]DRYRUN | {msg}[/cyan]")


def success(msg: str) -> None:
    """Print a success message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[green]SUCCESS | {msg}[/green]")


def warning(msg: str) -> None:
    """Print a warning message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[yellow]WARNING | {msg}[/yellow]")


def error(msg: str) -> None:
    """Print an error message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[red]ERROR | {msg}[/red]")


def notice(msg: str) -> None:
    """Print a notice message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[bold]NOTICE | {msg}[/bold]")


def info(msg: str) -> None:
    """Print an info message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"INFO | {msg}")


def usage(msg: str, width: int = 80) -> None:
    """Print a usage message without using logging.

    Args:
        msg: Message to print
        width (optional): Width of the message
    """
    for _n, line in enumerate(wrap(msg, width=width)):
        if _n == 0:
            console.print(f"[dim]USAGE | {line}")
        else:
            console.print(f"[dim]      | {line}")


def debug(msg: str) -> None:
    """Print a debug message without using logging.

    Args:
        msg: Message to print
    """
    console.print(f"[blue]DEBUG | {msg}[/blue]")


def dim(msg: str) -> None:
    """Print a message in dimmed color.

    Args:
        msg: Message to print
    """
    console.print(f"[dim]{msg}[/dim]")


# Load the pyproject.toml file
pyproject = Path(__file__).parents[1] / "pyproject.toml"

if not pyproject.exists():
    console.print("pyproject.toml file not found")
    raise SystemExit(1)

with pyproject.open("rb") as f:
    try:
        data = tomllib.load(f)
    except tomllib.TOMLDecodeError as e:
        raise SystemExit(1) from e


# Get the latest versions of all dependencies
info("Getting latest versions of dependencies...")
packages: dict = {}
for line in sh.poetry("--no-ansi", "show", "--outdated").splitlines():
    package, current, latest = line.split()[:3]
    packages[package] = {"current_version": current, "new_version": latest}

if not packages:
    success("All dependencies are up to date")
    raise SystemExit(0)


dependencies = data["tool"]["poetry"]["dependencies"]
groups = data["tool"]["poetry"]["group"]

for p in dependencies:
    if p in packages:
        notice(
            f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
        )
        sh.poetry("add", f"{p}@latest", _fg=True)


for group in groups:
    for p in groups[group]["dependencies"]:
        if p in packages:
            notice(
                f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
            )
            sh.poetry("add", f"{p}@latest", "--group", group, _fg=True)

sh.poetry("update", _fg=True)
success("All dependencies are up to date")
raise SystemExit(0)
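The script drives poetry itself: it parses poetry show --outdated, re-adds each stale package at @latest (per group for dev dependencies), then runs poetry update. It is meant to be run from the project root inside the virtualenv:

    $ poetry run python scripts/update_dependencies.py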
@@ -6,42 +6,42 @@
# 1. Copies a backup and restore shellscript to /usr/local/bin
# 2. Edits the sudoers file to allow the script to be invoked with sudo privileges

-- name: copy backup shellscript to server
+- name: Copy backup shellscript to server
  become: true
  ansible.builtin.template:
    src: scripts/service_backups.sh.j2
    dest: /usr/local/bin/service_backups
    mode: 0755
  when:
    - is_nomad_client or is_nomad_server

-- name: copy restore shellscript to server
+- name: Copy restore shellscript to server
  become: true
  ansible.builtin.template:
    src: scripts/service_restore.sh.j2
    dest: /usr/local/bin/service_restore
    mode: 0755
  when:
    - is_nomad_client or is_nomad_server

-- name: ensure nomad user can run sudo with the restore script
+- name: Ensure nomad user can run sudo with the restore script
  become: true
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "nomad ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
    validate: "/usr/sbin/visudo -cf %s"
  when:
    - is_nomad_client or is_nomad_server
    - "'pis' in group_names"

-- name: ensure my user can run sudo with the restore script
+- name: Ensure my user can run sudo with the restore script
  become: true
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "{{ ansible_user }} ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
    validate: "/usr/sbin/visudo -cf %s"
  when:
    - is_nomad_client or is_nomad_server
    - "'pis' in group_names"
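The `validate: "/usr/sbin/visudo -cf %s"` argument makes Ansible run the candidate sudoers content through visudo before installing it, so a syntax error cannot break sudo on the host. The same check can be run by hand against the live file (illustrative output):

$ sudo visudo -cf /etc/sudoers
/etc/sudoers: parsed OK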
@@ -6,159 +6,159 @@
- name: "Mount storage on Raspberry Pis"
  when: "'pis' in group_names"
  block:
-    - name: ensure local mount points exist
+    - name: Ensure local mount points exist
      become: true
      ansible.builtin.file:
        path: "{{ item.local }}"
        state: directory
        mode: 0777
        # owner: "{{ ansible_user_uid }}"
        # group: "{{ ansible_user_gid }}"
      loop: "{{ rpi_nfs_mounts_list }}"

-    - name: remove old nfs drives
+    - name: Remove old nfs drives
      become: true
      ansible.posix.mount:
        path: "{{ item.local }}"
        src: "{{ item.src }}"
        fstype: nfs
        opts: defaults,hard,intr,timeo=14
        state: absent
      loop: "{{ rpi_nfs_mounts_remove }}"

-    - name: mount all nfs drives
+    - name: Mount all nfs drives
      become: true
      ansible.posix.mount:
        path: "{{ item.local }}"
        src: "{{ item.src }}"
        fstype: nfs
        opts: defaults,hard,intr,timeo=14
        state: mounted
        boot: true
      loop: "{{ rpi_nfs_mounts_list }}"

# --------------------------------- Mount on Macs
# https://gist.github.com/l422y/8697518
- name: "Mount storage on Macs"
  when: "'macs' in group_names"
  block:
-    - name: create mount_point
+    - name: Create mount_point
      become: true
      ansible.builtin.file:
        path: "{{ mac_storage_mount_point }}"
        state: directory
        mode: 0755

    # I ran into problems getting this to run successfully. If errors occur, add the line manually using:
    # $ sudo nano /private/etc/auto_master

-    - name: add NFS shared drives to macs
+    - name: Add NFS shared drives to macs
      when: mac_autofs_type == 'nfs'
      block:
-        - name: add auto_nfs to "/private/etc/auto_master"
+        - name: Add auto_nfs to "/private/etc/auto_master"
          become: true
          ansible.builtin.lineinfile:
            path: /private/etc/auto_master
            regexp: "auto_nfs"
            line: "/- auto_nfs -nobrowse,nosuid"
            unsafe_writes: true

-        - name: add mounts to /etc/auto_nfs
+        - name: Add mounts to /etc/auto_nfs
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_nfs
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
            state: present
            unsafe_writes: true
            mode: 0644
          loop: "{{ mac_nfs_mounts_list if mac_nfs_mounts_list is iterable else [] }}"
          notify: mac_run_automount

-        - name: remove old mounts from /etc/auto_nfs
+        - name: Remove old mounts from /etc/auto_nfs
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_nfs
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
            state: absent
            unsafe_writes: true
            mode: 0644
          notify: mac_run_automount_unmount
          loop: "{{ mac_nfs_mounts_remove if mac_nfs_mounts_remove is iterable else [] }}"

-    - name: add AFP shared drives to macs
+    - name: Add AFP shared drives to macs
      when: mac_autofs_type == 'afp'
      block:
-        - name: add auto_afp to "/private/etc/auto_master"
+        - name: Add auto_afp to "/private/etc/auto_master"
          become: true
          ansible.builtin.lineinfile:
            path: /private/etc/auto_master
            regexp: "auto_afp"
            line: "/- auto_afp -nobrowse,nosuid"
            unsafe_writes: true

-        - name: add mounts to /etc/auto_afp
+        - name: Add mounts to /etc/auto_afp
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_afp
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
            state: present
            unsafe_writes: true
            mode: 0644
          loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
          notify: mac_run_automount

-        - name: remove mounts from /etc/auto_afp
+        - name: Remove mounts from /etc/auto_afp
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_afp
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
            state: present
            unsafe_writes: true
            mode: 0644
          loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
          notify: mac_run_automount_unmount

-    - name: add SMB shared drives to macs
+    - name: Add SMB shared drives to macs
      when: mac_autofs_type == 'smb'
      block:
-        - name: add auto_smb to "/private/etc/auto_master"
+        - name: Add auto_smb to "/private/etc/auto_master"
          become: true
          ansible.builtin.lineinfile:
            path: /private/etc/auto_master
            regexp: "auto_smb"
            line: "/- auto_smb -noowners,nosuid"
            unsafe_writes: true

-        - name: add mounts to /etc/auto_smb
+        - name: Add mounts to /etc/auto_smb
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_smb
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=smbfs,soft,noowners,nosuid,rw ://{{ smb_username }}:{{ smb_password }}@{{ item.src }}"
            state: present
            unsafe_writes: true
            mode: 0644
          loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
          notify: mac_run_automount

-        - name: remove mounts from /etc/auto_smb
+        - name: Remove mounts from /etc/auto_smb
          become: true
          ansible.builtin.lineinfile:
            create: true
            path: /private/etc/auto_smb
            regexp: "{{ item.src }}"
            line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
            state: present
            unsafe_writes: true
            mode: 0644
          loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
          notify: mac_run_automount_unmount
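As the inline comment in this file warns, editing /private/etc/auto_master through Ansible can fail on some macOS releases. A minimal manual fallback, assuming the NFS map used above:

$ sudo nano /private/etc/auto_master   # append the line: /- auto_nfs -nobrowse,nosuid
$ sudo automount -vc                   # flush the automounter cache and remount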
584
tasks/consul.yml
@@ -4,356 +4,362 @@

- name: Set variables needed to install consul
  block:
    - name: "Set variable: check if we have a mounted USB drive (Debian)"
      ansible.builtin.stat:
        path: "{{ rpi_usb_drive_mount_point }}"
      register: have_usb_drive
      changed_when: false
      when:
        - ansible_os_family == 'Debian'

    - name: "Set variable: Use USB drive for consul /opt (Debian)"
      ansible.builtin.set_fact:
        consul_opt_dir: "{{ rpi_usb_drive_mount_point }}/opt/consul"
      when:
        - ansible_os_family == 'Debian'
        - have_usb_drive.stat.exists

    - name: "Set variable: Use root disk for consul /opt (Debian)"
      ansible.builtin.set_fact:
        consul_opt_dir: "/opt/consul"
      when:
        - ansible_os_family == 'Debian'
        - not have_usb_drive.stat.exists

    - name: "Set variable: Use ~/library for /opt files (macOSX)"
      ansible.builtin.set_fact:
        consul_opt_dir: "/Users/{{ ansible_user }}/Library/consul"
      when:
        - mac_intel or mac_arm

    - name: "Set variable: Use ~/volume1/docker/consul/data for /opt files (synology)"
      ansible.builtin.set_fact:
        consul_opt_dir: "/volume1/docker/consul/data"
      when:
        - inventory_hostname == 'synology'

    - name: "Set variable: Set Consul download Binary (armv7l)"
      ansible.builtin.set_fact:
        consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
      when:
        - ansible_os_family == 'Debian'
        - ansible_architecture == 'armv7l'

    - name: "Set variable: Set Consul download Binary (aarch64)"
      ansible.builtin.set_fact:
        consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm64.zip"
      when:
        - ansible_os_family == 'Debian'
        - ansible_architecture == 'aarch64'

    - name: "Set variable: Set Consul download Binary (MacOSX)"
      ansible.builtin.set_fact:
        consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_amd64.zip"
      when: mac_intel

    - name: "Set variable: Set Consul download Binary (MacOSX)"
      ansible.builtin.set_fact:
        consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_arm64.zip"
      when: mac_arm

    - name: Assert that we can install Consul
      ansible.builtin.assert:
        that:
          - consul_download_uri is defined
          - consul_opt_dir is defined
        fail_msg: "Unable to install consul on this host"
      when: inventory_hostname != 'synology'

- name: "Stop Consul"
  block:
    - name: "Stop consul systemd service (Debian)"
      become: true
      ansible.builtin.systemd:
        name: consul
        state: stopped
      when:
        - ansible_os_family == 'Debian'
        - ansible_facts.services["consul.service"] is defined

    - name: "Check if plist file exists (MacOSX)"
      ansible.builtin.stat:
        path: "{{ consul_plist_macos }}"
      register: consul_file
      when:
        - ansible_os_family == 'Darwin'

    - name: "Unload consul agent (MacOSX)"
      become: true
      ansible.builtin.command:
        cmd: "launchctl unload {{ consul_plist_macos }}"
+      register: consul_unload
+      failed_when: consul_unload.rc != 0
+      changed_when: consul_unload.rc == 0
      when:
        - ansible_os_family == 'Darwin'
        - consul_file.stat.exists

- name: "Create 'consul' user and group"
  when:
    - ansible_os_family == 'Debian'
  block:
    - name: "Ensure group 'consul' exists (Debian)"
      become: true
      ansible.builtin.group:
        name: consul
        state: present

    - name: "Add the user 'consul' with group 'consul' (Debian)"
      become: true
      ansible.builtin.user:
        name: consul
        group: consul

- name: "Create Consul /opt storage and copy certificates"
  block:
    - name: "Create {{ consul_opt_dir }} directories"
      become: true
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        recurse: true
        mode: 0755
      loop:
        - "{{ consul_opt_dir }}"
        - "{{ consul_opt_dir }}/logs"
        - "{{ consul_opt_dir }}/plugins"
        - "{{ consul_opt_dir }}/certs"

    - name: Copy certs to servers
      become: true
      ansible.builtin.copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        mode: 0755
      loop:
        - { src: "certs/consul/consul-agent-ca.pem", dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem" }
        - { src: "certs/consul/{{ datacenter_name }}-server-consul-0.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0.pem" }
        - { src: "certs/consul/{{ datacenter_name }}-server-consul-0-key.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem" }
      when:
        - is_consul_server

    - name: Copy certs to clients
      become: true
      ansible.builtin.copy:
        src: certs/consul/consul-agent-ca.pem
        dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
        mode: 0755
      when:
        - is_consul_client
        - not is_consul_server

    - name: "Set owner of files to consul:consul (debian)"
      become: true
      ansible.builtin.file:
        path: "{{ consul_opt_dir }}"
        owner: consul
        group: consul
        recurse: true
      when:
        - ansible_os_family == 'Debian'

    - name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
      become: true
      ansible.builtin.file:
        path: "{{ consul_opt_dir }}"
        owner: "{{ ansible_user_uid }}"
        group: "{{ ansible_user_gid }}"
        recurse: true
      when:
        - mac_intel or mac_arm or inventory_hostname == 'synology'

- name: "Template out Consul configuration file"
  block:
    - name: "Create {{ interpolated_consul_configuration_dir }}"
      become: true
      ansible.builtin.file:
        path: "{{ interpolated_consul_configuration_dir }}"
        state: directory
        mode: 0755

    - name: Copy consul base config file
      become: true
      ansible.builtin.template:
        src: consul.hcl.j2
        dest: "{{ interpolated_consul_configuration_dir }}/consul.hcl"
        mode: 0644

    - name: "Set owner of files to consul:consul (Debian)"
      become: true
      ansible.builtin.file:
        path: "{{ interpolated_consul_configuration_dir }}"
        owner: consul
        group: consul
        recurse: true
      when:
        - ansible_os_family == 'Debian'

    - name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
      become: true
      ansible.builtin.file:
        path: "{{ interpolated_consul_configuration_dir }}"
        owner: "{{ ansible_user_uid }}"
        group: "{{ ansible_user_gid }}"
        recurse: true
      when:
        - mac_intel or mac_arm or inventory_hostname == 'synology'

    - name: "Set owner of root consul dir to {{ ansible_user_uid }}:{{ ansible_user_gid }} (synology)"
      become: true
      ansible.builtin.file:
        path: /volume1/docker/consul/
        owner: "{{ ansible_user_uid }}"
        group: "{{ ansible_user_gid }}"
        recurse: true
      when:
        - inventory_hostname == 'synology'

- name: "Install Consul binary"
  block:
    - name: "Set fact: need install consul?"
      ansible.builtin.set_fact:
        need_consul_install: false
      when:
        - consul_download_uri is defined

    - name: Check if Consul is installed
      ansible.builtin.stat:
        path: /usr/local/bin/consul
      register: consul_binary_file_location
      when:
        - consul_download_uri is defined

    - name: "Set fact: need consul install?"
      ansible.builtin.set_fact:
        need_consul_install: true
      when:
        - consul_download_uri is defined
        - not consul_binary_file_location.stat.exists

    - name: Check current version of Consul
      ansible.builtin.shell:
        cmd: /usr/local/bin/consul --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
      ignore_errors: true
      changed_when: false
      register: installed_consul_version
      check_mode: false
      when:
        - consul_download_uri is defined
        - not need_consul_install

    - name: "Set fact: need consul install?"
      ansible.builtin.set_fact:
        need_consul_install: true
      when:
        - consul_download_uri is defined
        - not need_consul_install
        - installed_consul_version.stdout is version(consul_version, '<')

    - name: Install Consul
      become: true
      ansible.builtin.unarchive:
        src: "{{ consul_download_uri }}"
        dest: /usr/local/bin
        remote_src: true
      when:
        - consul_download_uri is defined
        - need_consul_install

- name: "Validate consul config"
  ansible.builtin.command:
    cmd: "/usr/local/bin/consul validate {{ interpolated_consul_configuration_dir }}"
  register: consul_config_valid
  changed_when: false
  failed_when: consul_config_valid.rc != 0
  when:
    - inventory_hostname != 'synology'
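`consul validate` checks every configuration file in the directory it is handed, which is why the task above points at the whole configuration directory rather than one file. Run by hand it looks like this (the path is illustrative; use whatever interpolated_consul_configuration_dir resolves to on the host):

$ consul validate /etc/consul.d
Configuration is valid!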
- name: "Copy system.d or launchctl service files"
  block:
    - name: Ensure /Library/LaunchAgents exists (MacOSX)
      ansible.builtin.file:
        path: "{{ consul_plist_macos | dirname }}"
        state: directory
        mode: 0755
      when:
        - ansible_os_family == 'Darwin'

    - name: Create Consul launchd service (MacOSX)
      ansible.builtin.template:
        src: consul.launchd.j2
        dest: "{{ consul_plist_macos }}"
        mode: 0644
      when:
        - ansible_os_family == 'Darwin'

    - name: Create Consul service (Debian)
      become: true
      ansible.builtin.template:
        src: consul.service.j2
        dest: /etc/systemd/system/consul.service
        mode: 0644
      when:
        - ansible_os_family == 'Debian'

- name: "Start Consul"
  block:
    - name: Load the Consul agent (MacOSX)
      ansible.builtin.command:
        cmd: "launchctl load -w {{ consul_plist_macos }}"
+      register: consul_loaded
+      changed_when: consul_loaded.rc == 0
+      failed_when: consul_loaded.rc > 0
      when:
        - mac_intel or mac_arm
        - "'nostart' not in ansible_run_tags"

    - name: Start Consul (Debian)
      become: true
      ansible.builtin.systemd:
        name: consul
        enabled: true
        state: started
      when:
        - ansible_os_family == 'Debian'
        - "'nostart' not in ansible_run_tags"

    - name: Make sure Consul service is really running
      ansible.builtin.command:
        cmd: systemctl is-active consul
      register: is_consul_really_running
      changed_when: false
      failed_when: is_consul_really_running.rc != 0
      when:
        - ansible_os_family == 'Debian'
        - "'nostart' not in ansible_run_tags"

- name: "Copy Consul service checks to synology"
  when:
    - inventory_hostname == 'synology'
  block:
    - name: Copy config file
      ansible.builtin.template:
        src: consul_services/consul_synology_checks.json.j2
        dest: "{{ interpolated_consul_configuration_dir }}/service_checks.json"
        mode: 0644

    - name: Reload configuration file
      ansible.builtin.uri:
        url: "http://{{ synology_second_ip }}:8500/v1/agent/reload"
        method: PUT
        status_code: 200
      ignore_errors: true
      check_mode: false
      register: consul_agent_reload_http_response
      failed_when: consul_agent_reload_http_response.status != 200

    - name: Debug when consul agent reload fails
      ansible.builtin.debug:
        var: consul_agent_reload_http_response.msg
      check_mode: false
      when: consul_agent_reload_http_response.status != 200
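The Synology host appears to run Consul outside a service manager, so the playbook reloads it through Consul's HTTP API rather than a signal. The equivalent manual call (the address is an example; substitute the agent's bind address):

$ curl -X PUT http://127.0.0.1:8500/v1/agent/reload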
@@ -33,5 +33,5 @@
# when:
#     - ansible_facts['system_vendor'] is search("Synology")

-- name: "end play"
+- name: "End play"
  ansible.builtin.meta: end_play
132
tasks/docker.yml
@@ -4,85 +4,91 @@

- name: Check if Docker is already present
  ansible.builtin.command:
    cmd: docker --version
  register: docker_command_result
  changed_when: docker_command_result.rc == 1
  failed_when: false

-- name: install docker on Debian
+- name: Install docker on Debian
  when: ansible_os_family == 'Debian'
  block:
    - name: "Add docker local filesystem storage directory"
      ansible.builtin.file:
        path: "{{ rpi_localfs_service_storage }}"
        mode: 0755
        state: directory

    - name: Download Docker install convenience script
      ansible.builtin.get_url:
        url: "https://get.docker.com/"
        dest: /tmp/get-docker.sh
        mode: 0775
      when: docker_command_result.rc == 1

    - name: Run Docker install convenience script
      ansible.builtin.command: /tmp/get-docker.sh
      environment:
        CHANNEL: stable
+      register: docker_install
+      failed_when: docker_install.rc > 0
+      changed_when: docker_install.rc == 0
      when: docker_command_result.rc == 1

    - name: Make sure Docker CE is the version specified
      ansible.builtin.apt:
        name: "docker-ce"
        state: present
      when: docker_command_result.rc == 1

    - name: Ensure Docker is started
      ansible.builtin.service:
        name: docker
        state: started
        enabled: true

    - name: Ensure docker users are added to the docker group
      become: true
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true
      when: docker_command_result.rc == 1

-- name: install docker on macOS
+- name: Install docker on macOS
  when: "'macs' in group_names"
  block:
    - name: "Add docker directory to ~/Library"
      ansible.builtin.file:
        path: "{{ mac_localfs_service_storage }}"
        mode: 0755
        state: directory

-    - name: install base homebrew packages
+    - name: Install base homebrew packages
      community.general.homebrew:
        name: docker
        state: present
        update_homebrew: false
        upgrade_all: false
      when: docker_command_result.rc == 1

-    - name: open docker application
+    - name: Open docker application
      ansible.builtin.command:
        cmd: open /Applications/Docker.app
+      register: docker_open_app
+      failed_when: docker_open_app.rc > 0
+      changed_when: docker_open_app.rc == 0
      when: docker_command_result.rc == 1

    - name: Must install Docker manually
      ansible.builtin.debug:
        msg: |
          Docker must be installed manually on MacOS. Log in to mac to install then rerun playbook

          Be certain to configure the following:
            - run on login
            - add '{{ mac_storage_mount_point }}' to mountable file system directories
      when: docker_command_result.rc == 1

-- name: end play
+- name: End play
  ansible.builtin.meta: end_play
  when: docker_command_result.rc == 1
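The Debian branch mirrors Docker's documented convenience-script flow (download from get.docker.com, then execute with the stable channel pinned). The same steps run by hand look roughly like:

$ curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
$ sudo CHANNEL=stable sh /tmp/get-docker.sh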
@@ -8,46 +8,46 @@

- name: "Set local filesystem location (pis)"
  ansible.builtin.set_fact:
    interpolated_localfs_service_storage: "{{ rpi_localfs_service_storage }}"
  changed_when: false
  when:
    - "'pis' in group_names"

- name: "Set local filesystem location (macs)"
  ansible.builtin.set_fact:
    interpolated_localfs_service_storage: "{{ mac_localfs_service_storage }}"
  changed_when: false
  when:
    - "'macs' in group_names"

- name: "Set NFS mount location (pis)"
  ansible.builtin.set_fact:
    interpolated_nfs_service_storage: "{{ rpi_nfs_mount_point }}"
  changed_when: false
  when:
    - "'pis' in group_names"

- name: "Set NFS mount location location (macs)"
  ansible.builtin.set_fact:
    interpolated_nfs_service_storage: "{{ mac_storage_mount_point }}"
  changed_when: false
  when:
    - "'macs' in group_names"

-- name: "set consul configuration directory (synology)"
+- name: "Set consul configuration directory (synology)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ synology_consul_configuration_dir }}"
  when:
    - inventory_hostname == 'synology'

-- name: "set consul configuration directory (pis)"
+- name: "Set consul configuration directory (pis)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ rpi_consul_configuration_dir }}"
  when:
    - "'pis' in group_names"

-- name: "set consul configuration directory (macs)"
+- name: "Set consul configuration directory (macs)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ mac_consul_configuration_dir }}"
  when:
    - "'macs' in group_names"
@@ -4,29 +4,29 @@
#
# NOTE: This task exists due to the arillso.logrotate failing completely on macOS

-- name: add service_backups.log to logrotate
+- name: Add service_backups.log to logrotate
  become: true
  vars:
    logrotate_applications:
      - name: service_backups
        definitions:
          - logs:
              - "{{ rpi_nfs_mount_point }}/pi-cluster/logs/service_backups.log"
            options:
              - rotate 1
              - size 100k
              - missingok
              - notifempty
              - su root root
              - extension .log
              - compress
              - nodateext
              - nocreate
              - delaycompress
  ansible.builtin.import_role:
    name: arillso.logrotate
  failed_when: false
  ignore_errors: true
  when:
    - "'macs' not in group_names"
    - is_cluster_leader
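A definition installed by arillso.logrotate can be exercised as a dry run before the next scheduled rotation; the path below is an assumption based on the role naming its output after the application:

$ sudo logrotate -d /etc/logrotate.d/service_backups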
394
tasks/nomad.yml
394
tasks/nomad.yml
@@ -4,243 +4,243 @@
|
||||
|
||||
- name: "Set variables needed to install Nomad"
|
||||
block:
|
||||
- name: "set variable: check if we have a mounted USB drive (Debian)"
|
||||
ansible.builtin.stat:
|
||||
path: "{{ rpi_usb_drive_mount_point }}"
|
||||
register: have_usb_drive
|
||||
changed_when: false
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- name: "Set variable: check if we have a mounted USB drive (Debian)"
|
||||
ansible.builtin.stat:
|
||||
path: "{{ rpi_usb_drive_mount_point }}"
|
||||
register: have_usb_drive
|
||||
changed_when: false
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
- name: "set variable: Use USB drive for nomad /opt (Debian)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "{{ rpi_usb_drive_mount_point }}/opt/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- have_usb_drive.stat.exists
|
||||
- name: "Set variable: Use USB drive for nomad /opt (Debian)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "{{ rpi_usb_drive_mount_point }}/opt/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- have_usb_drive.stat.exists
|
||||
|
||||
- name: "set variable: Use root dist for nomad /opt (Debian)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "/opt/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- not have_usb_drive.stat.exists
|
||||
- name: "Set variable: Use root dist for nomad /opt (Debian)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "/opt/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- not have_usb_drive.stat.exists
|
||||
|
||||
- name: "set variable: Use ~/library for /opt files (macOSX)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "/Users/{{ ansible_user }}/Library/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
- name: "Set variable: Use ~/library for /opt files (macOSX)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_opt_dir_location: "/Users/{{ ansible_user }}/Library/nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
|
||||
- name: "set variable: Set Nomad download Binary (armv7l)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ansible_architecture == 'armv7l'
|
||||
- name: "Set variable: Set Nomad download Binary (armv7l)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ansible_architecture == 'armv7l'
|
||||
|
||||
- name: "set variable: Set Nomad download Binary (aarch64)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ansible_architecture == 'aarch64'
|
||||
- name: "Set variable: Set Nomad download Binary (aarch64)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ansible_architecture == 'aarch64'
|
||||
|
||||
- name: "set variable: Set Nomad download Binary (MacOSX)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_darwin_amd64.zip"
|
||||
when:
|
||||
- mac_intel
|
||||
- name: "Set variable: Set Nomad download Binary (MacOSX)"
|
||||
ansible.builtin.set_fact:
|
||||
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_darwin_amd64.zip"
|
||||
when:
|
||||
- mac_intel
|
||||
|
||||
- name: Assert that we can install Nomad
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- nomad_download_file_uri is defined
|
||||
- nomad_opt_dir_location is defined
|
||||
fail_msg: "Unable to install Nomad on this host"
|
||||
- name: Assert that we can install Nomad
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- nomad_download_file_uri is defined
|
||||
- nomad_opt_dir_location is defined
|
||||
fail_msg: "Unable to install Nomad on this host"
|
||||
|
||||
- name: "Create Nomad user and group (Debian)"
|
||||
when: ansible_os_family == 'Debian'
|
||||
block:
|
||||
- name: "Ensure group 'nomad' exists (Debian)"
|
||||
become: true
|
||||
ansible.builtin.group:
|
||||
name: nomad
|
||||
state: present
|
||||
- name: "Ensure group 'nomad' exists (Debian)"
|
||||
become: true
|
||||
ansible.builtin.group:
|
||||
name: nomad
|
||||
state: present
|
||||
|
||||
- name: "Add the user 'nomad' with group 'nomad' (Debian)"
|
||||
become: true
|
||||
ansible.builtin.user:
|
||||
name: nomad
|
||||
group: nomad
|
||||
- name: "Add the user 'nomad' with group 'nomad' (Debian)"
|
||||
become: true
|
||||
ansible.builtin.user:
|
||||
name: nomad
|
||||
group: nomad
|
||||
|
||||
- name: "Add user 'nomad' to docker and sudo groups (Debian)"
|
||||
become: true
|
||||
ansible.builtin.user:
|
||||
user: nomad
|
||||
groups: docker,sudo
|
||||
append: true
|
||||
- name: "Add user 'nomad' to docker and sudo groups (Debian)"
|
||||
become: true
|
||||
ansible.builtin.user:
|
||||
user: nomad
|
||||
groups: docker,sudo
|
||||
append: true
|
||||
|
||||
- name: "Create Nomad /opt storage"
|
||||
block:
|
||||
- name: "create {{ nomad_opt_dir_location }} directories"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: 0755
|
||||
loop:
|
||||
- "{{ nomad_opt_dir_location }}/logs"
|
||||
- "{{ nomad_opt_dir_location }}/plugins"
|
||||
- "{{ nomad_opt_dir_location }}/certs"
|
||||
- name: "Create {{ nomad_opt_dir_location }} directories"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
recurse: true
|
||||
mode: 0755
|
||||
loop:
|
||||
- "{{ nomad_opt_dir_location }}/logs"
|
||||
- "{{ nomad_opt_dir_location }}/plugins"
|
||||
- "{{ nomad_opt_dir_location }}/certs"
|
||||
|
||||
- name: Copy server certs
|
||||
become: true
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
mode: 0755
|
||||
loop:
|
||||
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
|
||||
- { src: certs/nomad/server.pem, dest: "{{ nomad_opt_dir_location }}/certs/server.pem" }
|
||||
- { src: certs/nomad/server-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/server-key.pem" }
|
||||
notify: "restart nomad"
|
||||
when: is_nomad_server
|
||||
- name: Copy server certs
|
||||
become: true
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
mode: 0755
|
||||
loop:
|
||||
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
|
||||
- { src: certs/nomad/server.pem, dest: "{{ nomad_opt_dir_location }}/certs/server.pem" }
|
||||
- { src: certs/nomad/server-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/server-key.pem" }
|
||||
notify: "restart nomad"
|
||||
when: is_nomad_server
|
||||
|
||||
- name: Copy client certs
|
||||
become: true
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
mode: 0755
|
||||
loop:
|
||||
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
|
||||
- { src: certs/nomad/client.pem, dest: "{{ nomad_opt_dir_location }}/certs/client.pem" }
|
||||
- { src: certs/nomad/client-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/client-key.pem" }
|
||||
notify: "restart nomad"
|
||||
when: is_nomad_client
|
||||
- name: Copy client certs
|
||||
become: true
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
mode: 0755
|
||||
loop:
|
||||
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
|
||||
- { src: certs/nomad/client.pem, dest: "{{ nomad_opt_dir_location }}/certs/client.pem" }
|
||||
- { src: certs/nomad/client-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/client-key.pem" }
|
||||
notify: "restart nomad"
|
||||
when: is_nomad_client
|
||||
|
||||
- name: "set owner of files to nomad:nomad (debian)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_opt_dir_location }}"
|
||||
owner: nomad
|
||||
group: nomad
|
||||
recurse: true
|
||||
when: ansible_os_family == 'Debian'
|
||||
- name: "Set owner of files to nomad:nomad (debian)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_opt_dir_location }}"
|
||||
owner: nomad
|
||||
group: nomad
|
||||
recurse: true
|
||||
when: ansible_os_family == 'Debian'
|
||||
|
||||
- name: "set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_opt_dir_location }}"
|
||||
owner: "{{ ansible_user_uid }}"
|
||||
group: "{{ ansible_user_gid }}"
|
||||
recurse: true
|
||||
when: ansible_os_family != 'Debian'
|
||||
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_opt_dir_location }}"
|
||||
owner: "{{ ansible_user_uid }}"
|
||||
group: "{{ ansible_user_gid }}"
|
||||
recurse: true
|
||||
when: ansible_os_family != 'Debian'
|
||||
|
||||
- name: "Template out the configuration file"
|
||||
block:
|
||||
- name: "create {{ nomad_configuration_dir }}"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_configuration_dir }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
- name: "Create {{ nomad_configuration_dir }}"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_configuration_dir }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: copy base config file
|
||||
become: true
|
||||
ansible.builtin.template:
|
||||
src: nomad.hcl.j2
|
||||
dest: "{{ nomad_configuration_dir }}/nomad.hcl"
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
- name: Copy base config file
|
||||
become: true
|
||||
ansible.builtin.template:
|
||||
src: nomad.hcl.j2
|
||||
dest: "{{ nomad_configuration_dir }}/nomad.hcl"
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
|
||||
- name: "set owner of files to nomad:nomad (Debian)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_configuration_dir }}"
|
||||
owner: nomad
|
||||
group: nomad
|
||||
recurse: true
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- name: "Set owner of files to nomad:nomad (Debian)"
|
||||
become: true
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_configuration_dir }}"
|
||||
owner: nomad
|
||||
group: nomad
|
||||
recurse: true
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
 - name: Install or Update Nomad
   block:
-    - name: "set fact: do we need a nomad install?"
+    - name: "Set fact: do we need a nomad install?"
       ansible.builtin.set_fact:
         need_nomad_install: false

     - name: Check if nomad is installed
       ansible.builtin.stat:
         path: /usr/local/bin/nomad
       register: nomad_binary_file_location

-    - name: "set fact: do we need a nomad install"
+    - name: "Set fact: do we need a nomad install"
       ansible.builtin.set_fact:
         need_nomad_install: true
       when:
         - not nomad_binary_file_location.stat.exists

     - name: Check current version of Nomad
       ansible.builtin.shell: /usr/local/bin/nomad --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
       ignore_errors: true
       register: current_nomad_version
       check_mode: false
       changed_when: false
       when:
         - not need_nomad_install

-    - name: "set fact: do we need a nomad install"
+    - name: "Set fact: do we need a nomad install"
       ansible.builtin.set_fact:
         need_nomad_install: true
       when:
         - not need_nomad_install
         - current_nomad_version.stdout is version(nomad_version, '<')

-    - name: install Nomad
+    - name: Install Nomad
       become: true
       ansible.builtin.unarchive:
         src: "{{ nomad_download_file_uri }}"
         dest: /usr/local/bin
         remote_src: true
       notify: "restart nomad"
       when:
         - need_nomad_install
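The install-or-update logic above hinges on Ansible's built-in version test: install when the binary is missing, or when the installed version compares lower than the target. A minimal sketch of that pattern on its own, with illustrative variable names that are not from this repo:

- name: "Set fact: decide whether an upgrade is needed (sketch)"
  ansible.builtin.set_fact:
    need_install: "{{ installed_version.stdout is version(target_version, '<') }}"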
- name: "Copy system.d or launchctrl service files"
|
||||
block:
|
||||
- name: ensure /Library/LaunchAgents exists (MacOSX)
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_plist_macos | dirname }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
- name: Ensure /Library/LaunchAgents exists (MacOSX)
|
||||
ansible.builtin.file:
|
||||
path: "{{ nomad_plist_macos | dirname }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
|
||||
- name: create nomad launchd service (MacOSX)
|
||||
ansible.builtin.template:
|
||||
src: nomad.launchd.j2
|
||||
dest: "{{ nomad_plist_macos }}"
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
- name: Create nomad launchd service (MacOSX)
|
||||
ansible.builtin.template:
|
||||
src: nomad.launchd.j2
|
||||
dest: "{{ nomad_plist_macos }}"
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Darwin'
|
||||
|
||||
- name: create nomad service (Debian)
|
||||
become: true
|
||||
ansible.builtin.template:
|
||||
src: nomad.service.j2
|
||||
dest: /etc/systemd/system/nomad.service
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- name: Create nomad service (Debian)
|
||||
become: true
|
||||
ansible.builtin.template:
|
||||
src: nomad.service.j2
|
||||
dest: /etc/systemd/system/nomad.service
|
||||
mode: 0644
|
||||
notify: "restart nomad"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
- name: "start nomad, if stopped"
|
||||
- name: "Start nomad, if stopped"
|
||||
ansible.builtin.shell:
|
||||
cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
|
||||
cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
|
||||
register: node_status_response
|
||||
ignore_errors: true
|
||||
failed_when: false
|
||||
|
||||
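The status probe above combines ignore_errors: true with failed_when: false; the latter alone is sufficient, since failed_when: false keeps the task from ever failing while still registering rc and output. A leaner sketch of the same probe:

- name: "Probe node registration without failing the play (sketch)"
  ansible.builtin.shell:
    cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
  register: node_status_response
  changed_when: false
  failed_when: false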
@@ -10,67 +10,79 @@

 - name: "Sync Nomad Jobs"
   block:
     - name: Remove nomad jobs directory
       ansible.builtin.file:
         path: "{{ nomad_jobfile_location }}"
         state: absent
       when:
         - is_nomad_client or is_nomad_server or ("'macs' in group_names")
         - clean_nomad_jobs

     - name: (Re)Create nomad jobs directory
       ansible.builtin.file:
         path: "{{ nomad_jobfile_location }}"
         state: directory
         mode: 0755
       when:
         - is_nomad_client or is_nomad_server or ("'macs' in group_names")
         - "'nas' not in group_names"

-    - name: synchronize nomad job templates (jinja)
+    - name: Synchronize nomad job templates (jinja)
       ansible.builtin.template:
         src: "{{ item }}"
         dest: "{{ nomad_jobfile_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
         mode: 0644
       with_fileglob: "templates/nomad_jobs/*.j2"
       when:
         - is_nomad_client or is_nomad_server or ("'macs' in group_names")
         - "'nas' not in group_names"

-    - name: synchronize nomad job templates (hcl)
+    - name: Synchronize nomad job templates (hcl)
       ansible.builtin.template:
         src: "{{ item }}"
         dest: "{{ nomad_jobfile_location }}/{{ item | basename }}"
         mode: 0644
       with_fileglob: "templates/nomad_jobs/*.hcl"
       when:
         - is_nomad_client or is_nomad_server or ("'macs' in group_names")
         - "'nas' not in group_names"

     - name: Ensure we have local storage folders
       become: true
       ansible.builtin.file:
         path: "{{ interpolated_localfs_service_storage }}/{{ item }}"
         state: directory
         mode: 0777
         group: "{{ ansible_user_gid }}"
         owner: "{{ ansible_user_uid }}"
       when:
         - is_nomad_client or is_nomad_server
       loop: "{{ service_localfs_dirs }}"

-- name: "Sync docker compose files"
+- name: Sync docker compose files
   when: is_docker_compose_client
   block:
-    - name: confirm compose file dir exists
+    - name: Confirm compose file dir exists
       ansible.builtin.file:
         path: "{{ docker_compose_file_location }}"
         state: directory
         mode: 0755

-    - name: synchronize docker-compose files
+    - name: Synchronize docker-compose files
       ansible.builtin.template:
         src: "{{ item }}"
         dest: "{{ docker_compose_file_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
         mode: 0644
       with_fileglob: "../templates/docker_compose_files/*.j2"

 - name: "Prune docker caches"
   community.docker.docker_prune:
     containers: true
     images: true
     images_filters:
       dangling: false
     networks: true
     volumes: true
     builder_cache: true
   when:
     - is_docker_compose_client or is_nomad_client or is_nomad_server
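One nit on the template sync above: in regex_replace('.j2$', '') the unescaped dot is a regex wildcard, so any character before "j2" would match, not just a literal dot. Escaping it is marginally stricter; a sketch:

- name: Synchronize job templates, stripping only a literal .j2 suffix (sketch)
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "{{ nomad_jobfile_location }}/{{ item | basename | regex_replace('\\.j2$', '') }}"
    mode: 0644
  with_fileglob: "templates/nomad_jobs/*.j2"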
@@ -4,64 +4,64 @@

 - name: "Update and install APT packages"
   when:
     - ansible_os_family != 'Darwin'
     - manage_apt_packages_list
   block:
-    - name: update APT package cache
+    - name: Update APT package cache
       become: true
       ansible.builtin.apt:
         update_cache: true
         cache_valid_time: 3600

-    - name: "upgrade APT to the latest packages (this may take a while)"
+    - name: "Upgrade APT to the latest packages (this may take a while)"
       become: true
       ansible.builtin.apt:
         upgrade: safe

-    - name: "install/upgrade APT packages (this may take a while)"
+    - name: "Install/upgrade APT packages (this may take a while)"
       become: true
       ansible.builtin.apt:
         pkg: "{{ item }}"
         state: present
       loop: "{{ apt_packages_list }}"
       register: apt_output

 - name: "Update and install Homebrew packages"
   when:
     - manage_homebrew_package_list
     - ansible_os_family == 'Darwin'
   block:
-    - name: upgrade homebrew and all packages
+    - name: Upgrade homebrew and all packages
       community.general.homebrew:
         update_homebrew: true
         upgrade_all: true
       register: homebrew_output
       ignore_errors: true

-    - name: install base homebrew packages
+    - name: Install base homebrew packages
       community.general.homebrew:
         name: "{{ homebrew_package_list | join(',') }}"
         state: present
         update_homebrew: false
         upgrade_all: false
       register: homebrew_output

-    - name: homebrew packages updated or installed
+    - name: Homebrew packages updated or installed
       ansible.builtin.debug:
         msg: "{{ homebrew_output.changed_pkgs }}"

-    - name: unchanged homebrew packages
+    - name: Unchanged homebrew packages
       ansible.builtin.debug:
         msg: "{{ homebrew_output.unchanged_pkgs }}"

-    - name: install homebrew casks
+    - name: Install homebrew casks
       community.general.homebrew_cask:
         name: "{{ item }}"
         state: present
         install_options: "appdir=/Applications"
         accept_external_apps: true
         upgrade_all: false
         update_homebrew: false
         greedy: false
       loop: "{{ homebrew_casks_list }}"
       ignore_errors: true
@@ -5,36 +5,37 @@

 - name: "Check if pull_all_repos exists"
   ansible.builtin.stat:
     path: "~/bin/pull_all_repos"
   check_mode: false
   register: pull_script_check

 - name: "Check if ~/repos exists"
   ansible.builtin.stat:
     path: "~/repos"
   check_mode: false
   register: repos_directory_check

-- name: "run pull_all_repos script"
+- name: "Run pull_all_repos script"
   ansible.builtin.command:
     cmd: "~/bin/pull_all_repos --directory ~/repos"
   register: pull_script_output
   when:
     - not ansible_check_mode
     - pull_script_check.stat.exists
     - pull_script_check.stat.executable
     - repos_directory_check.stat.isdir is defined
     - repos_directory_check.stat.isdir
     - repos_directory_check.stat.writeable
   failed_when: pull_script_output.rc > 1
   changed_when: pull_script_output.rc == 0

 - name: "Output from pull_all_repos"
   ansible.builtin.debug:
     msg: "{{ pull_script_output.stdout }}"
   when:
     - not ansible_check_mode
     - pull_script_check.stat.exists
     - pull_script_check.stat.executable
     - repos_directory_check.stat.isdir is defined
     - repos_directory_check.stat.isdir
     - repos_directory_check.stat.writeable
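The failed_when/changed_when pair above implies an exit-code contract for pull_all_repos: 0 means repositories were updated, 1 means nothing to do, anything higher is a genuine failure. That contract is assumed here, not shown in the diff. The same idiom works for any script that follows it:

- name: "Run a script whose exit code separates 'changed' from 'no-op' (sketch)"
  ansible.builtin.command:
    cmd: "~/bin/some_idempotent_script"
  register: script_out
  failed_when: script_out.rc > 1
  changed_when: script_out.rc == 0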
@@ -1,12 +1,12 @@
 ---
 # TASK DESCRIPTION:
 # Always runs first. Confirms we can actually use Ansible

-- name: sanity - user mode
+- name: Sanity - user mode
   become: false
   ansible.builtin.debug:
-    msg: "sanity check: user mode"
+    msg: "Sanity check: user mode"

-- name: sanity - become mode
+- name: Sanity - become mode
   become: true
   ansible.builtin.debug:
-    msg: "sanity check: become mode"
+    msg: "Sanity check: become mode"
@@ -4,90 +4,92 @@
 #
 # NOTE: This is deprecated. I no longer use Prometheus and have migrated to Telegraf

-- name: populate service facts
+- name: Populate service facts
   ansible.builtin.service_facts:

-- name: stop node_exporter
+- name: Stop node_exporter
   become: true
   ansible.builtin.systemd:
     name: node_exporter
     state: stopped
   when: ansible_facts.services["node_exporter.service"] is defined

 - name: Ensure group "prometheus" exists
   become: true
   ansible.builtin.group:
     name: prometheus
     state: present

 - name: Add the user 'prometheus' with group 'prometheus'
   become: true
   ansible.builtin.user:
     name: prometheus
     group: prometheus
     groups: docker
     append: true

 # --------------- Install or Update Prometheus
-- name: "set fact: need to install Prometheus?"
+- name: "Set fact: need to install Prometheus?"
   ansible.builtin.set_fact:
     need_prometheus_install: false

 - name: Check if node_exporter is installed
   ansible.builtin.stat:
     path: /usr/local/bin/node_exporter
   register: prometheus_binary_file_location

-- name: "set fact: need to install Prometheus?"
+- name: "Set fact: need to install Prometheus?"
   ansible.builtin.set_fact:
     need_prometheus_install: true
   when:
     - not prometheus_binary_file_location.stat.exists

 - name: Check current version of Prometheus
   ansible.builtin.shell: /usr/local/bin/node_exporter --version 3>&1 1>&2 2>&3 | head -n1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
   ignore_errors: true
   register: current_prometheus_version
   failed_when: false
   changed_when: false
   check_mode: false
   when:
     - need_prometheus_install is false

-- name: "set fact: need to install Prometheus?"
+- name: "Set fact: need to install Prometheus?"
   ansible.builtin.set_fact:
     need_prometheus_install: true
   when:
     - need_prometheus_install is false
     - current_prometheus_version.stdout != prometheus_verssion

-- name: install node_exporter
+- name: Install node_exporter
   become: true
   ansible.builtin.unarchive:
     src: "https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_verssion }}/node_exporter-{{ prometheus_verssion }}.linux-armv7.tar.gz"
     dest: /usr/local/bin
     group: prometheus
     owner: prometheus
     # reference for extra_opts: https://github.com/ansible/ansible/issues/27081
     extra_opts:
       - --strip=1
       - --no-anchored
       - "node_exporter"
     remote_src: true
   when:
     - need_prometheus_install is true

-- name: create node_exporter service
+- name: Create node_exporter service
   become: true
   ansible.builtin.template:
     src: node_exporter.service.j2
     dest: /etc/systemd/system/node_exporter.service
     mode: 0644

-- name: start node_exporter
+- name: Start node_exporter
   become: true
   ansible.builtin.systemd:
     name: node_exporter
     daemon_reload: true
     enabled: true
     state: started
   when:
     - "'nostart' not in ansible_run_tags"
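The 3>&1 1>&2 2>&3 redirection in the version check above swaps stdout and stderr, which matters because node_exporter historically printed its version banner to stderr. When the output is only being grepped, merging the two streams with 2>&1 reads more clearly and extracts the same string; a sketch:

- name: Check the node_exporter version by merging stderr into stdout (sketch)
  ansible.builtin.shell: /usr/local/bin/node_exporter --version 2>&1 | head -n1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
  register: current_prometheus_version
  check_mode: false
  changed_when: false
  failed_when: false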
305
tasks/tdarr.yml
@@ -4,186 +4,187 @@

 - name: "Set variables"
   block:
     - name: "Set tdarr local filesystem location (pis)"
       ansible.builtin.set_fact:
         interpolated_tdarr_dir: "{{ rpi1_tdarr_file_location }}"
       changed_when: false
       when:
         - "'pis' in group_names"

     - name: "Set tdarr local filesystem location (macs)"
       ansible.builtin.set_fact:
         interpolated_tdarr_dir: "{{ mac_tdarr_file_location }}"
       changed_when: false
       when:
         - "'macs' in group_names"

-    - name: "set variable: Set tdarr download Binary (armv7l)"
+    - name: "Set variable: Set tdarr download Binary (armv7l)"
       ansible.builtin.set_fact:
         tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/linux_arm/Tdarr_Updater.zip"
       when:
         - ansible_os_family == 'Debian'
         - ansible_architecture == 'armv7l'

-    - name: "set variable: Set tdarr download Binary (MacOSX) - Intel"
+    - name: "Set variable: Set tdarr download Binary (MacOSX) - Intel"
       ansible.builtin.set_fact:
         tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_x64/Tdarr_Updater.zip"
       when:
         - mac_intel

-    - name: "set variable: Set tdarr download Binary (MacOSX) - ARM"
+    - name: "Set variable: Set tdarr download Binary (MacOSX) - ARM"
       ansible.builtin.set_fact:
         tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_arm64/Tdarr_Updater.zip"
       when:
         - mac_arm

-    - name: "set fact: do we need a tdarr install?"
+    - name: "Set fact: do we need a tdarr install?"
       ansible.builtin.set_fact:
         need_tdarr_install: false

     - name: Assert that we can install Tdarr
       ansible.builtin.assert:
         that:
           - tdarr_download_uri is defined
           - interpolated_tdarr_dir is defined
         fail_msg: "Unable to install Tdarr on this host"

 - name: "Install ffmpeg and HandbrakeCLI"
   block:
-    - name: "ensure ffmpeg and handbrake are installed (Debian)"
+    - name: "Ensure ffmpeg and handbrake are installed (Debian)"
       become: true
       ansible.builtin.apt:
         pkg: "{{ item }}"
         state: present
       loop:
         - ffmpeg
         - handbrake
       when: "'pis' in group_names"

-    - name: "ensure ffmpeg and handbrake are installed (MacOS)"
+    - name: "Ensure ffmpeg and handbrake are installed (MacOS)"
       community.general.homebrew:
         name: "{{ item }}"
         state: present
         update_homebrew: false
         upgrade_all: false
       loop:
         - ffmpeg
         - handbrake
       when: "'macs' in group_names"

-- name: "ensure tdarr directory exists"
+- name: "Ensure tdarr directory exists"
   become: true
   ansible.builtin.file:
     path: "{{ interpolated_tdarr_dir }}"
     mode: 0755
     owner: "{{ ansible_user_uid }}"
     group: "{{ ansible_user_gid }}"
     state: directory

 - name: "Install tdarr"
   block:
-    - name: "set_fact: need Tdarr install?"
+    - name: "Set fact: need Tdarr install?"
       ansible.builtin.stat:
         path: "{{ interpolated_tdarr_dir }}/configs"
       register: tdarr_exists
       changed_when: false
       failed_when: false

-    - name: "set fact: do we need a tdarr install?"
+    - name: "Set fact: do we need a tdarr install?"
       ansible.builtin.set_fact:
         need_tdarr_install: true
       when: not tdarr_exists.stat.exists

     - name: Download tdarr
       ansible.builtin.unarchive:
         src: "{{ tdarr_download_uri }}"
         dest: "{{ interpolated_tdarr_dir }}"
         remote_src: true
       when: need_tdarr_install

     - name: Did tdarr download?
       ansible.builtin.stat:
         path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
       register: tdarr_installer_exists
       failed_when: not tdarr_installer_exists.stat.exists
       when: need_tdarr_install

     - name: Ensure correct permissions on Tdarr_Updater
       ansible.builtin.file:
         path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
         mode: 0755
       when: need_tdarr_install

     - name: Install tdarr
       ansible.builtin.command:
         cmd: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
       register: tdarr_install
       failed_when: tdarr_install.rc > 0
+      changed_when: tdarr_install.rc == 0
       when: need_tdarr_install

     - name: Ensure correct permissions on server/node executables
       ansible.builtin.file:
         path: "{{ interpolated_tdarr_dir }}/{{ item }}"
         mode: 0755
       loop:
         - Tdarr_Server/Tdarr_Server
         - Tdarr_Node/Tdarr_Node
       when: need_tdarr_install

-- name: "configure tdarr"
+- name: "Configure tdarr"
   block:
-    - name: update server configuration file
+    - name: Update server configuration file
       ansible.builtin.template:
         src: Tdarr_Server_Config.json.j2
         dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Server_Config.json"
         mode: 0644
       when: is_tdarr_server

-    - name: update node configuration file
+    - name: Update node configuration file
       ansible.builtin.template:
         src: Tdarr_Node_Config.json.j2
         dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Node_Config.json"
         mode: 0644
       when: is_tdarr_node

-    - name: check if consul is installed?
+    - name: Check if consul is installed?
       ansible.builtin.stat:
         path: "{{ interpolated_consul_configuration_dir }}"
       register: consul_installed
       changed_when: false
       failed_when: false
       when:
         - is_tdarr_server

-    - name: move consul service config into place
+    - name: Move consul service config into place
       become: true
       ansible.builtin.template:
         src: consul_services/tdarr_service.json.j2
         dest: "{{ interpolated_consul_configuration_dir }}/tdarr_service.json"
         mode: 0644
       when:
         - is_tdarr_server
         - consul_installed.stat.exists

     - name: Reload consul agent
       ansible.builtin.uri:
         url: "http://{{ ansible_host }}:8500/v1/agent/reload"
         method: PUT
         status_code: 200
       ignore_errors: true
       register: consul_agent_reload_http_response
       failed_when: consul_agent_reload_http_response.status != 200
       when:
         - is_tdarr_server
         - consul_installed.stat.exists

-    - name: debug when consul agent reload fails
+    - name: Debug when consul agent reload fails
       ansible.builtin.debug:
         var: consul_agent_reload_http_response.msg
       when:
         - is_tdarr_server
         - consul_installed.stat.exists
         - consul_agent_reload_http_response.status != 200

-- name: mount shared storage
+- name: Mount shared storage
   ansible.builtin.import_tasks: cluster_storage.yml
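The reload step above goes through Consul's HTTP API; PUT /v1/agent/reload is the documented endpoint for re-reading agent configuration without a restart. When a reload fails, the same call is easy to replay by hand from the host, assuming curl is available there:

- name: Reload the local consul agent manually (sketch)
  ansible.builtin.command:
    cmd: "curl -X PUT http://{{ ansible_host }}:8500/v1/agent/reload"
  changed_when: true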
@@ -5,146 +5,146 @@

 # --------------------------------- Set variables depending on system type
 - name: "Configure variables"
   block:
-    - name: "set variable: telegraph_binary_location (Debian)"
+    - name: "Set variable: telegraph_binary_location (Debian)"
       ansible.builtin.set_fact:
         telegraph_binary_location: "/usr/bin/telegraf"
       when:
         - ansible_os_family == 'Debian'

-    - name: "set variable: telegraph_binary_location (MacOS)"
+    - name: "Set variable: telegraph_binary_location (MacOS)"
       ansible.builtin.set_fact:
         telegraph_binary_location: "/usr/local/bin/telegraf"
       when:
         - ansible_os_family == 'Darwin'

-    - name: "set fact: telegraph_config_location (Debian)"
+    - name: "Set fact: telegraph_config_location (Debian)"
       ansible.builtin.set_fact:
         telegraph_config_location: "/etc/telegraf"
       when:
         - ansible_os_family == 'Debian'

-    - name: "set fact: telegraph_config_location (macOS)"
+    - name: "Set fact: telegraph_config_location (macOS)"
       ansible.builtin.set_fact:
         telegraph_config_location: "/usr/local/etc"
       when:
         - ansible_os_family == 'Darwin'

-    - name: "set fact: telegraph_config_location (macOS)"
+    - name: "Set fact: telegraph_config_location (macOS)"
       ansible.builtin.set_fact:
         telegraph_config_location: "/volume1/docker/telegraf/config"
       when:
         - inventory_hostname == 'synology'

     - name: "Fail if arm Mac (need to update task) or variables not defined"
       ansible.builtin.assert:
         that:
           - telegraph_binary_location is defined
           - telegraph_config_location is defined
           - not mac_arm
         fail_msg: "Unable to install Telegraf on this host"

-    - name: "set variable: Set speedtest download Binary (armv7l)"
+    - name: "Set variable: Set speedtest download Binary (armv7l)"
       ansible.builtin.set_fact:
         speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-armhf.tgz"
       when:
         - ansible_os_family == 'Debian'
         - ansible_architecture == 'armv7l'

-    - name: "set variable: Set speedtest download Binary (aarch64)"
+    - name: "Set variable: Set speedtest download Binary (aarch64)"
       ansible.builtin.set_fact:
         speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-aarch64.tgz"
       when:
         - ansible_os_family == 'Debian'
         - ansible_architecture == 'aarch64'

 - name: "Install/upgrade Telegraf"
   block:
-    - name: "set fact: Need telegraf install?"
+    - name: "Set fact: Need telegraf install?"
       ansible.builtin.set_fact:
         need_telegraf_install: false
       when: telegraph_binary_location is defined

     - name: Check if telegraf is installed
       ansible.builtin.stat:
         path: "{{ telegraph_binary_location }}"
       check_mode: false
       register: telegraf_binary_exists
       when: telegraph_binary_location is defined

-    - name: "set fact: Need telegraf install?"
+    - name: "Set fact: Need telegraf install?"
       ansible.builtin.set_fact:
         need_telegraf_install: true
       check_mode: false
       when:
         - telegraph_binary_location is defined
         - not telegraf_binary_exists.stat.exists

     - name: Check current version of telegraf
       ansible.builtin.shell: "{{ telegraph_binary_location }} --version | grep -oE '[0-9]+\\.[0-9]+\\.[0-9]+'"
       ignore_errors: true
       register: current_telegraf_version
       check_mode: false
       changed_when: false
       when:
         - not need_telegraf_install
         - telegraph_binary_location is defined

-    - name: "set fact: Need telegraf install?"
+    - name: "Set fact: Need telegraf install?"
       ansible.builtin.set_fact:
         need_telegraf_install: true
       when:
         - telegraph_binary_location is defined
         - not need_telegraf_install
         - current_telegraf_version.stdout is version(telegraf_version, '<')

-    - name: install telegraf (MacOS)
+    - name: Install telegraf (MacOS)
       community.general.homebrew:
         name: telegraf
         state: present
       notify: restart_telegraf
       when:
         - ansible_os_family == 'Darwin'
         - need_telegraf_install

-    - name: install base apt-transport (Debian)
+    - name: Install base apt-transport (Debian)
       become: true
       ansible.builtin.apt:
         pkg: apt-transport-https
         state: present
         update_cache: true
       when:
         - ansible_os_family == 'Debian'
         - need_telegraf_install

     - name: Download telegraf GPG key (Debian)
       become: true
       ansible.builtin.apt_key:
         state: present
         url: "https://repos.influxdata.com/influxdb.key"
       when:
         - ansible_os_family == 'Debian'
         - need_telegraf_install

     - name: Add telegraf repository to apt (Debian)
       become: true
       ansible.builtin.apt_repository:
         repo: deb https://repos.influxdata.com/debian bullseye stable
         state: present
       when:
         - ansible_os_family == 'Debian'
         - need_telegraf_install

-    - name: install telegraf (Debian)
+    - name: Install telegraf (Debian)
       become: true
       ansible.builtin.apt:
         pkg: telegraf
         state: latest
         update_cache: true
         only_upgrade: true
       notify: restart_telegraf
       when:
         - ansible_os_family == 'Debian'
         - need_telegraf_install

 # - name: give telegraf access to docker
 #   become: true
@@ -162,115 +162,115 @@
 - name: "Install speedtest"
   when: "'pis' in group_names"
   block:
-    - name: "set fact: do we need speedtest installed?"
+    - name: "Set fact: do we need speedtest installed?"
       ansible.builtin.set_fact:
         need_speedtest_install: false

     - name: Check if speedtest is installed
       ansible.builtin.stat:
         path: /usr/local/bin/speedtest
       register: speedtest_binary_file_location

-    - name: "set fact: do we need a speedtest install"
+    - name: "Set fact: do we need a speedtest install"
       ansible.builtin.set_fact:
         need_speedtest_install: true
       when:
         - not speedtest_binary_file_location.stat.exists

     - name: Check current version of speedtest
       ansible.builtin.shell: /usr/local/bin/speedtest --version | head -n1 | awk '{print $4}' | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
       ignore_errors: true
       register: current_speedtest_version
       check_mode: false
       changed_when: false
       when:
         - not need_speedtest_install

-    - name: "set fact: do we need a speedtest install"
+    - name: "Set fact: do we need a speedtest install"
       ansible.builtin.set_fact:
         need_speedtest_install: true
       when:
         - not need_speedtest_install
         - current_speedtest_version.stdout is version(speedtest_cli_version, '<')

     - name: "Install speedtest (pi)"
       become: true
       ansible.builtin.unarchive:
         src: "{{ speedtest_download_file_uri }}"
         dest: /usr/local/bin
         remote_src: true
       when:
         - need_speedtest_install
         - ansible_os_family == 'Debian'
         - ansible_architecture == 'armv7l'

 - name: "Configure Telegraf"
   block:
     - name: "Ensure {{ telegraph_config_location }} exists"
       become: true
       ansible.builtin.file:
         path: "{{ item }}"
         state: directory
         mode: 0755
       loop:
         - "{{ telegraph_config_location }}"
         - "{{ telegraph_config_location }}/telegraf.d"

-    - name: template config files to server
+    - name: Template config files to server
       become: true
       ansible.builtin.template:
         src: "{{ item.src }}"
         dest: "{{ item.dest }}"
         mode: "644"
       loop:
         - { src: "telegraf/base_config.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.conf" }
         - { src: "telegraf/custom_metrics.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/custom_metrics.conf" }
         - { src: "telegraf/nomad.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/nomad.conf" }
         - { src: "telegraf/docker.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/docker.conf" }
       notify: restart_telegraf

-    - name: template leader configs (ie, configs that should be placed on a single server)
+    - name: Template leader configs (ie, configs that should be placed on a single server)
       become: true
       ansible.builtin.template:
         src: "{{ item.src }}"
         dest: "{{ item.dest }}"
         mode: "644"
       loop:
         - { src: "telegraf/leader.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/leader.conf" }
         - { src: "telegraf/speedtest.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/speedtest.conf" }
         - { src: "telegraf/pingHosts.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/pingHosts.conf" }
       when:
         - is_cluster_leader
       notify: restart_telegraf

     - name: Copy custom metrics script
       become: true
       ansible.builtin.template:
         src: "scripts/telegraf_custom_metrics.sh.j2"
         dest: "/usr/local/bin/telegraf_custom_metrics.sh"
         mode: 0755
         owner: "{{ ansible_user_uid }}"
         group: "{{ ansible_user_gid }}"
       when:
         - inventory_hostname != 'synology'

     - name: Copy speedtest script
       become: true
       ansible.builtin.template:
         src: "scripts/telegraf_speedtest.sh.j2"
         dest: "/usr/local/bin/telegraf_speedtest.sh"
         mode: 0755
         owner: "{{ ansible_user_uid }}"
         group: "{{ ansible_user_gid }}"
       when:
         - is_cluster_leader

     - name: Reset file ownership
       become: true
       ansible.builtin.file:
         path: "{{ telegraph_config_location }}"
         owner: "{{ ansible_user_uid }}"
         group: "{{ ansible_user_gid }}"
         recurse: true
       when:
         - (ansible_os_family == 'Darwin') or (inventory_hostname == 'synology')
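Several tasks above notify restart_telegraf. The handler itself lives in handlers/main.yml, which this diff does not touch; on the systemd hosts a plausible minimal shape for it would be the sketch below, offered as an assumption rather than the repo's actual handler:

- name: restart_telegraf
  become: true
  ansible.builtin.systemd:
    name: telegraf
    state: restarted
  listen: restart_telegraf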
@@ -5,11 +5,11 @@
 {% if is_consul_server %}
 "server" = true
 "ui_config" = {
   "enabled" = true
 }
 {% else %}
 "ui_config" = {
   "enabled" = false
 }
 {% endif %}
@@ -28,15 +28,15 @@

 # ----------------------------------------- Networking
 "addresses" = {
   "dns" = "0.0.0.0"
   "grpc" = "0.0.0.0"
   "http" = "0.0.0.0"
   "https" = "0.0.0.0"
 }
 "ports" = {
   "dns" = 8600
   "http" = 8500
   "server" = 8300
 }

 {% if 'linode' in group_names %}
@@ -57,7 +57,7 @@
 {% if 'linode' in group_names %}
 "retry_join" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].linode_private_ip }}"{% if not loop.last %}, {% endif %}{% endfor %}]
 {% else %}
-"retry_join" = [{% for h in groups['lan'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
+"retry_join" = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
 {% endif %}
 {% if is_consul_server %}
 {% if 'linode' in group_names %}
 "join_wan" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
@@ -81,7 +81,7 @@
 "key_file" = "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem"
 {% endif %}
 "auto_encrypt" = {
   "allow_tls" = true
 }
 {% else %} {# Consul Clients #}
 "verify_incoming" = false
@@ -93,14 +93,14 @@
 "ca_file" = "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
 {% endif %}
 "auto_encrypt" = {
   "tls" = true
 }
 {% endif %}

 "acl" = {
-  enabled = false
   default_policy = "allow"
   enable_token_persistence = true
+  enabled = false
 }

 # ----------------------------------------- Cluster Operations
@@ -1,5 +1,5 @@
 [Unit]
-Description="HashiCorp Consul - A service mesh solution"
+Description="hashiCorp Consul - A service mesh solution"
 Documentation=https://www.consul.io/
 Requires=network-online.target
 After=network-online.target
@@ -9,8 +9,7 @@
   "traefik.http.routers.sabnzbd.entryPoints=web,websecure",
   "traefik.http.routers.sabnzbd.service=sabnzbd",
   "traefik.http.routers.sabnzbd.tls=true",
-  "traefik.http.routers.sabnzbd.tls.certresolver=cloudflare",
-  "traefik.http.routers.sabnzbd.middlewares=authelia@file"
+  "traefik.http.routers.sabnzbd.tls.certresolver=cloudflare"
 ],
 "checks": [{
   "id": "sabnzbd-http-check",
@@ -21,6 +20,27 @@
   "failures_before_critical": 3
 }]
 },
+{
+  "name": "jellyfin",
+  "id": "jellyfin",
+  "tags": [
+    "traefik.enable=true",
+    "traefik.http.services.jellyfin.loadbalancer.server.port=8096",
+    "traefik.http.routers.jellyfin.rule=Host(`jellyfin.{{ homelab_domain_name }}`)",
+    "traefik.http.routers.jellyfin.entryPoints=web,websecure",
+    "traefik.http.routers.jellyfin.service=jellyfin",
+    "traefik.http.routers.jellyfin.tls=true",
+    "traefik.http.routers.jellyfin.tls.certresolver=cloudflare"
+  ],
+  "checks": [{
+    "id": "jellyfin-http-check",
+    "http": "http://{{ synology_second_ip }}:8096",
+    "interval": "30s",
+    "timeout": "5s",
+    "success_before_passing": 3,
+    "failures_before_critical": 3
+  }]
+},
 {
 "name": "synology",
 "id": "synology",
@@ -1,11 +0,0 @@
-version: '3.9'
-
-services:
-  asn-to-ip:
-    image: ddimick/asn-to-ip:latest
-    hostname: asn-to-ip
-    container_name: asn-to-ip
-    network_mode: "bridge"
-    ports:
-      - 5151:5000
-    restart: unless-stopped
@@ -2,7 +2,7 @@ version: '3.9'

 services:
   consul:
-    image: consul:{{ consul_version }}
+    image: hashicorp/consul:{{ consul_version }}
     hostname: consul
     container_name: consul
     network_mode: "host"
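The image rename above tracks Docker Hub: HashiCorp moved Consul publishing to the hashicorp/consul repository and the unqualified consul library image no longer receives new releases, so pinned compose files have to follow. A quick pre-flight check that the pinned tag still resolves, as a sketch:

- name: Confirm the pinned consul image can be pulled (sketch)
  community.docker.docker_image:
    name: "hashicorp/consul:{{ consul_version }}"
    source: pull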
17
templates/docker_compose_files/synology_jellyfin.yml.j2
Normal file
@@ -0,0 +1,17 @@
+version: '3.9'
+
+services:
+  jellyfin:
+    image: lscr.io/linuxserver/jellyfin:latest
+    hostname: jellyfin
+    container_name: jellyfin
+    network_mode: "host"
+    environment:
+      - "TZ=America/New_York"
+      - "PGID=101"
+      - "PUID={{ ansible_user_uid }}"
+    volumes:
+      - /volume1/pi-cluster/jellyfin:/config
+      - /volume1/media/media/movies:/data/movies
+      - /volume1/media/media/tv:/data/tv
+    restart: unless-stopped
@@ -2,7 +2,7 @@ version: '3.9'

 services:
   sabnzbd:
-    image: ghcr.io/linuxserver/sabnzbd
+    image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }}
     hostname: sabnzbd
     container_name: sabnzbd
     network_mode: "bridge"
@@ -10,13 +10,15 @@ services:
       - "TZ=America/New_York"
       - "PGID=101"
       - "PUID={{ ansible_user_uid }}"
+      #- "DOCKER_MODS=linuxserver/mods:universal-cron"
     volumes:
       - /var/services/homes/{{ my_username }}:/{{ my_username }}
       - /volume1/nate:/nate
       - /volume1/media/downloads/nzb:/nzbd
       - /volume1/media/downloads/temp:/incomplete-downloads
       - /volume1/media/downloads/complete:/downloads
-      - /volume1/docker/sabnzbd:/config
+      - /volume1/pi-cluster/sabnzbd:/config
+      - /volume1/pi-cluster/sabnzbd/startup-scripts:/custom-cont-init.d
     ports:
       - 8080:8080
       - 9090:9090
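Two details in the new mounts above follow linuxserver.io image conventions: /custom-cont-init.d is the directory those images scan for user-supplied scripts to run at container start, and the commented DOCKER_MODS variable is their hook for layering add-ons such as cron. Pinning the image to {{ sabnzbd_version }}, as the first hunk in this file does, keeps those startup scripts from meeting an unexpected application version after an unattended pull.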
@@ -5,28 +5,28 @@ datacenter = "{{ datacenter_name }}"

 # ----------------------------------------- Files and Logs
 data_dir = "{{ nomad_opt_dir_location }}"
-plugin_dir = "{{ nomad_opt_dir_location }}/plugins"
-log_level = "warn"
-log_file = "{{ nomad_opt_dir_location }}/logs/nomad.log"
-log_rotate_max_files = 5
 enable_syslog = false
+log_file = "{{ nomad_opt_dir_location }}/logs/nomad.log"
+log_level = "warn"
+log_rotate_max_files = 5
+plugin_dir = "{{ nomad_opt_dir_location }}/plugins"

 # ----------------------------------------- Networking
 bind_addr = "0.0.0.0" # the default

 advertise {
 {% if 'linode' in group_names %}
   http = "{{ linode_private_ip }}:4646"
   rpc = "{{ linode_private_ip }}:4647"
   serf = "{{ linode_private_ip }}:4648" # non-default ports may be specified
 {% elif 'synology' in group_names %}
   http = "{{ synology_second_ip }}:4646"
   rpc = "{{ synology_second_ip }}:4647"
   serf = "{{ synology_second_ip }}:4648" # non-default ports may be specified
 {% else %}
   http = "{{ ansible_host }}:4646"
   rpc = "{{ ansible_host }}:4647"
   serf = "{{ ansible_host }}:4648" # non-default ports may be specified
 {% endif %}
 }
@@ -48,170 +48,171 @@ consul {
|
||||
|
||||
{% if is_nomad_server %}
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.nomad-server.entryPoints=web,websecure",
|
||||
"traefik.http.routers.nomad-server.service=nomad-server",
|
||||
"traefik.http.routers.nomad-server.rule=Host(`nomad.{{ homelab_domain_name }}`)",
|
||||
"traefik.http.routers.nomad-server.tls=true",
|
||||
"traefik.http.routers.nomad-server.middlewares=authelia@file,redirectScheme@file",
|
||||
"traefik.http.services.nomad-server.loadbalancer.server.port=4646"
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.nomad-server.entryPoints=web,websecure",
|
||||
"traefik.http.routers.nomad-server.service=nomad-server",
|
||||
"traefik.http.routers.nomad-server.rule=Host(`nomad.{{ homelab_domain_name }}`)",
|
||||
"traefik.http.routers.nomad-server.tls=true",
|
||||
"traefik.http.routers.nomad-server.middlewares=redirectScheme@file",
|
||||
"traefik.http.services.nomad-server.loadbalancer.server.port=4646"
|
||||
]
|
||||
{% endif %}
|
||||
}
|
||||
|
||||
# ----------------------------------------- CLient Config
|
||||
# ----------------------------------------- Client Config
|
||||
client {
|
||||
enabled = true
|
||||
enabled = true
|
||||
{% if 'pis' in group_names %}
|
||||
node_class = "rpi"
|
||||
node_class = "rpi"
|
||||
{% elif 'macs' in group_names %}
|
||||
node_class = "mac"
|
||||
node_class = "mac"
|
||||
{% elif 'synology' in group_names %}
|
||||
node_class = "synology"
|
||||
node_class = "synology"
|
||||
{% endif %}
|
||||
reserved {
|
||||
cpu = 250
|
||||
memory = 100
|
||||
reserved_ports = "22"
|
||||
}
|
||||
reserved {
|
||||
cpu = 250
|
||||
memory = 100
|
||||
reserved_ports = "22"
|
||||
}
|
||||
{% if not is_nomad_server %}
|
||||
{% if 'linode' in group_names %}
|
||||
server_join {
|
||||
retry_join = [{% for h in groups['linode'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
server_join {
|
||||
retry_join = [{% for h in groups['linode'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
{% else %}
|
||||
server_join {
|
||||
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
servers = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
|
||||
server_join {
|
||||
retry_join = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
meta {
|
||||
# These are variables that can be used in Nomad job files
|
||||
PUID = "{{ ansible_user_uid }}"
|
||||
PGID = "{{ ansible_user_gid }}"
|
||||
nfsStorageRoot = "{{ interpolated_nfs_service_storage }}"
|
||||
localStorageRoot = "{{ interpolated_localfs_service_storage }}"
|
||||
{% if 'macs' in group_names %}
|
||||
restoreCommand = "/usr/local/bin/service_restore"
|
||||
restoreCommand1 = "--verbose"
|
||||
restoreCommand2 = "--job"
|
||||
restoreCommand3 = ""
|
||||
backupCommand = "/usr/local/bin/service_backups"
|
||||
backupCommandArg1 = "--verbose"
|
||||
backupCommandArg2 = "--loglevel=INFO"
|
||||
backupCommandArg3 = ""
|
||||
backupAllocArg1 = "--verbose"
|
||||
backupAllocArg2 = "--loglevel=INFO"
|
||||
backupAllocArg3 = "--allocation"
|
||||
backupAllocArg4 = "--delete"
|
||||
backupAllocArg5 = "--job"
|
||||
backupAllocArg6 = ""
|
||||
{% else %}
|
||||
restoreCommand = "sudo"
|
||||
restoreCommand1 = "/usr/local/bin/service_restore"
|
||||
restoreCommand2 = "--job"
|
||||
restoreCommand3 = "--verbose"
|
||||
backupCommand = "sudo"
|
||||
backupCommandArg1 = "/usr/local/bin/service_backups"
|
||||
backupCommandArg2 = "--verbose"
|
||||
backupCommandArg3 = "--loglevel=INFO"
|
||||
backupAllocArg1 = "/usr/local/bin/service_backups"
|
||||
backupAllocArg2 = "--verbose"
|
||||
backupAllocArg3 = "--loglevel=INFO"
|
||||
backupAllocArg4 = "--allocation"
|
||||
backupAllocArg5 = "--job"
|
||||
backupAllocArg6 = "--delete"
|
||||
{% endif %}
|
||||
}
|
||||
meta {
|
||||
# These are variables that can be used in Nomad job files
|
||||
PUID = "{{ ansible_user_uid }}"
|
||||
PGID = "{{ ansible_user_gid }}"
|
||||
nfsStorageRoot = "{{ interpolated_nfs_service_storage }}"
|
||||
localStorageRoot = "{{ interpolated_localfs_service_storage }}"
|
||||
{% if 'macs' in group_names %}
|
||||
restoreCommand = "/usr/local/bin/service_restore"
|
||||
restoreCommand1 = "--verbose"
|
||||
restoreCommand2 = "--job"
|
||||
restoreCommand3 = ""
|
||||
backupCommand = "/usr/local/bin/service_backups"
|
||||
backupCommandArg1 = "--verbose"
|
||||
backupCommandArg2 = "--loglevel=INFO"
|
||||
backupCommandArg3 = ""
|
||||
backupAllocArg1 = "--verbose"
|
||||
backupAllocArg2 = "--loglevel=INFO"
|
||||
backupAllocArg3 = "--allocation"
|
||||
backupAllocArg4 = "--delete"
|
||||
backupAllocArg5 = "--job"
|
||||
backupAllocArg6 = ""
|
||||
{% else %}
|
||||
restoreCommand = "sudo"
|
||||
restoreCommand1 = "/usr/local/bin/service_restore"
|
||||
restoreCommand2 = "--job"
|
||||
restoreCommand3 = "--verbose"
|
||||
backupCommand = "sudo"
|
||||
backupCommandArg1 = "/usr/local/bin/service_backups"
|
||||
backupCommandArg2 = "--verbose"
|
||||
backupCommandArg3 = "--loglevel=INFO"
|
||||
backupAllocArg1 = "/usr/local/bin/service_backups"
|
||||
backupAllocArg2 = "--verbose"
|
||||
backupAllocArg3 = "--loglevel=INFO"
|
||||
backupAllocArg4 = "--allocation"
|
||||
backupAllocArg5 = "--job"
|
||||
backupAllocArg6 = "--delete"
|
||||
{% endif %}
|
||||
}
|
||||
|
||||
} # /client
|
||||
|
||||
{% if is_nomad_server %}
|
||||
# ----------------------------------------- Server Config
|
||||
server {
|
||||
enabled = true
|
||||
encrypt = "{{ nomad_encryption_key }}"
|
||||
enabled = true
|
||||
encrypt = "{{ nomad_encryption_key }}"
|
||||
{% if 'linode' in group_names %}
|
||||
bootstrap_expect = 1
|
||||
bootstrap_expect = 1
|
||||
{% else %}
|
||||
bootstrap_expect = 3
|
||||
bootstrap_expect = 3
|
||||
{% endif %}
|
||||
node_gc_threshold = "15m"
|
||||
job_gc_interval = "15m"
|
||||
job_gc_threshold = "6h"
|
||||
heartbeat_grace = "60s"
|
||||
min_heartbeat_ttl = "20s"
|
||||
raft_protocol = "3"
|
||||
node_gc_threshold = "15m"
|
||||
job_gc_interval = "15m"
|
||||
job_gc_threshold = "6h"
|
||||
heartbeat_grace = "60s"
|
||||
min_heartbeat_ttl = "20s"
|
||||
raft_protocol = "3"
|
||||
|
||||
server_join {
|
||||
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
server_join {
|
||||
retry_join = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
|
||||
retry_max = 3
|
||||
retry_interval = "15s"
|
||||
}
|
||||
}
|
||||
|
||||
autopilot {
|
||||
cleanup_dead_servers = true
|
||||
last_contact_threshold = "200ms"
|
||||
max_trailing_logs = 250
|
||||
server_stabilization_time = "10s"
|
||||
enable_redundancy_zones = false
|
||||
disable_upgrade_migration = false
|
||||
enable_custom_upgrades = false
|
||||
cleanup_dead_servers = true
|
||||
disable_upgrade_migration = false
|
||||
enable_custom_upgrades = false
|
||||
enable_redundancy_zones = false
|
||||
last_contact_threshold = "200ms"
|
||||
max_trailing_logs = 250
|
||||
server_stabilization_time = "10s"
|
||||
}
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% if is_nomad_server and is_nomad_client %}
|
||||
client {
|
||||
enabled = true
|
||||
enabled = true
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
# ----------------------------------------- Telemety
|
||||
telemetry = {
|
||||
publish_allocation_metrics = true
|
||||
publish_node_metrics = true
|
||||
collection_interval = "10s"
|
||||
filter_default = false
|
||||
datadog_address = "localhost:8125"
|
||||
prefix_filter = [
|
||||
"+nomad.client.allocations.running",
|
||||
"+nomad.client.allocations.terminal",
|
||||
"+nomad.client.allocs.cpu.allocated",
|
||||
"+nomad.client.allocs.cpu.total_percent",
|
||||
"+nomad.client.allocs.memory.allocated",
|
||||
"+nomad.client.allocs.memory.swap",
|
||||
"+nomad.client.allocs.memory.usage",
|
||||
"+nomad.nomad.job_status.dead",
|
||||
"+nomad.nomad.job_status.running",
|
||||
"+nomad.nomad.job_status.pending",
|
||||
"+nomad.nomad.job_summary.running",
|
||||
"+nomad.nomad.job_summary.complete",
|
||||
"+nomad.nomad.job_summary.lost",
|
||||
"+nomad.nomad.job_summary.failed"]
|
||||
collection_interval = "10s"
|
||||
datadog_address = "localhost:8125"
|
||||
filter_default = false
|
||||
publish_allocation_metrics = true
|
||||
publish_node_metrics = true
|
||||
prefix_filter = [
|
||||
"+nomad.client.allocations.running",
|
||||
"+nomad.client.allocations.terminal",
|
||||
"+nomad.client.allocs.cpu.allocated",
|
||||
"+nomad.client.allocs.cpu.total_percent",
|
||||
"+nomad.client.allocs.memory.allocated",
|
||||
"+nomad.client.allocs.memory.swap",
|
||||
"+nomad.client.allocs.memory.usage",
|
||||
"+nomad.nomad.job_status.dead",
|
||||
"+nomad.nomad.job_status.running",
|
||||
"+nomad.nomad.job_status.pending",
|
||||
"+nomad.nomad.job_summary.running",
|
||||
"+nomad.nomad.job_summary.complete",
|
||||
"+nomad.nomad.job_summary.lost",
|
||||
"+nomad.nomad.job_summary.failed"
|
||||
]
|
||||
}
|
||||
|
||||
# ----------------------------------------- Plugins
|
||||
plugin "raw_exec" {
|
||||
config {
|
||||
enabled = true
|
||||
}
|
||||
config {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
plugin "docker" {
|
||||
config {
|
||||
allow_caps = [ "ALL" ]
|
||||
allow_privileged = true
|
||||
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
allow_caps = ["all"]
|
||||
allow_privileged = true
|
||||
extra_labels = ["job_name", "job_id", "task_group_name", "task_name", "namespace", "node_name", "node_id"]
|
||||
volumes {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,9 +7,16 @@ ConditionFileNotEmpty={{ nomad_configuration_dir }}/nomad.hcl

[Service]
{# {% if 'linode' in group_names %} #}
User=nomad
Group=nomad
{# User=nomad #}
{# Group=nomad #}
{# {% endif %} #}

{# NOTE: Nomad is running as root rather than the Nomad user due to the Docker driver not being started when cgroups v2 are enabled.

https://github.com/hashicorp/nomad/pull/16063
#}
User=root
Group=root
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config {{ nomad_configuration_dir }}
KillMode=process

@@ -1,21 +1,21 @@
job "backup_local_filesystems" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "sysbatch"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "sysbatch"

periodic {
cron = "0 */8 * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}

task "do_backups" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
periodic {
cron = "0 */8 * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
} // /task do_backups

task "do_backups" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
}
} // /task do_backups

} //job

@@ -57,6 +57,7 @@ job "changedetection" {
service {
port = "webUI"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`changes.{{ homelab_domain_name }}`)",
@@ -75,7 +76,6 @@ job "changedetection" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -73,6 +73,7 @@ job "chronograf" {
service {
port = "chronografPort"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -91,7 +92,6 @@ job "chronograf" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -63,6 +63,7 @@ job "code" {
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -82,7 +83,6 @@ job "code" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -1,109 +1,110 @@
job "diagnostics" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"

constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi1"
}

group "diagnostics" {

count = 1

restart {
attempts = 0
delay = "30s"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}

network {
port "whoami" {
to = 80
}
}
group "diagnostics" {

task "diagnostics" {
count = 1

// env {
// KEY = "VALUE"
// }

driver = "docker"
config {
image = "alpine:latest"
hostname = "${NOMAD_JOB_NAME}"
args = [
"/bin/sh",
"-c",
"chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/backups/config_backups:/backups",
"${meta.localStorageRoot}:/docker"
]
} // docker config

template {
destination = "local/bootstrap.sh"
data = <<EOH
#!/bin/sh

apk update
apk add --no-cache bash
apk add --no-cache bind-tools
apk add --no-cache curl
apk add --no-cache git
apk add --no-cache jq
apk add --no-cache openssl
apk add --no-cache iperf3
apk add --no-cache nano
apk add --no-cache wget

tail -f /dev/null # Keep container running
EOH
}

} // task diagnostics
task "whoami" {
driver = "docker"
config {
image = "containous/whoami:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["whoami"]

} // /docker config

service {
port = "whoami"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
resources {
cpu = 25 # MHz
memory = 10 # MB
restart {
attempts = 0
delay = "30s"
}

} // /task whoami
network {
port "whoami" {
to = 80
}
}

} // group
task "diagnostics" {

// env {
// KEY = "VALUE"
// }

driver = "docker"
config {
image = "alpine:latest"
hostname = "${NOMAD_JOB_NAME}"
args = [
"/bin/sh",
"-c",
"chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/tmp:/diagnostics",
"${meta.localStorageRoot}:/docker"
]
} // docker config

template {
destination = "local/bootstrap.sh"
data = <<EOH
#!/bin/sh

apk update
apk add --no-cache bash
apk add --no-cache bind-tools
apk add --no-cache curl
apk add --no-cache git
apk add --no-cache jq
apk add --no-cache openssl
apk add --no-cache iperf3
apk add --no-cache nano
apk add --no-cache wget

tail -f /dev/null # Keep container running
EOH
}

} // task diagnostics

// task "whoami" {
// driver = "docker"
// config {
// image = "containous/whoami:latest"
// hostname = "${NOMAD_TASK_NAME}"
// ports = ["whoami"]

// } // /docker config

// service {
// port = "whoami"
// name = "${NOMAD_JOB_NAME}"
// provider = "nomad"
// tags = [
// "traefik.enable=true",
// "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
// "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
// "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
// "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
// "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
// ]
// check {
// type = "http"
// path = "/"
// interval = "90s"
// timeout = "15s"
// }
// check_restart {
// limit = 2
// grace = "1m"
// }
// }
// resources {
// cpu = 25 # MHz
// memory = 10 # MB
// }

// } // /task whoami

} // group
} // job

@@ -54,6 +54,7 @@ job "freshrss" {
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`rss.{{ homelab_domain_name }}`)",
@@ -73,7 +74,6 @@ job "freshrss" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

404
templates/nomad_jobs/gitea.hcl
Normal file
@@ -0,0 +1,404 @@
job "gitea" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"

update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

constraint {
distinct_hosts = true
}

group "gitea" {

// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }

count = 1

restart {
attempts = 0
delay = "30s"
}

network {
port "webui" {
to = "3000"
}
port "ssh" {
to = "22"
}
}

task "create_filesystem" {
// Copy the most recent backup into place on the local computer. sonarr will not work with
// its database in an NFS share

driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = [
"${meta.restoreCommand1}",
"${meta.restoreCommand2}",
"${NOMAD_JOB_NAME}",
"${meta.restoreCommand3}"
]
}

lifecycle {
hook = "prestart"
sidecar = false
}

} // /task create_filesystem


task "gitea" {

env {
GITEA__mailer__ENABLED = true
GITEA__mailer__FROM = "gitea@{{ homelab_domain_name }}"
GITEA__mailer__PASSWD = "{{ gitea_smtp_password }}"
GITEA__mailer__PROTOCOL = "smtp+starttls"
GITEA__mailer__SMTP_ADDR = "{{ email_smtp_host }}"
GITEA__mailer__SMTP_PORT = "{{ email_smtp_port_starttls }}"
GITEA__mailer__SUBJECT_PREFIX = "[Gitea]"
GITEA__mailer__USER = "{{ email_smtp_account }}"
GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages" # add `repo.actions` to the list if enabling actions
GITEA__server__DOMAIN = "{{ homelab_domain_name }}"
GITEA__server__ROOT_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
GITEA__server__SSH_DOMAIN = "${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
GITEA__server__SSH_PORT = "2222" # Traefik gitea-ssh entrypoint
GITEA__server__START_SSH_SERVER = false
GITEA__service__ENABLE_NOTIFY_MAIL = true
GITEA__time__DEFAULT_UI_LOCATION = "America/New_York"
TZ = "America/New_York"
USER_GID = "${meta.PGID}"
USER_UID = "${meta.PUID}"
}

driver = "docker"
config {
image = "gitea/gitea:{{ gitea_version }}"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["webui", "ssh"]
} // docker config

service {
port = "webui"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]

check {
type = "tcp"
port = "webui"
interval = "30s"
timeout = "4s"
}

check_restart {
limit = 0
grace = "1m"
}

} // service

service {
port = "ssh"
name = "gitea-ssh-svc"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.tcp.routers.gitea-ssh.rule=HostSNI(`*`)",
"traefik.tcp.routers.gitea-ssh.entrypoints=gitea-ssh",
"traefik.tcp.routers.gitea-ssh.service=gitea-ssh-svc"
]
} // service

// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources

} // task gitea

task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = [
"${meta.backupAllocArg1}",
"${meta.backupAllocArg2}",
"${meta.backupAllocArg3}",
"${meta.backupAllocArg4}",
"${meta.backupAllocArg5}",
"${NOMAD_JOB_NAME}",
"${meta.backupAllocArg6}"
]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration

} // group


// group "action-runners" {

// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "macmini"
// }

// constraint {
// distinct_hosts = true
// }

// count = 1

// restart {
// attempts = 0
// delay = "30s"
// }

// network {
// port "cache" {
// to = "8088"
// }
// }

// task "await-gitea" {

// lifecycle {
// hook = "prestart"
// sidecar = false
// }

// driver = "docker"

// config {
// image = "busybox:latest"
// command = "/bin/sh"
// args = [
// "-c",
// "chmod 755 /local/ping.sh && /local/ping.sh"
// ]
// network_mode = "host"
// }

// template {
// destination = "local/ping.sh"
// change_mode = "restart"
// data = <<-EOH
// #!/bin/sh
// {% raw -%}
// {{ range nomadService "gitea" }}
// IP="{{ .Address }}"
// PORT="{{ .Port }}"
// {{ end }}
// {% endraw -%}

// until [ -n "${IP}" ] && [ -n "${PORT}" ]; do
// echo "Waiting for Nomad to populate the service information..."
// sleep 1
// done

// echo "Waiting for Gitea to start..."

// until nc -z "${IP}" "${PORT}"; do
// echo "'nc -z ${IP} ${PORT}' is unavailable..."
// sleep 1
// done

// echo "Gitea is up! Found at ${IP}:${PORT}"

// EOH
// }

// }

// task "gitea-action-runner" {

// env {
// CONFIG_FILE = "/local/config.yml"
// GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
// GITEA_RUNNER_NAME = "${node.unique.name}-action-runner"
// GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
// PGID = "${meta.PGID}"
// PUID = "${meta.PUID}"
// TZ = "America/New_York"
// }

// driver = "docker"
// config {
// image = "gitea/act_runner:latest"
// image_pull_timeout = "10m"
// hostname = "${NOMAD_TASK_NAME}"
// volumes = [
// "${meta.nfsStorageRoot}/pi-cluster/gitea-action-runners:/data",
// "/var/run/docker.sock:/var/run/docker.sock"
// ]
// ports = ["cache"]
// } // docker config

// template {
// destination = "local/config.yml"
// env = false
// change_mode = "noop"
// data = <<-EOH
// log:
// # The level of logging, can be trace, debug, info, warn, error, fatal
// level: info

// runner:
// # Where to store the registration result.
// {% raw %}file: .runner-{{ env "node.unique.name" }}{% endraw +%}
// # Execute how many tasks concurrently at the same time.
// capacity: 1
// # Extra environment variables to run jobs.
// envs:
// A_TEST_ENV_NAME_1: a_test_env_value_1
// A_TEST_ENV_NAME_2: a_test_env_value_2
// # Extra environment variables to run jobs from a file.
// # It will be ignored if it's empty or the file doesn't exist.
// env_file: .env
// # The timeout for a job to be finished.
// # Please note that the Gitea instance also has a timeout (3h by default) for the job.
// # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
// timeout: 3h
// # Whether skip verifying the TLS certificate of the Gitea instance.
// insecure: false
// # The timeout for fetching the job from the Gitea instance.
// fetch_timeout: 5s
// # The interval for fetching the job from the Gitea instance.
// fetch_interval: 2s
// # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
// # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
// # If it's empty when registering, it will ask for inputting labels.
// # If it's empty when execute `daemon`, will use labels in `.runner` file.
// labels: []

// cache:
// # Enable cache server to use actions/cache.
// enabled: false
// # The directory to store the cache data.
// # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
// dir: ""
// # The host of the cache server.
// # It's not for the address to listen, but the address to connect from job containers.
// # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
// {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
// # The port of the cache server.
// {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
// # The external cache server URL. Valid only when enable is true.
// # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
// # The URL should generally end with "/".
// external_server: ""

// container:
// # Specifies the network to which the container will connect.
// # Could be host, bridge or the name of a custom network.
// # If it's empty, act_runner will create a network automatically.
// network: ""
// # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
// privileged: false
// # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
// options:
// # The parent directory of a job's working directory.
// # If it's empty, /workspace will be used.
// workdir_parent:
// # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
// # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
// # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
// # valid_volumes:
// # - data
// # - /src/*.json
// # If you want to allow any volume, please use the following configuration:
// # valid_volumes:
// # - '**'
// valid_volumes:
// - '**'
// # overrides the docker client host with the specified one.
// # If it's empty, act_runner will find an available docker host automatically.
// # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
// # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
// docker_host: ""
// # Pull docker image(s) even if already present
// force_pull: false

// host:
// # The parent directory of a job's working directory.
// # If it's empty, $HOME/.cache/act/ will be used.
// workdir_parent:
// EOH
// }

// // service {
// // port = "cache"
// // name = "${NOMAD_TASK_NAME}"
// // provider = "nomad"
// // tags = [
// // "traefik.enable=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
// // ]

// // check {
// // type = "tcp"
// // port = "cache"
// // interval = "30s"
// // timeout = "4s"
// // }

// // check_restart {
// // limit = 0
// // grace = "1m"
// // }

// // } // service

// resources {
// cpu = 400 # MHz
// memory = 600 # MB
// } // resources

// } // task gitea-action-runner

// } // group action-runners

} // job
@@ -87,6 +87,7 @@ job "grafana" {
service {
port = "http"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -109,7 +110,6 @@ job "grafana" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -52,6 +52,7 @@ job "headless-chrome" {
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`chrome.{{ homelab_domain_name }}`)",
@@ -70,7 +71,6 @@ job "headless-chrome" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -3,143 +3,149 @@ job "icloud_backup" {
datacenters = ["{{ datacenter_name }}"]
type = "service"

// Need to authenticate within the container by running
// icloud --username=<icloud-username> --session-directory=/app/session_data
// and then entering the 2FA code that is sent to the user associated with the iCloud account.

// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }

update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

group "icloud_backup" {

count = 1

restart {
attempts = 0
delay = "30s"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

task "icloud_backup" {
group "icloud_backup" {

env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
// ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
}
count = 1

driver = "docker"
config {
image = "mandarons/icloud-drive"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
"${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
"local/icloud_backup.yaml:/app/config.yaml",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
} // docker config
restart {
attempts = 0
delay = "30s"
}

template {
destination = "local/icloud_backup.yaml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
app:
logger:
# level - debug, info (default), warning, or error
level: "info"
# log filename icloud.log (default)
filename: "icloud.log"
credentials:
# iCloud drive username
username: "{{ icloud_backup_username }}"
# Retry login interval
retry_login_interval: 3600 # 1 hour
# Drive destination
root: "icloud"
smtp:
# If you want to recieve email notifications about expired/missing 2FA credentials then uncomment
email: "{{ email_smtp_account }}"
# optional, to email address. Default is sender email.
#to: "receiver@test.com"
password: "{{ icloud_backup_smtp_password }}"
host: "{{ email_smtp_host }}"
port: {{ email_smtp_port_starttls }}
# If your email provider doesn't handle TLS
no_tls: false
drive:
destination: "drive"
remove_obsolete: true
sync_interval: 172800 # 2 days
filters:
# File filters to be included in syncing iCloud drive content
folders:
- "Scanner By Readdle"
- "Documents by Readdle"
# - "folder3"
file_extensions:
# File extensions to be included
- "pdf"
- "png"
- "jpg"
- "jpeg"
- "xls"
- "xlsx"
- "docx"
- "pptx"
- "txt"
- "md"
- "html"
- "htm"
- "css"
- "js"
- "json"
- "xml"
- "yaml"
- "yml"
- "csv"
- "mp3"
- "mp4"
- "mov"
- "wav"
- "mkv"
- "m4a"
photos:
destination: "photos"
remove_obsolete: true
sync_inteval: 172800 # 2 days
filters:
albums:
# - "album1"
file_sizes: # valid values are original, medium and/or thumb
- "original"
# - "medium"
# - "thumb"
EOH
} // template data
task "icloud_backup" {

resources {
cpu = 900 # MHz
memory = 100 # MB
} // resources
env {
ENV_CONFIG_FILE_PATH = "/local/icloud_backup.yaml"
PGID = "${meta.PGID}"
PUID = "${meta.PUID}"
TZ = "America/New_York"
// ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
}

} // task
driver = "docker"
config {
image = "mandarons/icloud-drive"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
"${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
} // docker config

template {
destination = "local/icloud_backup.yaml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
app:
logger:
# level - debug, info (default), warning, or error
level: "info"
# log filename icloud.log (default)
filename: "icloud.log"
credentials:
# iCloud drive username
username: "{{ icloud_backup_username }}"
# Retry login interval
retry_login_interval: 3600 # 1 hour
root: "icloud"
smtp:
# If you want to receive email notifications about expired/missing 2FA credentials then uncomment
email: "{{ email_smtp_account }}"
# optional, to email address. Default is sender email.
#to: "receiver@test.com"
password: "{{ icloud_backup_smtp_password }}"
host: "{{ email_smtp_host }}"
port: {{ email_smtp_port_starttls }}
# If your email provider doesn't handle TLS
no_tls: false
drive:
destination: "drive"
remove_obsolete: true
sync_interval: 172800 # 2 days
filters:
# File filters to be included in syncing iCloud drive content
folders:
- "Scanner By Readdle"
- "Documents by Readdle"
# - "folder3"
file_extensions:
# File extensions to be included
- "pdf"
- "png"
- "jpg"
- "jpeg"
- "xls"
- "xlsx"
- "docx"
- "pptx"
- "txt"
- "md"
- "html"
- "htm"
- "css"
- "js"
- "json"
- "xml"
- "yaml"
- "yml"
- "csv"
- "mp3"
- "mp4"
- "mov"
- "wav"
- "mkv"
- "m4a"
photos:
destination: "photos"
remove_obsolete: true
sync_interval: 172800 # 2 days
all_albums: false # Optional, default false. If true preserve album structure. If same photo is in multiple albums creates duplicates on filesystem
folder_format: "%Y-%m" # optional, if set put photos in subfolders according to format. Cheatsheet - https://strftime.org
filters:
albums:
# - "album1"
file_sizes: # valid values are original, medium and/or thumb
- "original"
# - "medium"
# - "thumb"
EOH
} // template data

resources {
cpu = 900 # MHz
memory = 100 # MB
} // resources

} // task

} // group
} // group

} // job

@@ -78,6 +78,7 @@ job "influxdb" {
service {
port = "httpAPI"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"

check {
type = "tcp"
@@ -89,7 +90,6 @@ job "influxdb" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}

98
templates/nomad_jobs/jellyfin.hcl
Normal file
@@ -0,0 +1,98 @@
job "jellyfin" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"

constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}

update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

group "jellyfin" {

count = 1

restart {
attempts = 0
delay = "30s"
}

network {
port "webui" {
static = "8096"
to = "8096"
}
port "udp1" {
static = "7359"
to = "7359"
}
}

task "jellyfin" {

env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}

driver = "docker"
config {
image = "lscr.io/linuxserver/jellyfin:latest"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
"${meta.nfsStorageRoot}/media/media/movies:/data/movies",
"${meta.nfsStorageRoot}/media/media/tv:/data/tv"
]
ports = ["webui", "udp1"]
} // docker config

service {
port = "webui"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
]

check {
type = "tcp"
port = "webui"
interval = "30s"
timeout = "4s"
}

check_restart {
limit = 0
grace = "1m"
}

} // service

resources {
cpu = 2500 # MHz
memory = 750 # MB
} // resources

} // task
} // group
} // job
94
templates/nomad_jobs/ladder.hcl
Normal file
@@ -0,0 +1,94 @@
job "ladder" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"

// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }

update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

group "ladder" {

count = 1

restart {
attempts = 0
delay = "30s"
}

network {
port "port1" {
to = "8080"
}
}

task "ladder" {

env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
PORT = "8080"
}

driver = "docker"
config {
image = "ghcr.io/kubero-dev/ladder:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["port1"]
image_pull_timeout = "10m"
// volumes = [
// "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/"
// ]
} // docker config

service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
]

check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service

// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources

} // task

} // group

} // job
@@ -82,6 +82,7 @@ job "lidarr" {
service {
port = "lidarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "lidarr" {
check_restart {
limit = 0
grace = "10m"
ignore_warnings = true
}
} // service

@@ -47,6 +47,7 @@ job "loki" {
service {
port = "loki_port"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -66,7 +67,6 @@ job "loki" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -67,6 +67,7 @@ job "mealie" {
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -85,7 +86,6 @@ job "mealie" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -57,6 +57,7 @@ job "nginx" {
service {
port = "web"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -75,7 +76,6 @@ job "nginx" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -55,6 +55,7 @@ job "nzbhydra" {
service {
port = "hydra_port"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`hydra.{{ homelab_domain_name }}`)",
@@ -73,7 +74,6 @@ job "nzbhydra" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -1,92 +1,92 @@
job "overseerr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"

// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }

update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

group "overseerr" {

count = 1

restart {
attempts = 0
delay = "30s"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}

network {
port "overseerr" {
to = "5055"
}
}
group "overseerr" {

task "overseerr" {
count = 1

env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}

driver = "docker"
config {
image = "ghcr.io/linuxserver/overseerr"
hostname = "${NOMAD_JOB_NAME}"
ports = ["overseerr"]
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/overseerr:/config" ]
} // docker config

service {
port = "overseerr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=overseerr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]

check {
type = "tcp"
port = "overseerr"
interval = "30s"
timeout = "4s"
restart {
attempts = 0
delay = "30s"
}

check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
network {
port "overseerr" {
to = "5055"
}
}
} // service

resources {
cpu = 1600 # MHz
memory = 300 # MB
} // resources
task "overseerr" {

} // task
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}

driver = "docker"
config {
image = "lscr.io/linuxserver/overseerr:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["overseerr"]
image_pull_timeout = "10m"
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/overseerr:/config" ]
} // docker config

service {
port = "overseerr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=overseerr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]

check {
type = "tcp"
port = "overseerr"
interval = "30s"
timeout = "4s"
}

check_restart {
limit = 0
grace = "1m"
}
} // service

resources {
cpu = 1600 # MHz
memory = 300 # MB
} // resources

} // task

} // group
} // group

} // job

@@ -37,7 +37,7 @@ job "pihole" {
// }
}

task "await_filesytem" {
task "await_filesystem" {
driver = "docker"

config {
@@ -109,6 +109,7 @@ job "pihole" {
service {
name = "${NOMAD_JOB_NAME}"
port = "web"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`p.{{ homelab_domain_name }}`)",
@@ -118,7 +119,7 @@ job "pihole" {
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.piholeRedirect.redirectregex.regex=^(https?://p\\.{{ homelab_domain_name }})/?$",
"traefik.http.middlewares.piholeRedirect.redirectregex.replacement=$${1}/admin/",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,piholeRedirect"
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=piholeRedirect"
]
check {
type = "http"
@@ -130,13 +131,13 @@ job "pihole" {
check_restart {
limit = 3
grace = "10m"
ignore_warnings = false
}
}

service {
name = "piholeDNStcp"
port = "dns"
provider = "nomad"
check {
type = "tcp"
port = "dns"

@@ -51,7 +51,7 @@ job "promtail-syslogs" {

{% raw -%}
clients:
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
- url: http://{{ range nomadService "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
{% endraw %}

scrape_configs:

@@ -84,6 +84,7 @@ job "prowlarr" {
service {
port = "prowlarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -103,7 +104,6 @@ job "prowlarr" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -1,136 +1,136 @@
job "radarr" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi3"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "radarrGroup" {

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "radarr" {
                to = "7878"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. sonarr will not work with
            // its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args = [
                    "${meta.restoreCommand1}",
                    "${meta.restoreCommand2}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.restoreCommand3}"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "radarr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
                //UMASK_SET = 022 #optional
            }

            driver = "docker"
            config {
                image      = "ghcr.io/linuxserver/radarr:develop"
                hostname   = "${NOMAD_JOB_NAME}"
                force_pull = true
                ports      = ["radarr"]
                volumes = [
                    "${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
                    "${meta.nfsStorageRoot}/media:/media"
                ]
            } // docker config

            service {
                port     = "radarr"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "radarr"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 2000 # MHz
                memory = 400  # MB
            } // resources

        } // /task radarr

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args = [
                    "${meta.backupAllocArg1}",
                    "${meta.backupAllocArg2}",
                    "${meta.backupAllocArg3}",
                    "${meta.backupAllocArg4}",
                    "${meta.backupAllocArg5}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.backupAllocArg6}"
                ]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group
} // job
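// Note on the job above: `create_filesystem` runs as a prestart hook, copying the
// most recent backup onto local storage before the main radarr task starts, and
// `save_configuration` runs as a poststop hook to back the config up again after
// the task exits. With `sidecar = false` each hook task runs once to completion
// instead of staying up alongside the main task.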
@@ -81,6 +81,7 @@ job "readarr" {
            service {
                port     = "readarr"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "readarr" {
                check_restart {
                    limit = 0
                    grace = "1m"
                    ignore_warnings = true
                }
            } // service
@@ -32,104 +32,183 @@ job "recyclarr" {
    task "recyclarr" {

        env {
            TZ                 = "America/New_York"
            RECYCLARR_APP_DATA = "/local"
        }

        // user = "${meta.PUID}:${meta.PGID}"
        driver = "docker"
        config {
            image    = "ghcr.io/recyclarr/recyclarr:{{ recyclarr_version }}"
            hostname = "${NOMAD_TASK_NAME}"
            init     = true
            volumes = [
                "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
            ]
        } // docker config

        template {
            destination = "local/recyclarr.yml"
            env         = false
            change_mode = "restart"
            perms       = "644"
            data        = <<-EOH
            # yaml-language-server: $schema=https://raw.githubusercontent.com/recyclarr/recyclarr/master/schemas/config-schema.json

            # A starter config to use with Recyclarr. Most values are set to "reasonable defaults". Update the
            # values below as needed for your instance. You will be required to update the API Key and URL for
            # each instance you want to use.
            #
            # Many optional settings have been omitted to keep this template simple. Note that there's no "one
            # size fits all" configuration. Please refer to the guide to understand how to build the appropriate
            # configuration based on your hardware setup and capabilities.
            #
            # For any lines that mention uncommenting YAML, you simply need to remove the leading hash (`#`).
            # The YAML comments will already be at the appropriate indentation.
            #
            # For more details on the configuration, see the Configuration Reference on the wiki here:
            # https://recyclarr.dev/wiki/reference/config-reference

            # Configuration specific to Sonarr
            sonarr:
              series:
                base_url: https://sonarr.{{ homelab_domain_name }}/
                api_key: {{ sonarr_api_key }}
                delete_old_custom_formats: true

                # Quality definitions from the guide to sync to Sonarr. Choices: series, anime
                quality_definition:
                  type: series

                # Release profiles from the guide to sync to Sonarr v3 (Sonarr v4 does not use this!)
                # Use `recyclarr list release-profiles` for values you can put here.
                # https://trash-guides.info/Sonarr/Sonarr-Release-Profile-RegEx/
                release_profiles:
                  - trash_ids:
                      - EBC725268D687D588A20CBC5F97E538B # Low Quality Groups
                      - 1B018E0C53EC825085DD911102E2CA36 # Release Sources (Streaming Service)
                      - 71899E6C303A07AF0E4746EFF9873532 # P2P Groups + Repack/Proper
                    strict_negative_scores: false

                  - trash_ids:
                      - 76e060895c5b8a765c310933da0a5357 # Optionals
                    filter:
                      include:
                        - cec8880b847dd5d31d29167ee0112b57 # Golden rule
                        - 436f5a7d08fbf02ba25cb5e5dfe98e55 # Ignore Dolby Vision without HDR10 fallback.
                        # - f3f0f3691c6a1988d4a02963e69d11f2 # Ignore The Group -SCENE
                        # - 5bc23c3a055a1a5d8bbe4fb49d80e0cb # Ignore so called scene releases
                        - 538bad00ee6f8aced8e0db5218b8484c # Ignore Bad Dual Audio Groups
                        - 4861d8238f9234606df6721df6e27deb # Ignore AV1
                        - bc7a6383cbe88c3ee2d6396e1aacc0b3 # Prefer HDR
                        - 6f2aefa61342a63387f2a90489e90790 # Dislike retags: rartv, rarbg, eztv, TGx
                        - 19cd5ecc0a24bf493a75e80a51974cdd # Dislike retagged groups
                        - 6a7b462c6caee4a991a9d8aa38ce2405 # Dislike release ending: en
                        - 236a3626a07cacf5692c73cc947bc280 # Dislike release containing: 1-
                        # - fa47da3377076d82d07c4e95b3f13d07 # Prefer Dolby Vision

            # Configuration specific to Radarr.
            radarr:
              movies:
                base_url: https://radarr.{{ homelab_domain_name }}/
                api_key: {{ radarr_api_key }}
                delete_old_custom_formats: true
                replace_existing_custom_formats: true

                # Which quality definition in the guide to sync to Radarr. Only choice right now is 'movie'
                quality_definition:
                  type: movie
                  preferred_ratio: 0.5

                quality_profiles:
                  - name: "720p/1080p"
                    reset_unmatched_scores:
                      enabled: true
                  - name: "720p/1080p Remux"
                    reset_unmatched_scores:
                      enabled: true

                custom_formats:
                  # Use `recyclarr list custom-formats radarr` for values you can put here.
                  # https://trash-guides.info/Radarr/Radarr-collection-of-custom-formats/

                  - trash_ids:
                      # Movie versions
                      - eca37840c13c6ef2dd0262b141a5482f # 4K Remaster
                      - 570bc9ebecd92723d2d21500f4be314c # Remaster
                      - 0f12c086e289cf966fa5948eac571f44 # Hybrid
                      - 9d27d9d2181838f76dee150882bdc58c # Masters of Cinema
                      - e0c07d59beb37348e975a930d5e50319 # Criterion Collection
                      - 957d0f44b592285f26449575e8b1167e # Special Edition
                      - eecf3a857724171f968a66cb5719e152 # IMAX
                      - 9f6cbff8cfe4ebbc1bde14c7b7bec0de # IMAX Enhanced
                      # Unwanted
                      - b8cd450cbfa689c0259a01d9e29ba3d6 # 3D
                      - ed38b889b31be83fda192888e2286d83 # BR-DISK
                      - 90a6f9a284dff5103f6346090e6280c8 # LQ
                      - bfd8eb01832d646a0a89c4deb46f8564 # Upscaled
                      - 90cedc1fea7ea5d11298bebd3d1d3223 # EVO (no WEBDL)
                      - 923b6abef9b17f937fab56cfcf89e1f1 # DV (WEBDL)
                      - b6832f586342ef70d9c128d40c07b872 # Bad Dual Groups
                      - ae9b7c9ebde1f3bd336a8cbd1ec4c5e5 # No-RlsGroup
                      - 7357cf5161efbf8c4d5d0c30b4815ee2 # Obfuscated
                      - 5c44f52a8714fdd79bb4d98e2673be1f # Retags
                      - c465ccc73923871b3eb1802042331306 # Line/Mic Dubbed
                      # Misc
                      - e7718d7a3ce595f289bfee26adc178f5 # Repack/Proper
                      - ae43b294509409a6a13919dedd4764c4 # Repack2
                      # HQ Release Groups
                      - ed27ebfef2f323e964fb1f61391bcb35 # HD Bluray Tier 01
                      - c20c8647f2746a1f4c4262b0fbbeeeae # HD Bluray Tier 02
                      - c20f169ef63c5f40c2def54abaf4438e # WEB Tier 01
                      - 403816d65392c79236dcb6dd591aeda4 # WEB Tier 02
                      - af94e0fe497124d1f9ce732069ec8c3b # WEB Tier 03
                    quality_profiles:
                      - name: "720p/1080p"
                      - name: "720p/1080p Remux"

                  # HDR FORMATS
                  # ########################
                  - trash_ids:
                      - 3a3ff47579026e76d6504ebea39390de # Remux Tier 01
                      - 9f98181fe5a3fbeb0cc29340da2a468a # Remux Tier 02
                      - e61e28db95d22bedcadf030b8f156d96 # HDR
                      - 2a4d9069cc1fe3242ff9bdaebed239bb # HDR (undefined)
                    quality_profiles:
                      - name: "720p/1080p"
                        score: -100
                      - name: "720p/1080p Remux"

                  # AUDIO FORMATS
                  # ########################
                  - trash_ids:
                      - 6fd7b090c3f7317502ab3b63cc7f51e3 # 6.1 Surround
                      - e77382bcfeba57cb83744c9c5449b401 # 7.1 Surround
                      - f2aacebe2c932337fe352fa6e42c1611 # 9.1 Surround
                    quality_profiles:
                      - name: "720p/1080p"
                        score: -50
                      - name: "720p/1080p Remux"
                        score: -50

                  - trash_ids:
                      - 89dac1be53d5268a7e10a19d3c896826 # 2.0 Stereo
                    quality_profiles:
                      - name: "720p/1080p"
                        score: 120

                  - trash_ids:
                      - 77ff61788dfe1097194fd8743d7b4524 # 5.1 Surround
                    quality_profiles:
                      - name: "720p/1080p"
                        score: 80
                      - name: "720p/1080p Remux"
                        score: 80
            EOH
        }

        resources {
            cpu    = 100 # MHz
            memory = 300 # MB
        } // resources

    } // task
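// Note: the `{{ ... }}` values above (api keys, domain, recyclarr_version) are
// rendered once by Ansible's template module when this job file is deployed;
// only expressions wrapped in {% raw %} survive to Nomad's own template engine.
// The trash_ids are opaque hashes from the TRaSH-Guides definitions; use
// `recyclarr list custom-formats radarr` (or `list release-profiles`) to look
// them up.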
templates/nomad_jobs/remove_nzbs.hcl (new file, 27 lines)
@@ -0,0 +1,27 @@
job "remove_nzbs" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "batch"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "rpi"
    }

    periodic {
        cron             = "*/15 * * * * *"
        prohibit_overlap = true
        time_zone        = "America/New_York"
    }

    task "remove_nzbs" {
        driver = "raw_exec"
        config {
            command = "/home/pi/.pyenv/shims/python"
            args    = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"]
        }

    } // /task do_backups

} //job
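// Note: Nomad parses `cron` with hashicorp/cronexpr, which accepts an optional
// leading seconds field, so the six-field "*/15 * * * * *" above fires every 15
// seconds rather than every 15 minutes; `prohibit_overlap = true` keeps a slow
// run from stacking behind the next trigger.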
@@ -1,496 +1,529 @@
job "reverse-proxy" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "reverse-proxy-group" {

        constraint {
            attribute = "${node.unique.name}"
            operator  = "regexp"
            value     = "rpi"
        }

        network {
            port "authelia-port" {
                static = {{ authelia_port }}
                to     = 9091
            }
            port "whoami" {
                to = 80
            }
            port "dashboard" {
                static = 8080
                to     = 8080
            }
            port "web" {
                static = 80
                to     = 80
            }
            port "websecure" {
                static = 443
                to     = 443
            }
            port "externalwebsecure" {
                static = 4430
                to     = 4430
            }
            port "ssh" { # Used for gitea
                static = 2222
                to     = 2222
            }
        }

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "authelia" {

            env {
                TZ   = "America/New_York"
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
            }

            driver = "docker"
            config {
                image              = "authelia/authelia:{{ authelia_version }}"
                hostname           = "authelia"
                ports              = ["authelia-port"]
                image_pull_timeout = "10m"
                volumes            = [ "${meta.nfsStorageRoot}/pi-cluster/authelia:/config" ]
                args = [
                    "--config",
                    "/local/authelia/config.yml"
                ]
            } // docker config

            template {
                destination = "local/authelia/users.yml"
                env         = false
                change_mode = "restart"
                perms       = "644"
                data        = <<-EOH
                ---
                ###############################################################
                #                         Users Database                      #
                ###############################################################

                # This file can be used if you do not have an LDAP set up.
                users:
                  {{ authelia_user1_name }}:
                    displayname: "{{ authelia_user1_name }}"
                    password: "$argon2id$v=19$m=65536,t=1,p={{ authelia_user1_password }}"
                    email: {{ authelia_user1_email }}
                    groups:
                      - admins
                      - dev
                EOH
            }

            template {
                destination = "local/authelia/config.yml"
                env         = false
                change_mode = "restart"
                perms       = "644"
                data        = <<-EOH
                ---
                ## The theme to display: light, dark, grey, auto.
                theme: auto

                jwt_secret: {{ authelia_jwt_secret}}
                default_redirection_url: https://authelia.{{ homelab_domain_name}}

                server:
                  host: 0.0.0.0
                  port: 9091
                  path: ""
                  buffers:
                    read: 4096
                    write: 4096
                  timeouts:
                    read: 15s
                    write: 15s
                    idle: 30s
                  enable_pprof: false
                  enable_expvars: false
                  disable_healthcheck: false

                log:
                  level: info
                  format: text
                  # file_path: "/config/log.txt"
                  keep_stdout: false

                totp:
                  issuer: authelia.com

                authentication_backend:
                  password_reset:
                    disable: false
                  file:
                    path: /local/authelia/users.yml
                    password:
                      algorithm: argon2id
                      iterations: 1
                      salt_length: 16
                      parallelism: 8
                      memory: 64

                access_control:
                  default_policy: deny
                  networks:
                    - name: internal
                      networks:
                        - 10.0.0.0/8
                        #- 172.16.0.0/12
                        #- 192.168.0.0/18
                  rules:
                    # Rules applied to everyone
                    - domain: "*.{{ homelab_domain_name }}"
                      policy: two_factor
                      networks:
                        - internal

                session:
                  name: authelia_session
                  domain: {{ homelab_domain_name }}
                  same_site: lax
                  secret: {{ authelia_session_secret }}
                  expiration: 1h
                  inactivity: 15m
                  remember_me_duration: 1w

                regulation:
                  max_retries: 5
                  find_time: 10m
                  ban_time: 15m

                storage:
                  encryption_key: {{ authelia_sqlite_encryption_key}}
                  local:
                    path: /config/db.sqlite3

                notifier:
                  smtp:
                    username: {{ email_smtp_account }}
                    password: {{ authelia_smtp_password }}
                    host: {{ email_smtp_host }}
                    port: {{ email_smtp_port }}
                    sender: "Authelia <{{ my_email_address }}>"
                    subject: "[Authelia] {title}"
                    startup_check_address: {{ my_email_address }}

                ntp:
                  address: "time.cloudflare.com:123"
                  version: 3
                  max_desync: 3s
                  disable_startup_check: true
                  disable_failure: true
                EOH
            }

            service {
                port     = "authelia-port"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`authelia.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Cache-Control=no-store",
                    "traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Pragma=no-cache",
                    "traefik.http.routers.authelia.middlewares=authelia-headers"
                ]

                check {
                    type     = "tcp"
                    port     = "authelia-port"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 200  # MHz
                memory = 1000 # MB
            }

        } // task authelia

        task "whoami" {
            driver = "docker"
            config {
                image              = "containous/whoami:latest"
                hostname           = "${NOMAD_TASK_NAME}"
                image_pull_timeout = "10m"
                ports              = ["whoami"]

            } // /docker config

            service {
                port     = "whoami"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]
                check {
                    type     = "http"
                    path     = "/"
                    interval = "90s"
                    timeout  = "15s"
                }
                check_restart {
                    limit = 2
                    grace = "1m"
                }
            }
            resources {
                cpu    = 25 # MHz
                memory = 10 # MB
            }

        } // /task whoami

        task "traefik" {

            env {
                PUID             = "${meta.PUID}"
                PGID             = "${meta.PGID}"
                TZ               = "America/New_York"
                CF_API_EMAIL     = "{{ my_email_address }}"
                CF_DNS_API_TOKEN = "{{ traefik_cf_api_token }}"
            }

            driver = "docker"
            config {
                image              = "traefik:v{{ traefik_version }}"
                hostname           = "traefik"
                ports              = ["dashboard", "web", "websecure","externalwebsecure", "ssh"]
                volumes            = [ "${meta.nfsStorageRoot}/pi-cluster/traefik/acme:/acme" ]
                image_pull_timeout = "10m"
                args = [
                    "--global.sendAnonymousUsage=false",
                    "--global.checkNewVersion=false",
                    "--entryPoints.gitea-ssh.address=:2222",
                    "--entryPoints.web.address=:80",
                    "--entryPoints.websecure.address=:443",
                    "--entryPoints.externalwebsecure.address=:4430",
                    "--entrypoints.web.http.redirections.entryPoint.to=websecure",
                    "--entrypoints.web.http.redirections.entryPoint.scheme=https",
                    "--entrypoints.web.http.redirections.entryPoint.permanent=true",
                    "--providers.file.filename=/local/traefik/siteconfigs.toml",
                    "--providers.file.watch=true",
                    "--providers.consulcatalog=true",
                    "--providers.consulcatalog.endpoint.address=http://${NOMAD_IP_web}:8500",
                    "--providers.consulcatalog.prefix=traefik",
                    "--providers.consulcatalog.exposedbydefault=false",
                    "--providers.nomad=true",
                    "--providers.nomad.endpoint.address=http://${NOMAD_IP_web}:4646",
                    // "--metrics=true",
                    // "--metrics.influxdb=true",
                    // "--metrics.influxdb.address=influxdb.service.consul:{{ influxdb_port }}",
                    // "--metrics.influxdb.protocol=http",
                    // "--metrics.influxdb.pushinterval=10s",
                    // "--metrics.influxdb.database=homelab",
                    // "--metrics.influxdb.retentionpolicy=2day",
                    // "--metrics.influxdb.addentrypointslabels=true",
                    // "--metrics.influxdb.addserviceslabels=true",
                    "--accesslog=true",
                    "--log=true",
                    "--log.level=ERROR",
                    "--api=true",
                    "--api.dashboard=true",
                    "--api.insecure=true",
                    "--certificatesresolvers.cloudflare.acme.email={{ my_email_address }}",
                    "--certificatesresolvers.cloudflare.acme.storage=/acme/acme-${node.unique.name}.json",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge=true",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.delaybeforecheck=10",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53"
                ]
            } // docker config

            template {
                destination = "local/traefik/httpasswd"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                {{ my_username }}:{{ traefik_http_pass_me }}
                family:{{ traefik_http_pass_family }}
                EOH
            }

            template {
                destination = "local/traefik/httpasswdFamily"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                {{ my_username }}:{{ traefik_http_pass_me }}
                family:{{ traefik_http_pass_family }}
                EOH
            }

            template {
                destination = "local/traefik/siteconfigs.toml"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                [http]
                    [http.middlewares]
                        [http.middlewares.compress.compress]

                        [http.middlewares.localIPOnly.ipWhiteList]
                            sourceRange = ["10.0.0.0/8"]

                        [http.middlewares.redirectScheme.redirectScheme]
                            scheme    = "https"
                            permanent = true

                        [http.middlewares.authelia.forwardAuth]
                            address = {% raw %}"http://{{ range nomadService "authelia" }}{{ .Address }}:{{ .Port }}{{ end }}{% endraw %}/api/verify?rd=https://authelia.{{ homelab_domain_name }}"
                            trustForwardHeader = true
                            authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]

                        [http.middlewares.basicauth.basicauth]
                            usersfile    = "/local/traefik/httpasswd"
                            removeHeader = true

                        [http.middlewares.basicauth-family.basicauth]
                            usersfile    = "/local/traefik/httpasswdFamily"
                            removeHeader = true

                        [http.middlewares.allowFrame.headers]
                            customFrameOptionsValue = "allow-from https://home.{{ homelab_domain_name }}"

                    [http.routers]

                        [http.routers.consul]
                            rule        = "Host(`consul.{{ homelab_domain_name }}`)"
                            service     = "consul"
                            entrypoints = ["web","websecure"]
                            [http.routers.consul.tls]
                                certResolver = "cloudflare" # From static configuration

                    [http.services]

                        [http.services.consul]
                            [http.services.consul.loadBalancer]
                                passHostHeader = true
                                [[http.services.consul.loadBalancer.servers]]
                                    url = "http://consul.service.consul:8500"

                EOH
            }

            service {
                port     = "dashboard"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file,redirectScheme@file"
                ]

                check {
                    type     = "tcp"
                    port     = "dashboard"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 140 # MHz
                memory = 100 # MB
            } // resources

        } // task traefik

        // task "promtail-traefik" {

        //     driver = "docker"
        //     config {
        //         image    = "grafana/promtail"
        //         hostname = "promtail-traefik"
        //         volumes = [
        //             "/mnt/pi-cluster/logs:/traefik"
        //         ]
        //         args = [
        //             "-config.file",
        //             "/local/promtail-config.yaml",
        //             "-print-config-stderr",
        //         ]
        //     } // docker config

        //     template {
        //         destination = "local/promtail-config.yaml"
        //         env         = false
        //         data        = <<-EOH
        //         server:
        //           http_listen_port: 9080
        //           grpc_listen_port: 0

        //         positions:
        //           filename: /alloc/positions.yaml

        //         {% raw -%}
        //         clients:
        //           - url: http://{{ range nomadService "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
        //         {% endraw %}

        //         scrape_configs:
        //           - job_name: traefik
        //             static_configs:
        //               - targets:
        //                   - localhost
        //                 labels:
        //                   job: traefik_access
        //                   {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
        //                   __path__: "/alloc/logs/traefik.std*.0"
        //             pipeline_stages:
        //               - regex:
        //                   expression: '^(?P<remote_addr>[\w\.]+) - (?P<remote_user>[^ ]*) \[(?P<time_local>.*)\] "(?P<method>[^ ]*) (?P<request>[^ ]*) (?P<protocol>[^ ]*)" (?P<status>[\d]+) (?P<body_bytes_sent>[\d]+) "(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)" (?P<request_number>[^ ]+) "(?P<router>[^ ]+)" "(?P<server_URL>[^ ]+)" (?P<response_time_ms>[^ ]+)ms$'
        //               - labels:
        //                   method:
        //                   status:
        //                   router:
        //                   response_time_ms:

        //         EOH
        //     } // template

        //     lifecycle {
        //         hook    = "poststart"
        //         sidecar = true
        //     }

        //     resources {
        //         cpu    = 30 # MHz
        //         memory = 30 # MB
        //     } // resources

        // } // promtail sidecar task

    } // reverse-proxy-group
}
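// Note on the rewrite above: the old two-group layout (authelia-group plus a
// reverse-proxy-group pinned to rpi1) collapses into a single group so all three
// tasks share one port block and are scheduled as a unit. Traefik now runs its
// native Nomad provider (available since roughly v2.8) alongside the legacy
// consulcatalog provider during the migration, and the Authelia forwardAuth
// address is resolved at runtime via `nomadService` instead of the old
// `authelia.service.consul` DNS name.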
templates/nomad_jobs/sabnzbd.hcl (new file, 101 lines)
@@ -0,0 +1,101 @@
job "sabnzbd" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "macmini"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "sabnzbd" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "http" {
                static = "8080"
                to     = "8080"
            }

        }

        task "sabnzbd" {

            env {
                PUID        = "${meta.PUID}"
                PGID        = "${meta.PGID}"
                TZ          = "America/New_York"
                DOCKER_MODS = "linuxserver/mods:universal-cron"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/sabnzbd"
                hostname = "${NOMAD_TASK_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
                    "${meta.nfsStorageRoot}/media/downloads/nzb:/nzbd",
                    "${meta.nfsStorageRoot}/media/downloads/temp:/incomplete-downloads",
                    "${meta.nfsStorageRoot}/media/downloads/complete:/downloads",
                    "${meta.nfsStorageRoot}/nate:/nate",
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}/startup-scripts:/custom-cont-init.d"
                ]
                ports = ["http"]
            } // docker config

            service {
                port     = "http"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`sab.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
                    // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "http"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 5000 # MHz
                memory = 1000 # MB
            } // resources

        } // task

    } // group

} // job
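// Note: linuxserver.io images execute any scripts mounted at /custom-cont-init.d
// before the main service starts, and DOCKER_MODS=linuxserver/mods:universal-cron
// layers a cron daemon into the container, so the startup-scripts volume above is
// the hook for site-specific init work.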
@@ -82,6 +82,7 @@ job "sonarr" {
            service {
                port     = "sonarr"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "sonarr" {
                check_restart {
                    limit = 0
                    grace = "1m"
                    ignore_warnings = true
                }
            } // service
@@ -40,6 +40,7 @@ job "stash" {
            env {
                PUID            = "${meta.PUID}"
                PGID            = "${meta.PGID}"
                TZ              = "America/New_York"
                STASH_STASH     = "/data/"
                STASH_GENERATED = "/generated/"
                STASH_METADATA  = "/metadata/"
@@ -58,6 +59,7 @@ job "stash" {
                    "${meta.nfsStorageRoot}/nate/.stash/generated:/generated",
                    "${meta.nfsStorageRoot}/nate/.stash/media:/data",
                    "${meta.nfsStorageRoot}/nate/.stash/metadata:/metadata",
                    "${meta.nfsStorageRoot}/nate/.stash/blobs:/blobs",
                    "/etc/timezone:/etc/timezone:ro"
                ]
                ports = ["port1"]
@@ -66,6 +68,7 @@ job "stash" {
            service {
                port     = "port1"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -73,7 +76,6 @@ job "stash" {
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
                ]

                check {
@@ -85,12 +87,11 @@ job "stash" {
                check_restart {
                    limit = 0
                    grace = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 4500 # MHz
                cpu    = 3000 # MHz
                memory = 400  # MB
            } // resources
@@ -70,6 +70,7 @@ job "syncthing" {
            service {
                port     = "webGUI"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -89,7 +90,6 @@ job "syncthing" {
                check_restart {
                    limit = 0
                    grace = "1m"
                    ignore_warnings = true
                }
            } // service
@@ -108,7 +108,7 @@ job "TEMPLATE" {
            }
        }

        task "await-TEMPLATEdb" {
        task "await-TEMPLATEEdb" {
            driver = "docker"

            config {
@@ -158,6 +158,7 @@ job "TEMPLATE" {
            service {
                name     = "${NOMAD_TASK_NAME}"
                port     = "port2"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
@@ -178,7 +179,6 @@ job "TEMPLATE" {
                check_restart {
                    limit = 3
                    grace = "1m"
                    ignore_warnings = true
                }
            } // service
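// Note: check_restart tells Nomad to restart the task after `limit` consecutive
// failed health checks, ignoring failures that occur within the `grace` window
// after the task starts. `limit = 3` with `grace = "1m"` tolerates a slow boot
// before three straight failures force a restart.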
@@ -9,87 +9,89 @@ job "TEMPLATE" {
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "TEMPLATE" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                static = "80"
                to     = "80"
            }
        }

        task "TEMPLATE" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image              = ""
                image_pull_timeout = "10m"
                hostname           = "${NOMAD_TASK_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port     = "port1"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit = 0
                    grace = "1m"
                }

            } // service

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task

    } // group

} // job
@@ -82,6 +82,7 @@ job "TEMPLATE" {
            service {
                port = "port1"
                name = "${NOMAD_TASK_NAME}"
+                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -102,7 +103,6 @@ job "TEMPLATE" {
                check_restart {
                    limit           = 0
                    grace           = "1m"
-                    ignore_warnings = true
                }
            } // service
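The recurring edit across these job specs moves service registration from Consul to Nomad's built-in service discovery and drops check_restart.ignore_warnings, which only applies to Consul checks (Nomad-native checks have no "warning" state). A minimal sketch of the resulting pattern, assuming a task that already defines a "port1" network port; the service name here is hypothetical:

service {
    provider = "nomad"   // register in Nomad's own catalog instead of Consul
    name     = "example" // hypothetical service name
    port     = "port1"

    check {
        type     = "tcp" // simple reachability probe against the allocated port
        port     = "port1"
        interval = "30s"
        timeout  = "4s"
    }

    check_restart {
        limit = 0 // 0 disables restarts triggered by failing checks
        grace = "1m"
    }
}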
@@ -70,6 +70,7 @@ job "uptimekuma" {
            service {
                port = "web"
                name = "${NOMAD_JOB_NAME}"
+                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`uptime.{{ homelab_domain_name }}`)",
@@ -88,7 +89,6 @@ job "uptimekuma" {
                check_restart {
                    limit           = 0
                    grace           = "1m"
-                    ignore_warnings = true
                }
            } // service
153 templates/nomad_jobs/valentina.hcl Normal file
@@ -0,0 +1,153 @@
job "valentina" {
|
||||
region = "global"
|
||||
datacenters = ["{{ datacenter_name }}"]
|
||||
type = "service"
|
||||
|
||||
// constraint {
|
||||
// attribute = "${node.unique.name}"
|
||||
// operator = "regexp"
|
||||
// value = "rpi(1|2|3)"
|
||||
// }
|
||||
|
||||
update {
|
||||
max_parallel = 1
|
||||
health_check = "checks"
|
||||
min_healthy_time = "10s"
|
||||
healthy_deadline = "5m"
|
||||
progress_deadline = "10m"
|
||||
auto_revert = true
|
||||
canary = 0
|
||||
stagger = "30s"
|
||||
}
|
||||
|
||||
group "valentina" {
|
||||
|
||||
count = 1
|
||||
restart {
|
||||
attempts = 0
|
||||
delay = "30s"
|
||||
}
|
||||
|
||||
task "valentina" {
|
||||
|
||||
env {
|
||||
PGID = "${meta.PGID}"
|
||||
PUID = "${meta.PUID}"
|
||||
TZ = "America/New_York"
|
||||
VALENTINA_AWS_ACCESS_KEY_ID = "{{ valentina_aws_access_key_id }}"
|
||||
VALENTINA_AWS_SECRET_ACCESS_KEY = "{{ valentina_aws_secret_access_key }}"
|
||||
VALENTINA_DISCORD_TOKEN = "{{ valentina_discord_token }}"
|
||||
VALENTINA_GUILDS = "{{ valentina_guids }}"
|
||||
VALENTINA_LOG_LEVEL = "INFO"
|
||||
VALENTINA_LOG_LEVEL_AWS = "INFO"
|
||||
VALENTINA_MONGO_DATABASE_NAME = "{{ valentina_mongo_database_name }}"
|
||||
VALENTINA_MONGO_URI = "{{ valentina_mongo_uri }}"
|
||||
VALENTINA_OWNER_CHANNELS = "{{ valentina_owner_channels }}"
|
||||
VALENTINA_OWNER_IDS = "{{ valentina_owner_ids }}"
|
||||
VALENTINA_S3_BUCKET_NAME = "{{ valentina_s3_bucket_name}}"
|
||||
VALENTINA_GITHUB_TOKEN = "{{ valentina_github_token }}"
|
||||
VALENTINA_GITHUB_REPO = "{{ valentina_github_repo }}"
|
||||
}
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "ghcr.io/natelandau/valentina:v{{ valentina_version }}"
|
||||
image_pull_timeout = "10m"
|
||||
hostname = "${NOMAD_TASK_NAME}"
|
||||
volumes = [
|
||||
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/valentina",
|
||||
]
|
||||
} // docker config
|
||||
|
||||
// resources {
|
||||
// cpu = 100 # MHz
|
||||
// memory = 300 # MB
|
||||
// } // resources
|
||||
|
||||
} // task
|
||||
|
||||
|
||||
} // group
|
||||
|
||||
group "mongobackup" {
|
||||
|
||||
count = 1
|
||||
|
||||
restart {
|
||||
attempts = 0
|
||||
delay = "30s"
|
||||
}
|
||||
|
||||
network {
|
||||
port "port1" {
|
||||
to = "80"
|
||||
}
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
task "mongobackup" {
|
||||
|
||||
env {
|
||||
PUID = "${meta.PUID}"
|
||||
PGID = "${meta.PGID}"
|
||||
TZ = "America/New_York"
|
||||
AWS_ACCESS_KEY_ID = "{{ valentina_aws_access_key_id }}"
|
||||
AWS_S3_BUCKET_NAME = "{{ valentina_s3_bucket_name }}"
|
||||
AWS_S3_BUCKET_PATH = "db_backups"
|
||||
AWS_SECRET_ACCESS_KEY = "{{ valentina_aws_secret_access_key }}"
|
||||
BACKUP_DIR = "/data/db_backups"
|
||||
CRON_SCHEDULE = "0 2 * * *" # 2am daily
|
||||
// CRON_SCHEDULE = "*/1 * * * *" # Every minute
|
||||
DAILY_RETENTION = "7"
|
||||
DB_NAME = "{{ backup_mongo_db_name }}"
|
||||
LOG_FILE = "/data/backup_mongodb.log"
|
||||
LOG_LEVEL = "INFO"
|
||||
MONGODB_URI = "{{ backup_mongo_mongodb_uri }}"
|
||||
MONTHLY_RETENTION = "12"
|
||||
PORT = "80"
|
||||
STORAGE_LOCATION = "BOTH"
|
||||
WEEKLY_RETENTION = "4"
|
||||
YEARLY_RETENTION = "2"
|
||||
}
|
||||
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "ghcr.io/natelandau/backup-mongodb:v{{ backup_mongodb_version }}"
|
||||
image_pull_timeout = "10m"
|
||||
hostname = "${NOMAD_TASK_NAME}"
|
||||
ports = ["port1"]
|
||||
volumes = ["${meta.nfsStorageRoot}/pi-cluster/valentina:/data"]
|
||||
} // docker config
|
||||
|
||||
service {
|
||||
port = "port1"
|
||||
name = "${NOMAD_TASK_NAME}"
|
||||
provider = "nomad"
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
|
||||
]
|
||||
|
||||
check {
|
||||
type = "tcp"
|
||||
port = "port1"
|
||||
interval = "1m"
|
||||
timeout = "4s"
|
||||
}
|
||||
|
||||
check_restart {
|
||||
limit = 0
|
||||
grace = "1m"
|
||||
}
|
||||
} // service
|
||||
|
||||
} // task
|
||||
} // group
|
||||
} // job
|
||||
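The {{ ... }} tokens in valentina.hcl are Ansible/Jinja2 variables that are rendered from the encrypted vault before the job spec is submitted to Nomad. A hypothetical sketch of what one rendered stanza might look like, using made-up placeholder values rather than anything from this repository:

env {
    VALENTINA_LOG_LEVEL           = "INFO"                     // literal in the template
    VALENTINA_MONGO_DATABASE_NAME = "valentina"                // placeholder value
    VALENTINA_MONGO_URI           = "mongodb://10.0.0.5:27017" // placeholder value
}

config {
    image              = "ghcr.io/natelandau/valentina:v2.0.0" // placeholder version tag
    image_pull_timeout = "10m"
}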
@@ -58,6 +58,7 @@ job "whoogle" {
            service {
                port = "whoogle"
                name = "${NOMAD_JOB_NAME}"
+                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -77,7 +78,6 @@ job "whoogle" {
                check_restart {
                    limit           = 0
                    grace           = "1m"
-                    ignore_warnings = true
                }
            } // service
@@ -28,7 +28,7 @@ job "wikijs" {
        }
    }

-    task "await_db_filesytem" {
+    task "await_db_filesystem" {

        constraint {
            attribute = "${node.unique.name}"
@@ -56,7 +56,7 @@ job "wikijs" {
        }
    } // /task

-    task "await_backup_filesytem" {
+    task "await_backup_filesystem" {

        constraint {
            attribute = "${node.unique.name}"
@@ -122,6 +122,7 @@ job "wikijs" {
        service {
            port = "db"
            name = "wikijsdb"
+            provider = "nomad"
            check {
                type = "tcp"
                port = "db"
@@ -131,7 +132,6 @@ job "wikijs" {
            check_restart {
                limit           = 2
                grace           = "1m"
-                ignore_warnings = true
            }
        }

@@ -180,7 +180,7 @@ group "wikijs_app_group" {
        }
    } // /task

-    task "await_filesytem" {
+    task "await_filesystem" {
        driver = "docker"

        config {
@@ -225,6 +225,7 @@ group "wikijs_app_group" {
        service {
            port = "http"
            name = "wikijs"
+            provider = "nomad"
            tags = [
                "traefik.enable=true",
                "traefik.http.routers.wikijs.rule=Host(`wiki.{{ homelab_domain_name }}`)",
@@ -241,7 +242,6 @@ group "wikijs_app_group" {
            check_restart {
                limit           = 3
                grace           = "30s"
-                ignore_warnings = true
            }
        } // /service
@@ -20,6 +20,7 @@ _mainScript_() {
            readarr
            sonarr
            uptimekuma
+            gitea
        )
    fi

@@ -131,7 +132,7 @@ _mainScript_() {
            BACKUP_RETENTION_HOURLY=2
            BACKUP_RETENTION_DAILY=6
            BACKUP_RETENTION_WEEKLY=3
-            BACKUP_RETENTION_MONTHLY=2
+            BACKUP_RETENTION_MONTHLY=4
            ;;
    esac
332 vault.yml
@@ -1,118 +1,216 @@
$ANSIBLE_VAULT;1.1;AES256
-35366634393265303030366466303232616338633038313738633637383439356439616536666230
-6566643530623337323034306366613935663334313934310a636235653531316237393231376362
-33663935366131663137363465666363336630386362313333633762656461636239366234633832
-3538353463356335360a323030643238323034343666376230356465396639636563316532373638
-35366637366663303164376661636563346330313932343462326239626264633262303739383831
-31333134613534643265643433323065303833326662346466633931373337326233303633363032
-66333336373865313333626566386665653833343638376264356430383764316134333231366466
-62336534666565343839393237356139393738396333393337666631303461373362343664396665
-63343161613462653866616566363631346566636639626138316539353362616261666337386635
-62356262363564376334336163613035643336656331653562306433363161393435343431663137
-66663936623834666364303333386335353961373031383164623766323836383462363231396263
-34343662336637633262333530623039376534643966653839346236363166646564613333633366
-33363534616466393137366234633030663036613263383733313235353364613864316139356330
-38343439346661613136316235326430326437646135636637343665663031393262653661396331
-39653739666364666564633364636231323237366265323631333234306631386362666135336461
-35646564643631666663336237636435626338346663633038353964303764626236373561323763
-34643565656462323764623263383037663735323364396437653332376137356263633963306332
-39633339366236313063643665356366346138616434316332643731666634366336623064653361
-63393134643630313632396434643131646464323737343133613364333465393834656236616134
-30313961346236326563616263373463616432393962663262616232356663636439643731383930
-32326664306563623665633164373932356163356361663465363362303661396662386630323137
-32383333656435613762393430303163383135393037363763333139633239666639303538623134
-32386635663962363939373365613138316435366433303863326561613463306338396136393965
-34333961383035383135333561313331336565383031356133626530306163333666333564353262
-38646434643234303363383965636339323633326330663736393461383461303661353365663631
-33343831356135653139633463336330646634363639326635653863343632663466336639313962
-65306438613933386664336138613066326364343738633531356664343664646464396162343861
-36663030643762343938633564373531663430303536643665613532313630636461646235666335
-62613634656232373936363439363766316561373937386261613861396566303834376134666564
-35396330636166316239336433323939363839636361643630353263663233303166313863636364
-34363134363161643234643134663361373237316466626363646264643530343064666464393166
-66366561356336616663393064376162643731343532663436646432366331643066396232393432
-63336633313963383132333639626130623737346137646561303338623136306361656630396364
-36306234643161643864313334316634396233313831613830393865353763653963656632363865
-34346439356166363839343063313263396437366163343734326162346166353465313163313236
-35343531333438303561393137323831303063353466666463303835653630353630393836393236
-32643035636335363137303134333735343964646130306339663137646261366635353632613533
-35303636373465633831353439376464386132616238613336366134383037376165396365353436
-66633937656162346661326136343266313937393436353532656634366535653762633930393239
-62383862356165336435616666346238646666613066323262323530356534373262633861646466
-66643935363334623264373338663362623439313138666338363732386666383739636162653763
-32666439316632653633363266343365393366373834323065353335613563306135383432613433
-61633835326565386662313265356536613237313364366163313562393836613061616432316638
-33376531663533376435383437393539663565616439666438646232663732663063343666646631
-36366364353339323262666630363932616461323833306666616365343530646536326363613232
-65313031333064396662363736316137656161393865383135366539636432386539623837353634
-62313638666564396462666334616365323932396236633932613362633166346265613161363863
-39396464663966353565393662363633366237323066306436616437363666666635343265666435
-32363162643761666639336464383430366565323862353161303338333232326335303462653938
-61333162306132633637653736623033373164343463333933666438326534303730613862383035
-39393939323561333738653465306165643461366534313537633162313638393630393361623432
-34306264316565333334303633323836343162373738636161656565313134643262343533666434
-37623962626263353062333939633662316663316238636331646230313861363364326636653365
-37343761626437663832346266333634666361333361313638626639633934646335613062626365
-31656132643034303032623365613530306436383437633761636238336139373739313836393336
-66653066633962333730643034653032626530663731633462393937326236656362356236666333
-62343139646139303433393163613037623963633230366236396434643163316664616435386436
-61616237366130663662643162613730303033376334333066393432333032613830316262333763
-31663936663239653361633634323736306363323864666635633465376363323838326366663630
-39383463343038666564653663616161336266313563633731623335373732343732383164623431
-39333262346539373937386531386466373863323232653265383064643863303638326566313765
-30376565643462643439316431306331346438633331616437613762323138363061336630353661
-61653139333962373261323063386231346266323762306433613363643230366265366239623832
-38353562393064633537373761643539313234333136333530343536393033313131393932633637
-63383066623930376561656436396564353264643630636332653862613933636630333633396130
-33346638663033636436626631323330336430313738313465323737386434613538346564633938
-62653236643033313062336664323335656132383430313831636334326430383938653762313835
-65656430656363653735343738326534616335636130646539363066393436383961316135346262
-30656131353733616562613239663965383864313263653063393635623838633538303433323437
-61636162363663306166343464333534316131346231303663336365303363656635363066353131
-38623263366136396466383538323637626236633163663033613934303766613931313135613132
-39346237656635303166363031353866663833303537666330346130353563373763623530373464
-65666338313039633732626564393161663335613264306637646332643133356135613366353264
-62376466613037326463623131373937303039356138336337333163303636303335333736376563
-63313730623664356165653861303139313039616231326136313634623136613365313466373561
-35313466313539383838373838386131653638653430613863313430626139353465626636386539
-37383034326561393666383566326364623337376432633864613630353662663665336232313064
-31396531313937373336383438646234343661643534316332633163653532633565613136343235
-31653739633035303430626364303961626433323835653638653839396231663662636666393563
-30346162326561636365393831333435333362356432646161633463313963346537643631393736
-66343966313361333934616534313037636238613830623938623563393230376635316631636534
-62376136386137313737646135376463343831663162616566373764643930386539306231613964
-65383633313037323234616364623139623834303466306666613334346131633531643932623362
-31623337636434623531376437623033306235346366376436396336346634303138613262373464
-30363735633663363364613139666164383436306666363362346633346663346366393634333335
-61326234303734626465616530346339303536636665626436623237383434636362393034346562
-64633765363531346138376166393030666433396339333662663036313031306434626236383664
-63636633316138653433366337303033636330333761626162373435633062366639396362376537
-30313436353964613838323332353137383433323265343831356334393238666438323735313630
-66313534646636633866313533353533643531356266643433353137653130386165353936616438
-31353331383461313130383035663837646439323366623935386236663262653165313432326639
-65316339356661623436386537353335343332616362323463613966383736306638396630653437
-62376232353763336365613438383936646265623261306338613663343864363839313663343030
-37396537626435303036613531396239353439663930363263373632333536376364336436383961
-31613237393430326663366531633633613362373265646437303530656564383830366164643465
-31333162373037323836396234383265333832376461383530383139353562666635386661383262
-31323162363834376432313766393965373763313664383966346464313865343261333030653033
-37326564663836323963663735353432653938373632356564653830616562656333383563366432
-32366464316265613565613830633264613634313134373530386562313163656434356164356139
-62643065613638323735366332316366383236653762373436393631363039636333346431666137
-33643239323062343537353061646138346661643262303363326137356461356439663166653739
-39636534653935376433633761373630656566393535373962353762646165663566613235646134
-32313838383538363532643965376464626361663431393165663238373762636337366434666437
-32636234613264666633366663616639386236386333623766383735383431323964343965643362
-39326266366162333266343133646335653837393962633731613230653665366462393931613462
-33633966626132633832653634626633393238643238393064646233663064346333653164623336
-34373634376335383639346338663830653061386161306134336530376161333637333733666533
-63626464393435626361323333656639616431333638383163626662323733613564613430323532
-31343262616133333965633462366636333762623764326231346437666634663339393563666664
-65653536333834643937326464333464353135363738663031303162396535616139663535336535
-65343062646465373831303235303933343030346562633561653534313263333033313531656430
-64623833653832323134333138663966313939303739376131383133366131323530353961633765
-63386436373262313334323932646232616435646665323736356433376332653530326230346331
-63613163343365623937336564643431653963383333363664663934633962316663306537376236
-62333531353833326232613565666563613864653364363333613737663965366133383231623839
-65323161613533343130316635636630633931633936666662303330326262376233376562633865
-3930
+62396563613031656439666537383564633031376439653235303834623532333435393666383238
+3666663262633462633732643164636635306336353834610a313664366434613561633835366137
+34303561396435626466316432643135353739326261636335346261626536616132333466663365
+6331373264396164330a653837393237633837613466353235396531653932363436636236373939
+66373031343362643237626531303231616261333737316237663965633065306666663235393063
+33663862303366343736373235383162336633653736393162373161636363633238633431396230
+65633638356262363439663530643763373863366465613539356131393337336666633332636635
+65666265393837633938656435653631356331313238373763393530363062396637623430383632
+38363662663564316136653431346464303761386639363436633466626137663966346333333031
+33643433643765366237373237653938656635643737633931656666313335626265613239623531
+31343033336132656638353533663934366464343430353734326439653235313232356631356261
+62356537323766356262323931366266386362386565323765313933346435363534646434343365
+65666462313461326464623531356438323933646537363034303638646563636432393061326439
+62663061666238623335643038326135383631653331653936336535666331626539313665656438
+35653936653237333832343438336130336632636335613264366335316363343539353161643231
+33653536326335633164633563353139386532333130386438313966353238353631363531393537
+38663839373534613232393463663439356264613930336530646464353635333566613033313235
+64333664326638616439643439313734323163366435616265333033343636633934666630653637
+65373237343764353833366663323737653239363936346461303739656234343531373864316238
+35616430346263353061313330333865366638343732643362623264636538323538363930306665
+62653565633366363930333437633137616532393633313631656561653236383736353861333737
+31646431306334313038653833633566353434616363333039663661656631313565313465303865
+38353232643132366636356466373038343262353332393730623831633266643638626433613335
+65366566633332623161633330643762323165616561353635316366323235303135326139643939
+65323234626466633962376465373732356536386631346539373437303166376434393761376338
+61383038316136656536313661353538366235383861653433313530383837306130643762383661
+64336235353066333833333834326463653430346231346462336162333432326164633330653432
+37343936316565333233303833346262366339386561373066353839383866653634376530343035
+35653232306262393032623630316332626539613730386133666463346536303239623532383939
+64393365643435623363323838353764346465306465306130346430303939363566303663336536
+30663863393563336431396164336538653263386436383533376634383965383166323766656135
+62326635653465313536343230633435346639346638393631613337626130363739636234613431
+30396161366433313836386635346430313930326638616134666433666361323439643539643730
+66626365373866616335613134653566313637373535333966666332346537393364343432333035
+36346262303631636631376338313339616139613336346638373865366466643534393334356365
+38303637343539343830633833356430656563303932386163326266386435356330626663633363
+33363661333830323831353836646436306661616230643939323033653232663237613163383138
+63616138616232306563353465326639346363353135393365323833383365363966613231643262
+61343463353232303138343231636237333833333134623634343939366538313136633661333231
+30653431666161646363633834633633656561366232363362633539356161366531353639353734
+36396335333462306331653135633736663030373163663235323161303665613366336630326634
+30373765653135316134656665656331366362383533623530363564353232323630643765626138
+39396664366631353765616430616639646565653436356665633633383132626635653832653662
+30643262353531626565303431316664356665343031373932663561303565376430383039646564
+36623162336239663034633866326537383761623638653434313761336336643866666365363431
+63663535326532383233346466613431313438393632313664336230326531353039333566636662
+64373961386263643632663430353661316137633830323735366163616564333931303266643064
+63333530653331333436353666343533613365373130613634386136316639363331636635656334
+33373938343662306164653330636461653761366638643166616239373931343637306531373933
+63363533623066343261353366373562623061343635353533616236666337303366653662323632
+31326637323534323865663738393765393333313738313436323338373265356336383861356431
+36376236323937653561336337636266636165643762343431383061306661353734336437366337
+31366366346263373533313431353161316439613933366638623235396231353634323465663737
+62353334323361363134343762343235636263663664356336363264323962373832666366366263
+38643231656237386561636533383062356433383335643636623331343731313032383537623037
+62323863303830363961623664303632383466393564343762623638303632613264346130396534
+38383363316337646635616261653232313332343832386633343837393530373964323631666130
+32313562366631356538373737316231303039366562326664363863363637353564393832376139
+61316565653635653238326261343632653966646166326234356161333763363462663562386333
+30326362363836613666303964343039313461306139343262303334623034653039363634616439
+39666536333263616231383532343236646634383835316433386638386537383065666664323336
+39303566633535613834316231663530373238663730663331326262363039356130343832323365
+38303865396330393866623964623335373533656334306134336461313730663533313161333532
+32396236386534353737646539643835393164633433303462353266623066333037343766653865
+34623761396239313063643232313736386431643030663831623262373736363163336135626161
+64373631613137646637653466653335353736303032343432636230303131616364653063663432
+35313636653461623334376332666661353465623865623164316230626238346639643139373033
+31326666636431343462363231373161663239636635333566353861383532326165663936376337
+38356566373662306265376530333063656431373332373530336262636138323763313435323535
+38366165383365653563343461386564373430623737623938666230313662333235383261336263
+38303165333136376165303636383630613231623239363032373539356531633535393734303863
+30376631653136313762623934616532616662646336393464316162353731653461333132386132
+37666665396132336464353637326431313537366132663238396361613235626466636366396435
+34643237316665313639353336633865373335386535393232343038343237363565346639653733
+62643034306336336639333535383437363635316665383338636461393038633263333037353638
+36316533323833353037613332363165633662623031353666346261613065383665633266336434
+63343638383533653864396261306635363637306265623234353939393262636233303861313437
+31323665663432363164613065663636323165666464633833346232633262656534313663333766
+32633561353361323732656139613933343664336439616534346132643832626566376665656463
+31343130663131323062336133633162623863333732353331343165653638636165653861383363
+66373238303036633034666163353663666461666335393930373436323362656430383330383237
+66353130636265333434643434383865643536326237366366396132626533646665366261633361
+61623466323536336237333564663236653063646262633561393331363931303565623365386133
+33636430643436343663396563326532303438613337663737303434346131303661366439643233
+36363530313730646534643930376235356133653362363333343432323937336430666266363565
+62373963646630303134613563623563353961313263366332663662656166613631393865643235
+64623832376634306233333130366334343961323266303835353666326562346539323534316162
+63393766373333613733303265363862346636376561633931313031653539663138313335623033
+33396161336534383835333764323736383461373332353137666462376233306463316663393331
+32336535366434333837316433363133643139643366376339623165383237373563366130656532
+33626635646639373232623631383234613332613361663734383764643964616133333462623034
+31393630316661623066316361373965656637653263623637376262656566353339356630643466
+62376531303939363165623635656634313639343036633730643565663634626166393862366564
+63393038633366633332623064353062323738663065306637303039386363653932336266333965
+65363863643538393461303664373830613263313133633337666139656630616464323666613466
+35383466303063373438656664306666356664366134373432386239623533353433346430323637
+32613666396538653563643662666461636139623934626536373362623936333035343435616435
+38343238333862643837653037653566313738653938396163313662346466363966363736383838
+61396663616361626239646332393631653963663566346138393464323335353930343231343238
+32333838376265363538303939346564383735393664333430616165313364393836313730396466
+61623165356665373634363331323234646634613361326436366334656261356435653864343330
+61313139396235623831383630313261636334333334636163653033316537623964333335326365
+30333732393162613237336166323166666636633138353562383338366362363833366130616465
+31313932623236383935346537626130623837396161323066623763663466303865353537653733
+61666539303632666664333935326666363263626134653766383130646439306231653739316235
+62613732616336633964303264616239386636643266373262386361303030653037366631323563
+31326236663838326566336233323133393239663232663039323636313731333033316230313266
+32333330643638376638353266613430383066326464633337353337663736323730316262323165
+32623663373736326164616264623066313930623332376238393037343931303338303262323965
+34386632353435643064623534616436396366343930336334666335666563373432373538373362
+63363338363130363364346466656632363837323263656562656362383064396139323837343632
+36353937303934363133346338633563363761303866333863646666633736393061653064653162
+64396462346462323439323531313035373364383137383764343164393536393365663466626161
+30653633643232633063313665313636653237366566616530656338623939353765663264353432
+39316633396662366137373762636236316131353531616236653938613530386632356434643933
+66306135356631366438353932653131333138383036653163653230366132396562643733346561
+65613332636636656266333566363131336436363539643034313862353239393736393631353036
+63353134386539656261383432306431386537656530303639383464616162383236386666393235
+66343463336238363361393861613231343138653631623030323031353430653233623435646534
+30646164643939376139333664323136323030376434326266336335393336333766653961313665
+33346639323231663835373332636637613439303164393566323833383565393534636361623736
+37363664393066663535616463333336373035623036316263363031363266373630633632353237
+37333961313430663632646633613737353732663938376262363732376262633337616130326463
+62376266396334353034383434623030633435646135323830336466303030396337396263386131
+35616237353937383731663961656365356334313665383162303930623765396338353533613930
+62306630336662386435636539366462623965343763303732396332633863663934356337386263
+32356234643230626530643337353237323165353235343137666335653364353034646634373961
+33393363616563306434366631663234343532616638396165393939323661653163636265663534
+62623036366333633566376136653938623234626238363138636261383031336631313735636661
+34613432333935653138626334633431336633343737303438626234306330373130386566613066
+36316636373733323161636162386635653534623263616431623064656531626661613039343861
+36623537626333643936313238343733353038623836323234353531636337613163393062376666
+38636333626365363231623663323834396262616536303362356230303664346163663162646663
+62306331616336666462333966333739336337313164316266353264353334353837393163346132
+33333765353139376132663633616332383138396361383831616631376237376332396530643937
+32373936643133343538356561333235363132616462633862623066386266616535626634616635
+36373766646332646361373264626363613935396633323263633237633433333864623238653536
+30663430646236303063323865326363316161393064383666633432643861376164333961613031
+32663738336266356366353162353031386330623163626564666561653630616230376634396634
+65363935666436646361633837326636636134383236643132343739306331356534316565333264
+64646464636263323036356437336461633636353039616664323565333866393833363930613431
+36653764646261363931376332646636343830653036653331386263356132373433313338366239
+34626632616336373334643138343137663030653762646230353132623637613137633336313539
+39343830383462653031396162633730656631383438323133363335663363646465653831333337
+39343732663462343933316264353437613735353764623665666263663563663137373361383464
+33373136623666613065326563613732366430313561643966313832653165613565393761663835
+39386531356233623161626263646261643634643330623730383235303766636131393630353361
+61636661323330663963643534383634623466376363633437613364616634326133613363353038
+62383336303638373039613034386230636639623165626666666235386230343566386636386261
+35376465623737383165633065666439626638666538656230613265636261376163613064383630
+33656137636464613932653531643333333532396439613861373033306637656363663834336534
+34396332313230613962653539333635323663333862326235396263393332363835663833643733
+62613233323361363532383961333565326432333033383361303332323065336165633365663361
+38643837666136393465333936303933386636373035646532633665333137393036643830396132
+36646231393361313531376463623238643431303935393436303438633837643863636263353637
+32663032346662623861616538633535396230653933646336353338383161636538313334393565
+35633636343366323334623631386534666232666436303435323534303335326665636138306263
+65646362313937373837383132306133666435633161643936363532383135663362393866356662
+37343930323362663261386130356236333736613138326531646439373538323831363966333034
+65643836356366616636303331316638646164633639363735386633393865646364666434373838
+63313235313165313230623331373738646437663565613865623666353138343937346531626436
+65656365383432363931303238646431383064356330383239616533396534636135653266613631
+65386261656531663561313661396139373263633834323131666634663338656564393232303163
+35653862643634326362353134663130303839616435376537303763343339366463383532616563
+64636638303635333964373336663037303837653137663036393966326636303435646133393465
+33616565613765353165313230633461303730363231623530623538646662333434303534363266
+66616265356634663064616536643864633638383536363866323137636365303264613839623337
+38346431396437313239366266346331306430386137376339366535626335653066663364373762
+37636462316238666138323235643833643839346632343038393766393862636366353261646534
+38316130663465313433643265623561346532666565323030643537623465366336326435363763
+62663531623365386335666636383531306264303062636435373138393535623033643964333438
+31633034636535666133653830646339646464386232326162386565333563343466326265386539
+32643039373833343933313666653963643034303039656365396639383465616661616436613437
+38343764303335313063636266346463343439346235353763636662303564313265666533666263
+37336632633836316463356333643238393030643433613133643835376138353166336138303165
+37373837623562366436373730356666316233656135353662346262393539353136316265346464
+35393831336261316230343264316134353965303032306536326439383739666634623663623563
+66643363376132643737353862313536353933353132303931333066396330646139363563653534
+64653234613461303637663330333766333637643862336261373765326261646263633136336538
+33303161373336643737323235336632393137653935306337626436336161386263636339663933
+36396162313861663333656463663863323264333163343631333039653332633535623934653963
+65303038326536373339656635303635626136633031643138646137653865643830376362366436
+30366336366363643365373364643937363664613436376665616432363333653932326130343235
+36613131623730316462633834643131356337356164656339346539323933653461316435343039
+61386438636330326461633632666631363538656230343035656633353730626231373834323964
+38363762353736616566323831346164303630383662333162613639663637363533383734356665
+62393164343265386132356530303337626366623830373434376565336133393230346635623330
+36326463303665306136373165643764633339313638366532376336353432636632663434646566
+33333434643665356331633666356633623433363036393765356234613635363363663436623064
+62646534393336343961383137633339376661396264333265623831383033653938373130396164
+33396264363237313633383562333637376165316261313166356631636162346563386163393739
+63333031633166613966343339353234613965343234616537336666306564613031636137636261
+63386336653430373939306466653064633636623136306138663731386232303564313762643035
+30376634393961326636336135353036643935643431343666326331316338376639616138386435
+64666531353139653563303932353832306438663337383761663463613165323132343664393630
+39333862343264356266643439653639623434663235666136616234303735613636333630393032
+38643334383534636232303238356238353135343664303034323565366335653064333038376631
+32646661373331383664623337663831626563613964663039623333366363323032336330313130
+39303162356637626564313335386630656234323734656332663364643732643261386662643436
+31666333326537336362643062646165356164386630653464396335376562636432666532616536
+62353466383635356238396132336133633332366336636663303333656235636265396238323430
+32383431656162663964323862626631636361316364316563353138636138313263373566646261
+38626634663932346139383363346132353462353462323265363838323531303061633562386130
+35643465633266663463613365396139616130646635636439623633346332636333623166313936
+39313131613461623636366462613337316664623161393635383661656361633836646664633364
+30323361623866306532386136303562636130313464346138306637656431376136303062636236
+62623535633663656365633462366630343034366533346230336566313531646363396135373565
+63303433656131636666363032363161376534303966353730386139366437356262366334393431
+62343033353463646537656130353339336138633962323136353737373365366435353933313439
+39653532386263336333393062303436393361363735306561366663643639393838353338653366
+34643836303363363466613932653666303130356330303932353737346236323035633533393766
+34326630303863393138386261643163353963316232393232653438396233386661616263326430
+33393733353535303030653062326238393537316436323262643763376434663561323363613831
+34303932643236363835373463306630663461383636346561353237393233313163616363373138
+336232363362326263343834666337396137