Compare commits

42 Commits

Author SHA1 Message Date
Nathaniel Landau
855ff49ef1 feat: add speedtest-tracker to nomad services 2024-02-11 16:49:19 -05:00
Nathaniel Landau
12d1467369 feat: add hishtory-server to nomad services 2024-02-11 16:48:51 -05:00
Nathaniel Landau
55dea40077 fix(recyclarr): rules for sonarr v4 2024-02-09 16:26:50 -05:00
Nathaniel Landau
feb1fbedf4 fix: update job files 2024-01-09 08:53:40 -05:00
Nathaniel Landau
6b00bf557c feat: purge dangling Docker containers on job sync 2024-01-09 08:51:58 -05:00
Nathaniel Landau
ea9678eba6 build(vscode): use Ansible extension 2023-12-29 10:19:57 -05:00
Nathaniel Landau
1714dff877 feat: add gitea 2023-12-28 15:47:10 -05:00
Nathaniel Landau
7bde47d43a fix: cleanup jobs 2023-12-24 12:35:50 -05:00
Nathaniel Landau
394b34a5d1 fix: update jobs 2023-12-23 14:11:48 -05:00
Nathaniel Landau
846fb2bc31 fix: fix nomad configuration 2023-12-11 11:22:54 -05:00
Nathaniel Landau
eff9059bac feat(jobs): add jellyfin 2023-12-11 11:22:20 -05:00
Nathaniel Landau
5e35cf4400 fix: add ladders 2023-11-08 13:50:37 -05:00
Nathaniel Landau
7f94a62989 fix(sabnzbd): increase memory/cpu and add cron docker plugin 2023-10-22 16:52:40 -04:00
Nathaniel Landau
95f4c129ba build(deps): update dependencies 2023-10-21 22:24:01 -04:00
Nathaniel Landau
9a46bc9ebc feat(sabnzbd): add nomad job for sabnzbd 2023-10-21 22:19:23 -04:00
Nathaniel Landau
5b426da3ae feat: depreciate consul and use nomad service discovery 2023-10-21 22:18:23 -04:00
Nathaniel Landau
041649cc5e build(deps): bump dependencies 2023-09-15 16:34:28 -04:00
Nathaniel Landau
ce0cb6c5f1 fix(valentina): add new env vars 2023-09-03 15:50:24 -04:00
Nathaniel Landau
98d9a5a86f build(precommit): add typos checks 2023-08-30 08:34:07 -04:00
Nathaniel Landau
f7ba237d0d build(deps): update dependencies 2023-08-28 09:14:37 -04:00
Nathaniel Landau
e134616692 fix(recyclarr): migrate to v5 2023-08-28 08:49:03 -04:00
Nathaniel Landau
9194190591 build(deps): bump dependencies 2023-08-08 09:30:49 -04:00
Nathaniel Landau
2bb55f3d51 fix(valentina): update environment variables 2023-08-08 09:26:58 -04:00
Nathaniel Landau
7365e8b3d6 fix(ansible): update transfer method config 2023-08-08 09:25:55 -04:00
Nathaniel Landau
87c2a4e1b4 fix(consul): pull image from hashicorp/consul 2023-08-08 09:25:04 -04:00
Nathaniel Landau
edd9704258 feat: add valentain discord bot 2023-06-18 13:27:38 -04:00
Nathaniel Landau
cb4a0e9f8a build: add exclude_paths to ansible-lint 2023-05-24 14:30:01 -04:00
Nathaniel Landau
57c1a42f66 fix(nomad): bump to v1.5.6 2023-05-22 10:03:11 -04:00
Nathaniel Landau
8499f5029b fix(proxy): update traefik version and bump RAM allocation 2023-05-22 09:34:19 -04:00
Nathaniel Landau
47288456a5 build(deps): bump dependencies 2023-05-22 09:33:10 -04:00
Nathaniel Landau
0f35061a2c fix(recyclarr): move config to Nomad task template 2023-04-25 11:40:17 -04:00
Nathaniel Landau
2842e27282 build(deps): bump dependencies 2023-04-25 11:32:55 -04:00
Nathaniel Landau
d36212b7d7 style: pass ansible-lint 2023-04-25 11:32:29 -04:00
Nathaniel Landau
76f4af703e build: poetry venv in project folder 2023-03-31 10:14:14 -04:00
Nathaniel Landau
9bb7eeb439 fix: bump versions 2023-03-29 16:18:26 -04:00
Nathaniel Landau
5526024244 fix(nomad): run nomad as root user to enable docker plugin on raspberry pis
Nomad is running as root rather than the Nomad user due to the Docker driver not being started when cgroups v2 are enabled. More info: https://github.com/hashicorp/nomad/pull/16063
2023-03-16 22:44:52 -04:00
Nathaniel Landau
ec52175c5c fix(nomad): authelia requires additional capabilities 2023-03-16 21:48:38 -04:00
Nathaniel Landau
be56f2a308 fix: revert to nomad v1.4.6 2023-03-13 21:29:09 -04:00
Nathaniel Landau
a757ff0cf2 build: add ignore file for anslible-lint 2023-03-13 10:25:20 -04:00
Nathaniel Landau
d6c155bef1 fix: bump versions 2023-03-12 16:52:16 -04:00
Nathaniel Landau
440d570c87 fix: bump authelia version 2023-02-12 14:41:05 -05:00
Nathaniel Landau
049267cec7 ci: add poe pb to run playbook 2023-02-12 14:40:47 -05:00
82 changed files with 5123 additions and 4127 deletions

32
.ansible-lint-ignore Normal file

@@ -0,0 +1,32 @@
# This file contains ignores rule violations for ansible-lint
handlers/main.yml ignore-errors
handlers/main.yml name[casing]
main.yml name[casing]
main.yml name[missing]
tasks/backups.yml name[casing]
tasks/cluster_storage.yml name[casing]
tasks/consul.yml command-instead-of-module
tasks/consul.yml name[template]
tasks/consul.yml no-changed-when
tasks/debug.yml name[casing]
tasks/docker.yml name[casing]
tasks/docker.yml no-changed-when
tasks/interpolated_variables.yml name[casing]
tasks/logrotate.yml ignore-errors
tasks/logrotate.yml name[casing]
tasks/nomad.yml name[casing]
tasks/nomad.yml name[template]
tasks/orchestration_jobs.yml name[casing]
tasks/packages.yml ignore-errors
tasks/packages.yml name[casing]
tasks/pull_repositories.yml name[casing]
tasks/pull_repositories.yml no-changed-when
tasks/sanity.yml name[casing]
tasks/service_prometheus_nodeExporter.yml name[casing]
tasks/service_prometheus_nodeExporter.yml no-changed-when
tasks/tdarr.yml name[casing]
tasks/tdarr.yml no-changed-when
tasks/telegraf.yml name[casing]
tasks/telegraf.yml name[template]
tasks/telegraf.yml package-latest
vault.yml yaml[document-start]
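
A note on maintaining this list: recent ansible-lint releases can regenerate a file in exactly this path/rule format (assuming the --generate-ignore flag, available since ansible-lint 6.8), so the ignores above do not need to be curated by hand:

$ ansible-lint --generate-ignore   # rewrites .ansible-lint-ignore from current violations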

.ansible-lint.yml

@@ -10,9 +10,10 @@ exclude_paths:
- galaxy-roles/
- .cz.yaml
- vault.yml
- .venv/
- ansible_collections/
skip_list:
- command-instead-of-shell
- name[template]
- ignore-errors
- meta-incorrect
@@ -21,10 +22,11 @@ skip_list:
- role-name
- unnamed-task
- var-naming
- name[casing]
- latest[git]
warn_list:
- experimental
- risky-file-permissions
- command-instead-of-module
- no-changed-when
- command-instead-of-shell

.pre-commit-config.yaml

@@ -1,12 +1,12 @@
---
repos:
- repo: "https://github.com/commitizen-tools/commitizen"
rev: v2.40.0
rev: v3.14.1
hooks:
- id: "commitizen"
- repo: "https://github.com/pre-commit/pre-commit-hooks"
rev: v4.4.0
rev: v4.5.0
hooks:
- id: check-added-large-files
- id: check-ast
@@ -31,7 +31,7 @@ repos:
args: [--markdown-linebreak-ext=md]
- repo: "https://github.com/adrienverge/yamllint.git"
rev: v1.29.0
rev: v1.34.0
hooks:
- id: yamllint
files: \.(yaml|yml)$
@@ -43,6 +43,11 @@ repos:
)\.(yaml|yml)$
entry: yamllint --strict --config-file .yamllint.yml
- repo: "https://github.com/crate-ci/typos"
rev: v1.16.26
hooks:
- id: typos
- repo: local
hooks:
- id: vault-pre-commit
@@ -50,10 +55,14 @@ repos:
entry: scripts/ansible-vault-precommit.sh
language: system
# This calls a custom script. Remove if you don't need it.
- id: stopwords
name: check stopwords
entry: scripts/stopwords.sh
name: stopwords
entry: git-stopwords
# args: ["-v"]
language: system
pass_filenames: true
types: [text]
- id: ansible-lint
name: running ansible-lint
@@ -68,12 +77,6 @@ repos:
files: \.sh\.j2$
entry: shellcheck -x --exclude=1009,1054,1056,1072,1073,1083,2001,2148
- id: "run-shellscripts-bats-tests"
name: run bats unit tests
language: system
files: \.bats$
entry: bats -t
- id: "ansible-encryption-check"
name: Ansible Encryption Check
language: system
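
With the rev pins bumped and the typos hook added, the usual pre-commit workflow applies (standard pre-commit CLI, nothing specific to this repo):

$ pre-commit autoupdate        # bump each repo's rev to its latest tag
$ pre-commit run --all-files   # run every hook, including the new typos check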

9
.typos.toml Normal file

@@ -0,0 +1,9 @@
[default]
default.locale = "en_us"
[default.extend-words]
Hashi = "Hashi" # Hashicorpt
hishtory = "hishtory" # Used for the hishtory package
[files]
extend-exclude = ["galaxy-roles/"]
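
The extend-words entries whitelist "Hashi" and "hishtory" so the typos hook added above does not flag them. A quick local check (standard typos CLI; it auto-discovers .typos.toml):

$ typos                   # report spelling issues
$ typos --write-changes   # apply suggested fixes in place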

15
.vscode/settings.json vendored

@@ -1,5 +1,14 @@
{
"yaml.schemas": {
"https://raw.githubusercontent.com/ansible-community/schemas/main/f/ansible.json#/$defs/playbook": "file:///Users/natelandau/repos/ansible-homelab-config/main.yml"
}
"yaml.schemas": {
"https://raw.githubusercontent.com/ansible-community/schemas/main/f/ansible.json#/$defs/playbook": "file:///Users/natelandau/repos/ansible-homelab-config/main.yml"
},
"ansible.python.interpreterPath": "${workspaceFolder}/.venv/bin/python",
"files.associations": {
"**/tasks/*.yml": "ansible",
"**/handlers/*.yml": "ansible",
"main.yml": "ansible",
"inventory.yml": "ansible",
"default_variables.yml": "ansible",
"vault.yml": "ansible"
}
}

README.md

@@ -41,14 +41,13 @@ This playbook adds storage, services, applications, and configurations to a prev
- Custom shell scripts for backups and housekeeping
* **Syncs Nomad and Docker Compose job files** to servers:
- [ASN-to-IP](https://hub.docker.com/r/ddimick/asn-to-ip) - Used by Opnsense to build firewall aliases
- [Authelia](https://www.authelia.com/) - Open-source full-featured authentication server
- [Changedetection.io](https://github.com/dgtlmoon/changedetection.io) - Website change detection monitoring and notification service
- [Diun](https://crazymax.dev/diun/) - Docker Image Update Notifier is a CLI application
- [FreshRSS](https://freshrss.org/) - A containerized RSS reader
- [Gitea](https://about.gitea.com/) - Self-hosted Git service
- [Grafana](https://grafana.com/) - Operational dashboards
- [Grafana Loki](https://grafana.com/oss/loki/) - Log aggregation system
- [Headless Trunk](https://github.com/alpeware/chrome-headless-trunk) - Headless Chromium
- [iCloud Drive Docker](https://github.com/mandarons/icloud-drive-docker) - Backup files and photos from Apple iCloud
- [InfluxDB](https://www.influxdata.com/) - Time series database
- [Lidarr](https://lidarr.audio/) - Music collection manager

ansible.cfg

@@ -1,7 +1,7 @@
[defaults]
nocows = True
roles_path = ./galaxy-roles:./roles
collections_paths = ./
collections_path = ./
inventory = ./inventory.yml
stdout_callback = yaml
any_errors_fatal = True
@@ -9,4 +9,4 @@ display_skipped_hosts = False
vault_password_file = ./.password_file
[ssh_connection]
scp_if_ssh = True
transfer_method = smart
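
Newer ansible-core releases deprecate the plural collections_paths spelling in favor of the singular collections_path, and the ssh_connection transfer_method option has likewise been deprecated, which is presumably why transfer_method = smart is dropped here. To confirm the effective settings after the change:

$ ansible-config dump --only-changed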

default_variables.yml

@@ -1,17 +1,21 @@
---
# ---------------------------------- SOFTWARE VERSIONS
authelia_version: 4.37.3
consul_version: 1.14.2
influxdb_version: 1.8.10
nomad_version: 1.4.3
prometheus_verssion: 1.1.2
authelia_version: 4.37.5
backup_mongodb_version: 1.1.0
consul_version: 1.16.1
gitea_version: 1.21.3
influxdb_version: 1.11.1
nomad_version: 1.7.1
prometheus_verssion: 2.46.0
recyclarr_version: 6.0.2
speedtest_cli_version: 1.2.0
tdarr_installer_version: 2.00.13
telegraf_version: 1.25.0
traefik_version: "v2.9.6"
telegraf_version: 1.28.4
traefik_version: 2.10.7
valentina_version: 2.2.0
sabnzbd_version: 4.2.1
# ---------------------------------- SERVICE STATIC PORT MAPPINGS
authelia_port: "9091"
influxdb_port: "8086"
tdarr_node_port: "8267"
tdarr_server_port: "8266"
@@ -20,6 +24,7 @@ tdarr_webui_port: "8265"
# ---------------------------------- DIRECTORIES FOR SERVICE LOCAL STORAGE
# These folders must be created, even if empty, to allow mounting nomad local storage end-points
service_localfs_dirs:
- gitea
- influxdb
- lidarr
- prowlarr
@@ -99,6 +104,7 @@ apt_packages_list:
- logrotate
- lsof
- nano
- netcat
- net-tools
- nmap
- openssh-server
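
These version pins are interpolated into task templates and download URIs; for example, consul_version feeds the consul_download_uri built in tasks/consul.yml later in this diff. To find every consumer of a pin before bumping it:

$ grep -rn "consul_version" tasks/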

handlers/main.yml

@@ -3,80 +3,96 @@
- name: Mount shared storage on Mac
become: true
ansible.builtin.command:
cmd: automount -cv
cmd: automount -cv
register: automount_output
failed_when: automount_output.rc > 0
changed_when: automount_output.rc == 0
when:
- "'macs' in group_names"
- not ansible_check_mode
- "'macs' in group_names"
- not ansible_check_mode
listen: "mac_run_automount"
- name: Mount and unmount shared storage on Mac
become: true
ansible.builtin.command:
cmd: automount -cvu
cmd: automount -cvu
register: automount_output
failed_when: automount_output.rc > 0
changed_when: automount_output.rc == 0
when:
- "'macs' in group_names"
- not ansible_check_mode
- "'macs' in group_names"
- not ansible_check_mode
listen: "mac_run_automount_unmount"
##################################### TELEGRAF
- name: (Re)Start telegraf (Debian)
become: true
ansible.builtin.service:
name: telegraf
state: restarted
name: telegraf
state: restarted
register: telegraf_service
failed_when: telegraf_service.rc > 0
changed_when: telegraf_service.rc == 0
when:
- ansible_os_family == 'Debian'
- ansible_os_family == 'Debian'
listen: restart_telegraf
- name: (Re)Start telegraf
ansible.builtin.shell:
cmd: /usr/local/bin/brew services restart telegraf
executable: /usr/local/bin/bash
cmd: /usr/local/bin/brew services restart telegraf
executable: /usr/local/bin/bash
ignore_errors: true
register: telegraf_service
failed_when: telegraf_service.rc > 0
changed_when: telegraf_service.rc == 0
when:
- ansible_os_family == 'Darwin'
- ansible_os_family == 'Darwin'
listen: restart_telegraf
##################################### NOMAD
- name: restart nomad (Debian)
- name: Restart nomad (Debian)
become: true
ansible.builtin.systemd:
name: nomad
enabled: true
state: restarted
name: nomad
enabled: true
state: restarted
register: nomad_service
failed_when: nomad_service.rc > 0
changed_when: nomad_service.rc == 0
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "unload nomad agent (MacOSX)"
- name: "Unload nomad agent (MacOSX)"
ansible.builtin.command:
cmd: "launchctl unload -w {{ nomad_plist_macos }}"
failed_when: false
cmd: "launchctl unload -w {{ nomad_plist_macos }}"
register: nomad_service
changed_when: nomad_service.rc == 0
failed_when: nomad_service.rc > 0
when:
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "load the nomad agent (MacOSX)"
- name: "Load the nomad agent (MacOSX)"
ansible.builtin.command:
cmd: "launchctl load -w {{ nomad_plist_macos }}"
cmd: "launchctl load -w {{ nomad_plist_macos }}"
register: nomad_service
changed_when: nomad_service.rc == 0
failed_when: nomad_service.rc > 0
when:
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "ensure nomad is really running"
- name: "Ensure nomad is really running"
ansible.builtin.shell:
cmd: "sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
cmd: "set -o pipefail && sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
register: node_status_response
failed_when: node_status_response.rc > 0
changed_when: false
changed_when: node_status_response.rc == 0
when: "'nostart' not in ansible_run_tags"
listen: "restart nomad"
# - name: "Ensure sure Nomad service is really running"
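
The added set -o pipefail matters because, without it, the task's exit status is grep's alone and a failing nomad command is masked. A minimal illustration:

$ false | cat; echo $?                    # prints 0: the upstream failure is masked
$ set -o pipefail; false | cat; echo $?   # prints 1: the upstream failure propagates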

inventory.yml

@@ -42,7 +42,7 @@ all:
pis:
hosts:
rpi1:
ansible_host: 10.0.30.91
ansible_host: "{{ rpi1_ip_address }}"
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi1"
@@ -58,7 +58,7 @@ all:
manage_apt_packages_list: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
rpi2:
ansible_host: 10.0.30.92
ansible_host: "{{ rpi2_ip_address }}"
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi2"
@@ -72,7 +72,7 @@ all:
manage_apt_packages_list: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
rpi3:
ansible_host: 10.0.30.93
ansible_host: "{{ rpi3_ip_address }}"
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi3"
@@ -86,7 +86,7 @@ all:
manage_apt_packages_list: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
rpi4:
ansible_host: 10.0.30.94
ansible_host: "{{ rpi4_ip_address }}"
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi4"
@@ -102,7 +102,7 @@ all:
macs:
hosts:
macmini:
ansible_host: 10.0.0.4
ansible_host: "{{ macmini_ip_address }}"
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/macMini"
@@ -117,7 +117,7 @@ all:
manage_homebrew_package_list: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
imac:
ansible_host: 10.0.0.25
ansible_host: "{{ imac_ip_address }}"
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/imac"
@@ -129,7 +129,7 @@ all:
is_shared_storage_client: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
skimmbook:
ansible_host: 10.0.0.21
ansible_host: "{{ skimmbook_ip_address }}"
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
@@ -140,22 +140,11 @@ all:
is_tdarr_node: true
is_shared_storage_client: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
vpnmac:
ansible_host: 10.0.90.2
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
ansible_python_interpreter: "/Users/natelandau/.pyenv/shims/python"
ansible_port: 22
mac_arm: true
manage_homebrew_package_list: true
is_tdarr_node: true
ansible_ssh_extra_args: "-o IdentitiesOnly=yes"
nas:
hosts:
synology:
ansible_host: 10.0.0.6
synology_second_ip: 10.0.30.6
ansible_host: "{{ synology_ip_address_1 }}"
synology_second_ip: "{{ synology_ip_address_2 }}"
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ synology_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/synology"
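
Every hardcoded address is replaced with a templated variable, so the real IPs presumably now live alongside the other secrets in the encrypted vault.yml (loaded via vars_files in main.yml). Editing that file uses the vault password file already configured in ansible.cfg:

$ ansible-vault edit vault.yml --vault-password-file .password_file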

138
main.yml

@@ -1,79 +1,79 @@
---
- hosts: all
name: "Running playbook"
- name: "Running playbook"
hosts: all
serial: 1
vars_files:
- default_variables.yml
- vault.yml
- default_variables.yml
- vault.yml
pre_tasks:
- name: Run sanity checks
ansible.builtin.import_tasks: tasks/sanity.yml
tags: ["always", "sanity"]
- name: populate service facts
ansible.builtin.service_facts:
tags: ["nomad", "consul"]
- name: Run debug tasks
ansible.builtin.import_tasks: tasks/debug.yml
tags: [never, debug]
- name: populate device specific variables
ansible.builtin.import_tasks: tasks/interpolated_variables.yml
tags: ["always"]
- name: Ensure we have up-to-date packages
ansible.builtin.import_tasks: tasks/packages.yml
tags: ["packages", "update"]
- name: Set clean nomad_jobs_dir variable
ansible.builtin.set_fact:
clean_nomad_jobs: true
tags: ["never", "clean"]
- name: Run sanity checks
ansible.builtin.import_tasks: tasks/sanity.yml
tags: ["always", "sanity"]
- name: Populate service facts
ansible.builtin.service_facts:
tags: ["nomad", "consul"]
- name: Run debug tasks
ansible.builtin.import_tasks: tasks/debug.yml
tags: [never, debug]
- name: Populate device specific variables
ansible.builtin.import_tasks: tasks/interpolated_variables.yml
tags: ["always"]
- name: Ensure we have up-to-date packages
ansible.builtin.import_tasks: tasks/packages.yml
tags: ["packages", "update"]
- name: Set clean nomad_jobs_dir variable
ansible.builtin.set_fact:
clean_nomad_jobs: true
tags: ["never", "clean"]
tasks:
- name: Configure cluster NFS mounts
ansible.builtin.import_tasks: tasks/cluster_storage.yml
tags: ["storage"]
when:
- is_nomad_client or is_nomad_server or is_shared_storage_client
- name: Install Docker
ansible.builtin.import_tasks: tasks/docker.yml
tags: ["docker"]
when: "'nas' not in group_names"
- name: Install and Upgrade Consul
ansible.builtin.import_tasks: tasks/consul.yml
tags: ["consul"]
when: is_consul_client or is_consul_server
- name: Install and Upgrade Nomad
ansible.builtin.import_tasks: tasks/nomad.yml
tags: ["nomad"]
when: is_nomad_client or is_nomad_server
- name: Orchestration Jobs
ansible.builtin.import_tasks: tasks/orchestration_jobs.yml
tags: ["jobs", "update"]
- name: Prometheus Node Exporter
ansible.builtin.import_tasks: tasks/service_prometheus_nodeExporter.yml
tags: ["prometheus_exporter"]
when:
- is_prometheus_node
- "'pis' in group_names"
- name: Install backup scripts
ansible.builtin.import_tasks: tasks/backups.yml
tags: ["backup", "backups"]
when: is_nomad_client or is_nomad_server
- name: Install and configure Telegraf
ansible.builtin.import_tasks: tasks/telegraf.yml
tags: ["telegraf"]
when: is_telegraf_client
- name: Pull repositories
ansible.builtin.import_tasks: tasks/pull_repositories.yml
tags: ["never", "update", "repos"]
- name: Configure log rotate
ansible.builtin.import_tasks: tasks/logrotate.yml
tags: ["logrotate"]
when: is_cluster_leader
- name: Install and configure tdarr
ansible.builtin.import_tasks: tasks/tdarr.yml
tags: ["tdarr"]
when: is_tdarr_server or is_tdarr_node
- name: Configure cluster NFS mounts
ansible.builtin.import_tasks: tasks/cluster_storage.yml
tags: ["storage"]
when:
- is_nomad_client or is_nomad_server or is_shared_storage_client
- name: Install Docker
ansible.builtin.import_tasks: tasks/docker.yml
tags: ["docker"]
when: "'nas' not in group_names"
- name: Install and Upgrade Consul
ansible.builtin.import_tasks: tasks/consul.yml
tags: ["consul"]
when: is_consul_client or is_consul_server
- name: Install and Upgrade Nomad
ansible.builtin.import_tasks: tasks/nomad.yml
tags: ["nomad"]
when: is_nomad_client or is_nomad_server
- name: Orchestration Jobs
ansible.builtin.import_tasks: tasks/orchestration_jobs.yml
tags: ["jobs", "update"]
- name: Prometheus Node Exporter
ansible.builtin.import_tasks: tasks/service_prometheus_nodeExporter.yml
tags: ["prometheus_exporter"]
when:
- is_prometheus_node
- "'pis' in group_names"
- name: Install backup scripts
ansible.builtin.import_tasks: tasks/backups.yml
tags: ["backup", "backups"]
when: is_nomad_client or is_nomad_server
- name: Install and configure Telegraf
ansible.builtin.import_tasks: tasks/telegraf.yml
tags: ["telegraf"]
when: is_telegraf_client
- name: Pull repositories
ansible.builtin.import_tasks: tasks/pull_repositories.yml
tags: ["never", "update", "repos"]
- name: Configure log rotate
ansible.builtin.import_tasks: tasks/logrotate.yml
tags: ["logrotate"]
when: is_cluster_leader
- name: Install and configure tdarr
ansible.builtin.import_tasks: tasks/tdarr.yml
tags: ["tdarr"]
when: is_tdarr_server or is_tdarr_node
handlers:
- ansible.builtin.import_tasks: handlers/main.yml
- ansible.builtin.import_tasks: handlers/main.yml
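
The restructured play keeps the same tags, so targeted runs still work as before; for example, to refresh only the Nomad install using the vault password file from ansible.cfg:

$ ansible-playbook --vault-password-file .password_file main.yml -i inventory.yml --tags nomad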

1147
poetry.lock generated

File diff suppressed because it is too large

2
poetry.toml Normal file

@@ -0,0 +1,2 @@
[virtualenvs]
in-project = true
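
With in-project = true, poetry install creates the virtualenv at .venv/ inside the repo, which is what the new .venv/ exclude in .ansible-lint.yml and the interpreterPath in .vscode/settings.json both point at:

$ poetry install   # creates ./.venv and installs the locked dependencies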

pyproject.toml

@@ -7,18 +7,26 @@
version = "0.2.0"
[tool.poetry.dependencies]
ansible = "^7.2.0"
ansible-lint = { version = "^6.12.1", markers = "platform_system != 'Windows'" }
ansible = "^8.6.0"
ansible-lint = { version = "^6.18.0", markers = "platform_system != 'Windows'" }
commitizen = "^2.40.0"
poethepoet = "^0.18.1"
pre-commit = "^3.0.4"
pre-commit = "^3.3.3"
python = "^3.9"
yamllint = "^1.29.0"
yamllint = "^1.32.0"
[tool.poetry.group.dev.dependencies]
black = "^23.11.0"
sh = "^2.0.6"
typos = "^1.16.23"
[build-system]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core"]
[tool.black]
line-length = 100
[tool.commitizen]
bump_message = "bump(release): v$current_version → v$new_version"
tag_format = "v$version"
@@ -27,11 +35,18 @@
version_files = ["pyproject.toml:version"]
[tool.poe.tasks]
pb = """
ansible-playbook
--vault-password-file .password_file
main.yml
-i inventory.yml
"""
[tool.poe.tasks.lint]
help = "Run linters"
[[tool.poe.tasks.lint.sequence]]
shell = "yamllint --strict --config-file .yamllint.yml tasks/ handlers/ main.yml inventory.yml default_variables.yml"
cmd = "yamllint --strict --config-file .yamllint.yml tasks/ handlers/ main.yml inventory.yml default_variables.yml"
[[tool.poe.tasks.lint.sequence]]
shell = "ansible-lint --force-color --config-file .ansible-lint.yml"
cmd = "ansible-lint --force-color --config-file .ansible-lint.yml"

scripts/stopwords.sh

@@ -1,821 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2317
_mainScript_() {
_customStopWords_() {
# DESC: Check if any specified stop words are in the commit diff. If found, the pre-commit hook will exit with a non-zero exit code.
# ARGS:
# $1 (Required): Path to file
# OUTS:
# 0: Success
# 1: Failure
# USAGE:
# _customStopWords_ "/path/to/file.sh"
# NOTE:
# Requires a plaintext stopword file located at
# `~/.git_stop_words` containing one stopword per line.
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local _gitDiffTmp
local FILE_TO_CHECK="${1}"
_gitDiffTmp="${TMP_DIR}/${RANDOM}.${RANDOM}.${RANDOM}.diff.txt"
if [ -f "${STOP_WORD_FILE}" ]; then
if [[ $(basename "${STOP_WORD_FILE}") == "$(basename "${FILE_TO_CHECK}")" ]]; then
debug "$(basename "${1}"): Don't check stop words file for stop words."
return 0
fi
debug "$(basename "${FILE_TO_CHECK}"): Checking for stop words..."
# remove blank lines from stopwords file
sed '/^$/d' "${STOP_WORD_FILE}" >"${TMP_DIR}/pattern_file.txt"
# Check for stopwords
if git diff --cached -- "${FILE_TO_CHECK}" | grep -i -q "new file mode"; then
if grep -i --file="${TMP_DIR}/pattern_file.txt" "${FILE_TO_CHECK}"; then
return 1
else
return 0
fi
else
# Add diff to a temporary file
git diff --cached -- "${FILE_TO_CHECK}" | grep '^+' >"${_gitDiffTmp}"
if grep -i --file="${TMP_DIR}/pattern_file.txt" "${_gitDiffTmp}"; then
return 1
else
return 0
fi
fi
else
notice "Could not find git stopwords file expected at '${STOP_WORD_FILE}'. Continuing..."
return 0
fi
}
# Don't lint binary files
if [[ ${ARGS[0]} =~ \.(jpg|jpeg|gif|png|exe|zip|gzip|tiff|tar|dmg|ttf|otf|m4a|mp3|mkv|mov|avi|eot|svg|woff2?|aac|wav|flac|pdf|doc|xls|ppt|7z|bin|dmg|dat|sql|ico|mpe?g)$ ]]; then
_safeExit_ 0
fi
if ! _customStopWords_ "${ARGS[0]}"; then
error "Stop words found in ${ARGS[0]}"
_safeExit_ 1
fi
}
# end _mainScript_
# ################################## Flags and defaults
# Required variables
LOGFILE="${HOME}/logs/$(basename "$0").log"
QUIET=false
LOGLEVEL=ERROR
VERBOSE=false
FORCE=false
DRYRUN=false
declare -a ARGS=()
# Script specific
LOGLEVEL=NONE
STOP_WORD_FILE="${HOME}/.git_stop_words"
shopt -s nocasematch
# ################################## Custom utility functions (Pasted from repository)
# ################################## Functions required for this template to work
_setColors_() {
# DESC:
# Sets colors use for alerts.
# ARGS:
# None
# OUTS:
# None
# USAGE:
# printf "%s\n" "${blue}Some text${reset}"
if tput setaf 1 >/dev/null 2>&1; then
bold=$(tput bold)
underline=$(tput smul)
reverse=$(tput rev)
reset=$(tput sgr0)
if [[ $(tput colors) -ge 256 ]] >/dev/null 2>&1; then
white=$(tput setaf 231)
blue=$(tput setaf 38)
yellow=$(tput setaf 11)
green=$(tput setaf 82)
red=$(tput setaf 9)
purple=$(tput setaf 171)
gray=$(tput setaf 250)
else
white=$(tput setaf 7)
blue=$(tput setaf 38)
yellow=$(tput setaf 3)
green=$(tput setaf 2)
red=$(tput setaf 9)
purple=$(tput setaf 13)
gray=$(tput setaf 7)
fi
else
bold="\033[4;37m"
reset="\033[0m"
underline="\033[4;37m"
# shellcheck disable=SC2034
reverse=""
white="\033[0;37m"
blue="\033[0;34m"
yellow="\033[0;33m"
green="\033[1;32m"
red="\033[0;31m"
purple="\033[0;35m"
gray="\033[0;37m"
fi
}
_alert_() {
# DESC:
# Controls all printing of messages to log files and stdout.
# ARGS:
# $1 (required) - The type of alert to print
# (success, header, notice, dryrun, debug, warning, error,
# fatal, info, input)
# $2 (required) - The message to be printed to stdout and/or a log file
# $3 (optional) - Pass '${LINENO}' to print the line number where the _alert_ was triggered
# OUTS:
# stdout: The message is printed to stdout
# log file: The message is printed to a log file
# USAGE:
# [_alertType] "[MESSAGE]" "${LINENO}"
# NOTES:
# - The colors of each alert type are set in this function
# - For specified alert types, the funcstack will be printed
local _color
local _alertType="${1}"
local _message="${2}"
local _line="${3-}" # Optional line number
[[ $# -lt 2 ]] && fatal 'Missing required argument to _alert_'
if [[ -n ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}(line: ${_line}) $(_printFuncStack_)"
elif [[ -n ${_line} && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}(line: ${_line})"
elif [[ -z ${_line} && ${_alertType} =~ ^fatal && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}$(_printFuncStack_)"
fi
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_color="${bold}${red}"
elif [ "${_alertType}" == "info" ]; then
_color="${gray}"
elif [ "${_alertType}" == "warning" ]; then
_color="${red}"
elif [ "${_alertType}" == "success" ]; then
_color="${green}"
elif [ "${_alertType}" == "debug" ]; then
_color="${purple}"
elif [ "${_alertType}" == "header" ]; then
_color="${bold}${white}${underline}"
elif [ "${_alertType}" == "notice" ]; then
_color="${bold}"
elif [ "${_alertType}" == "input" ]; then
_color="${bold}${underline}"
elif [ "${_alertType}" = "dryrun" ]; then
_color="${blue}"
else
_color=""
fi
_writeToScreen_() {
("${QUIET}") && return 0 # Print to console when script is not 'quiet'
[[ ${VERBOSE} == false && ${_alertType} =~ ^(debug|verbose) ]] && return 0
if ! [[ -t 1 || -z ${TERM-} ]]; then # Don't use colors on non-recognized terminals
_color=""
reset=""
fi
if [[ ${_alertType} == header ]]; then
printf "${_color}%s${reset}\n" "${_message}"
else
printf "${_color}[%7s] %s${reset}\n" "${_alertType}" "${_message}"
fi
}
_writeToScreen_
_writeToLog_() {
[[ ${_alertType} == "input" ]] && return 0
[[ ${LOGLEVEL} =~ (off|OFF|Off) ]] && return 0
if [ -z "${LOGFILE-}" ]; then
LOGFILE="$(pwd)/$(basename "$0").log"
fi
[ ! -d "$(dirname "${LOGFILE}")" ] && mkdir -p "$(dirname "${LOGFILE}")"
[[ ! -f ${LOGFILE} ]] && touch "${LOGFILE}"
# Don't use colors in logs
local _cleanmessage
_cleanmessage="$(printf "%s" "${_message}" | sed -E 's/(\x1b)?\[(([0-9]{1,2})(;[0-9]{1,3}){0,2})?[mGK]//g')"
# Print message to log file
printf "%s [%7s] %s %s\n" "$(date +"%b %d %R:%S")" "${_alertType}" "[$(/bin/hostname)]" "${_cleanmessage}" >>"${LOGFILE}"
}
# Write specified log level data to logfile
case "${LOGLEVEL:-ERROR}" in
ALL | all | All)
_writeToLog_
;;
DEBUG | debug | Debug)
_writeToLog_
;;
INFO | info | Info)
if [[ ${_alertType} =~ ^(error|fatal|warning|info|notice|success) ]]; then
_writeToLog_
fi
;;
NOTICE | notice | Notice)
if [[ ${_alertType} =~ ^(error|fatal|warning|notice|success) ]]; then
_writeToLog_
fi
;;
WARN | warn | Warn)
if [[ ${_alertType} =~ ^(error|fatal|warning) ]]; then
_writeToLog_
fi
;;
ERROR | error | Error)
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_writeToLog_
fi
;;
FATAL | fatal | Fatal)
if [[ ${_alertType} =~ ^fatal ]]; then
_writeToLog_
fi
;;
OFF | off)
return 0
;;
*)
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_writeToLog_
fi
;;
esac
} # /_alert_
error() { _alert_ error "${1}" "${2-}"; }
warning() { _alert_ warning "${1}" "${2-}"; }
notice() { _alert_ notice "${1}" "${2-}"; }
info() { _alert_ info "${1}" "${2-}"; }
success() { _alert_ success "${1}" "${2-}"; }
dryrun() { _alert_ dryrun "${1}" "${2-}"; }
input() { _alert_ input "${1}" "${2-}"; }
header() { _alert_ header "${1}" "${2-}"; }
debug() { _alert_ debug "${1}" "${2-}"; }
fatal() {
_alert_ fatal "${1}" "${2-}"
_safeExit_ "1"
}
_printFuncStack_() {
# DESC:
# Prints the function stack in use. Used for debugging, and error reporting.
# ARGS:
# None
# OUTS:
# stdout: Prints [function]:[file]:[line]
# NOTE:
# Does not print functions from the alert class
local _i
declare -a _funcStackResponse=()
for ((_i = 1; _i < ${#BASH_SOURCE[@]}; _i++)); do
case "${FUNCNAME[${_i}]}" in
_alert_ | _trapCleanup_ | fatal | error | warning | notice | info | debug | dryrun | header | success)
continue
;;
*)
_funcStackResponse+=("${FUNCNAME[${_i}]}:$(basename "${BASH_SOURCE[${_i}]}"):${BASH_LINENO[_i - 1]}")
;;
esac
done
printf "( "
printf %s "${_funcStackResponse[0]}"
printf ' < %s' "${_funcStackResponse[@]:1}"
printf ' )\n'
}
_safeExit_() {
# DESC:
# Cleanup and exit from a script
# ARGS:
# $1 (optional) - Exit code (defaults to 0)
# OUTS:
# None
if [[ -d ${SCRIPT_LOCK-} ]]; then
if command rm -rf "${SCRIPT_LOCK}"; then
debug "Removing script lock"
else
warning "Script lock could not be removed. Try manually deleting ${yellow}'${SCRIPT_LOCK}'"
fi
fi
if [[ -n ${TMP_DIR-} && -d ${TMP_DIR-} ]]; then
if [[ ${1-} == 1 && -n "$(ls "${TMP_DIR}")" ]]; then
command rm -r "${TMP_DIR}"
else
command rm -r "${TMP_DIR}"
debug "Removing temp directory"
fi
fi
trap - INT TERM EXIT
exit "${1:-0}"
}
_trapCleanup_() {
# DESC:
# Log errors and cleanup from script when an error is trapped. Called by 'trap'
# ARGS:
# $1: Line number where error was trapped
# $2: Line number in function
# $3: Command executing at the time of the trap
# $4: Names of all shell functions currently in the execution call stack
# $5: Scriptname
# $6: $BASH_SOURCE
# USAGE:
# trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM ERR
# OUTS:
# Exits script with error code 1
local _line=${1-} # LINENO
local _linecallfunc=${2-}
local _command="${3-}"
local _funcstack="${4-}"
local _script="${5-}"
local _sourced="${6-}"
# Replace the cursor in-case 'tput civis' has been used
tput cnorm
if declare -f "fatal" &>/dev/null && declare -f "_printFuncStack_" &>/dev/null; then
_funcstack="'$(printf "%s" "${_funcstack}" | sed -E 's/ / < /g')'"
if [[ ${_script##*/} == "${_sourced##*/}" ]]; then
fatal "${7-} command: '${_command}' (line: ${_line}) [func: $(_printFuncStack_)]"
else
fatal "${7-} command: '${_command}' (func: ${_funcstack} called at line ${_linecallfunc} of '${_script##*/}') (line: ${_line} of '${_sourced##*/}') "
fi
else
printf "%s\n" "Fatal error trapped. Exiting..."
fi
if declare -f _safeExit_ &>/dev/null; then
_safeExit_ 1
else
exit 1
fi
}
_makeTempDir_() {
# DESC:
# Creates a temp directory to house temporary files
# ARGS:
# $1 (Optional) - First characters/word of directory name
# OUTS:
# Sets $TMP_DIR variable to the path of the temp directory
# USAGE:
# _makeTempDir_ "$(basename "$0")"
[ -d "${TMP_DIR-}" ] && return 0
if [ -n "${1-}" ]; then
TMP_DIR="${TMPDIR:-/tmp/}${1}.${RANDOM}.${RANDOM}.$$"
else
TMP_DIR="${TMPDIR:-/tmp/}$(basename "$0").${RANDOM}.${RANDOM}.${RANDOM}.$$"
fi
(umask 077 && mkdir "${TMP_DIR}") || {
fatal "Could not create temporary directory! Exiting."
}
debug "\$TMP_DIR=${TMP_DIR}"
}
# shellcheck disable=SC2120
_acquireScriptLock_() {
# DESC:
# Acquire script lock to prevent running the same script a second time before the
# first instance exits
# ARGS:
# $1 (optional) - Scope of script execution lock (system or user)
# OUTS:
# exports $SCRIPT_LOCK - Path to the directory indicating we have the script lock
# Exits script if lock cannot be acquired
# NOTE:
# If the lock was acquired it's automatically released in _safeExit_()
local _lockDir
if [[ ${1-} == 'system' ]]; then
_lockDir="${TMPDIR:-/tmp/}$(basename "$0").lock"
else
_lockDir="${TMPDIR:-/tmp/}$(basename "$0").${UID}.lock"
fi
if command mkdir "${_lockDir}" 2>/dev/null; then
readonly SCRIPT_LOCK="${_lockDir}"
debug "Acquired script lock: ${yellow}${SCRIPT_LOCK}${purple}"
else
if declare -f "_safeExit_" &>/dev/null; then
error "Unable to acquire script lock: ${yellow}${_lockDir}${red}"
fatal "If you trust the script isn't running, delete the lock dir"
else
printf "%s\n" "ERROR: Could not acquire script lock. If you trust the script isn't running, delete: ${_lockDir}"
exit 1
fi
fi
}
_setPATH_() {
# DESC:
# Add directories to $PATH so script can find executables
# ARGS:
# $@ - One or more paths
# OPTS:
# -x - Fail if directories are not found
# OUTS:
# 0: Success
# 1: Failure
# Adds items to $PATH
# USAGE:
# _setPATH_ "/usr/local/bin" "${HOME}/bin" "$(npm bin)"
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local opt
local OPTIND=1
local _failIfNotFound=false
while getopts ":xX" opt; do
case ${opt} in
x | X) _failIfNotFound=true ;;
*)
{
error "Unrecognized option '${1}' passed to _backupFile_" "${LINENO}"
return 1
}
;;
esac
done
shift $((OPTIND - 1))
local _newPath
for _newPath in "$@"; do
if [ -d "${_newPath}" ]; then
if ! printf "%s" "${PATH}" | grep -Eq "(^|:)${_newPath}($|:)"; then
if PATH="${_newPath}:${PATH}"; then
debug "Added '${_newPath}' to PATH"
else
debug "'${_newPath}' already in PATH"
fi
else
debug "_setPATH_: '${_newPath}' already exists in PATH"
fi
else
debug "_setPATH_: can not find: ${_newPath}"
if [[ ${_failIfNotFound} == true ]]; then
return 1
fi
continue
fi
done
return 0
}
_useGNUutils_() {
# DESC:
# Add GNU utilities to PATH to allow consistent use of sed/grep/tar/etc. on MacOS
# ARGS:
# None
# OUTS:
# 0 if successful
# 1 if unsuccessful
# PATH: Adds GNU utilities to the path
# USAGE:
# # if ! _useGNUUtils_; then exit 1; fi
# NOTES:
# GNU utilities can be added to MacOS using Homebrew
! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"
if _setPATH_ \
"/usr/local/opt/gnu-tar/libexec/gnubin" \
"/usr/local/opt/coreutils/libexec/gnubin" \
"/usr/local/opt/gnu-sed/libexec/gnubin" \
"/usr/local/opt/grep/libexec/gnubin" \
"/usr/local/opt/findutils/libexec/gnubin" \
"/opt/homebrew/opt/findutils/libexec/gnubin" \
"/opt/homebrew/opt/gnu-sed/libexec/gnubin" \
"/opt/homebrew/opt/grep/libexec/gnubin" \
"/opt/homebrew/opt/coreutils/libexec/gnubin" \
"/opt/homebrew/opt/gnu-tar/libexec/gnubin"; then
return 0
else
return 1
fi
}
_homebrewPath_() {
# DESC:
# Add homebrew bin dir to PATH
# ARGS:
# None
# OUTS:
# 0 if successful
# 1 if unsuccessful
# PATH: Adds homebrew bin directory to PATH
# USAGE:
# # if ! _homebrewPath_; then exit 1; fi
! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"
if _uname=$(command -v uname); then
if "${_uname}" | tr '[:upper:]' '[:lower:]' | grep -q 'darwin'; then
if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
return 0
else
return 1
fi
fi
else
if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
return 0
else
return 1
fi
fi
}
_parseOptions_() {
# DESC:
# Iterates through options passed to script and sets variables. Will break -ab into -a -b
# when needed and --foo=bar into --foo bar
# ARGS:
# $@ from command line
# OUTS:
# Sets array 'ARGS' containing all arguments passed to script that were not parsed as options
# USAGE:
# _parseOptions_ "$@"
# Iterate over options
local _optstring=h
declare -a _options
local _c
local i
while (($#)); do
case $1 in
# If option is of type -ab
-[!-]?*)
# Loop over each character starting with the second
for ((i = 1; i < ${#1}; i++)); do
_c=${1:i:1}
_options+=("-${_c}") # Add current char to options
# If option takes a required argument, and it's not the last char make
# the rest of the string its argument
if [[ ${_optstring} == *"${_c}:"* && -n ${1:i+1} ]]; then
_options+=("${1:i+1}")
break
fi
done
;;
# If option is of type --foo=bar
--?*=*) _options+=("${1%%=*}" "${1#*=}") ;;
# add --endopts for --
--) _options+=(--endopts) ;;
# Otherwise, nothing special
*) _options+=("$1") ;;
esac
shift
done
set -- "${_options[@]-}"
unset _options
# Read the options and set stuff
# shellcheck disable=SC2034
while [[ ${1-} == -?* ]]; do
case $1 in
# Custom options
# Common options
-h | --help)
_usage_
_safeExit_
;;
--loglevel)
shift
LOGLEVEL=${1}
;;
--logfile)
shift
LOGFILE="${1}"
;;
-n | --dryrun) DRYRUN=true ;;
-v | --verbose) VERBOSE=true ;;
-q | --quiet) QUIET=true ;;
--force) FORCE=true ;;
--endopts)
shift
break
;;
*)
if declare -f _safeExit_ &>/dev/null; then
fatal "invalid option: $1"
else
printf "%s\n" "ERROR: Invalid option: $1"
exit 1
fi
;;
esac
shift
done
if [[ -z ${*} || ${*} == null ]]; then
ARGS=()
else
ARGS+=("$@") # Store the remaining user input as arguments.
fi
}
_columns_() {
# DESC:
# Prints a two column output from a key/value pair.
# Optionally pass a number of 2 space tabs to indent the output.
# ARGS:
# $1 (required): Key name (Left column text)
# $2 (required): Long value (Right column text. Wraps around if too long)
# $3 (optional): Number of 2 character tabs to indent the command (default 1)
# OPTS:
# -b Bold the left column
# -u Underline the left column
# -r Reverse background and foreground colors
# OUTS:
# stdout: Prints the output in columns
# NOTE:
# Long text or ANSI colors in the first column may create display issues
# USAGE:
# _columns_ "Key" "Long value text" [tab level]
[[ $# -lt 2 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local opt
local OPTIND=1
local _style=""
while getopts ":bBuUrR" opt; do
case ${opt} in
b | B) _style="${_style}${bold}" ;;
u | U) _style="${_style}${underline}" ;;
r | R) _style="${_style}${reverse}" ;;
*) fatal "Unrecognized option '${1}' passed to ${FUNCNAME[0]}. Exiting." ;;
esac
done
shift $((OPTIND - 1))
local _key="${1}"
local _value="${2}"
local _tabLevel="${3-}"
local _tabSize=2
local _line
local _rightIndent
local _leftIndent
if [[ -z ${3-} ]]; then
_tabLevel=0
fi
_leftIndent="$((_tabLevel * _tabSize))"
local _leftColumnWidth="$((30 + _leftIndent))"
if [ "$(tput cols)" -gt 180 ]; then
_rightIndent=110
elif [ "$(tput cols)" -gt 160 ]; then
_rightIndent=90
elif [ "$(tput cols)" -gt 130 ]; then
_rightIndent=60
elif [ "$(tput cols)" -gt 120 ]; then
_rightIndent=50
elif [ "$(tput cols)" -gt 110 ]; then
_rightIndent=40
elif [ "$(tput cols)" -gt 100 ]; then
_rightIndent=30
elif [ "$(tput cols)" -gt 90 ]; then
_rightIndent=20
elif [ "$(tput cols)" -gt 80 ]; then
_rightIndent=10
else
_rightIndent=0
fi
local _rightWrapLength=$(($(tput cols) - _leftColumnWidth - _leftIndent - _rightIndent))
local _first_line=0
while read -r _line; do
if [[ ${_first_line} -eq 0 ]]; then
_first_line=1
else
_key=" "
fi
printf "%-${_leftIndent}s${_style}%-${_leftColumnWidth}b${reset} %b\n" "" "${_key}${reset}" "${_line}"
done <<<"$(fold -w${_rightWrapLength} -s <<<"${_value}")"
}
_usage_() {
cat <<USAGE_TEXT
${bold}$(basename "$0") [OPTION]... [FILE]...${reset}
Custom pre-commit hook script. This script is intended to be used as part of the pre-commit pipeline managed within .pre-commit-config.yaml.
${bold}${underline}Options:${reset}
$(_columns_ -b -- '-h, --help' "Display this help and exit" 2)
$(_columns_ -b -- "--loglevel [LEVEL]" "One of: FATAL, ERROR (default), WARN, INFO, NOTICE, DEBUG, ALL, OFF" 2)
$(_columns_ -b -- "--logfile [FILE]" "Full PATH to logfile. (Default is '\${HOME}/logs/$(basename "$0").log')" 2)
$(_columns_ -b -- "-n, --dryrun" "Non-destructive. Makes no permanent changes." 2)
$(_columns_ -b -- "-q, --quiet" "Quiet (no output)" 2)
$(_columns_ -b -- "-v, --verbose" "Output more information. (Items echoed to 'verbose')" 2)
$(_columns_ -b -- "--force" "Skip all user interaction. Implied 'Yes' to all actions." 2)
${bold}${underline}Example Usage:${reset}
${gray}# Run the script and specify log level and log file.${reset}
$(basename "$0") -vn --logfile "/path/to/file.log" --loglevel 'WARN'
USAGE_TEXT
}
# ################################## INITIALIZE AND RUN THE SCRIPT
# (Comment or uncomment the lines below to customize script behavior)
trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM
# Trap errors in subshells and functions
set -o errtrace
# Exit on error. Append '||true' if you expect an error
set -o errexit
# Use last non-zero exit code in a pipeline
set -o pipefail
# Confirm we have BASH greater than v4
[ "${BASH_VERSINFO:-0}" -ge 4 ] || {
printf "%s\n" "ERROR: BASH_VERSINFO is '${BASH_VERSINFO:-0}'. This script requires BASH v4 or greater."
exit 1
}
# Make `for f in *.txt` work when `*.txt` matches zero files
shopt -s nullglob globstar
# Set IFS to preferred implementation
IFS=$' \n\t'
# Run in debug mode
# set -o xtrace
# Initialize color constants
_setColors_
# Disallow expansion of unset variables
set -o nounset
# Force arguments when invoking the script
# [[ $# -eq 0 ]] && _parseOptions_ "-h"
# Parse arguments passed to script
_parseOptions_ "$@"
# Create a temp directory '$TMP_DIR'
_makeTempDir_ "$(basename "$0")"
# Acquire script lock
# _acquireScriptLock_
# Add Homebrew bin directory to PATH (MacOS)
# _homebrewPath_
# Source GNU utilities from Homebrew (MacOS)
# _useGNUutils_
# Run the main logic script
_mainScript_
# Exit cleanly
_safeExit_

150
scripts/update_dependencies.py Executable file

@@ -0,0 +1,150 @@
#!/usr/bin/env python
"""Script to update the pyproject.toml file with the latest versions of the dependencies."""
from pathlib import Path
from textwrap import wrap
try:
import tomllib
except ModuleNotFoundError: # pragma: no cover
import tomli as tomllib # type: ignore [no-redef]
import sh
from rich.console import Console
console = Console()
def dryrun(msg: str) -> None:
"""Print a message if the dry run flag is set.
Args:
msg: Message to print
"""
console.print(f"[cyan]DRYRUN | {msg}[/cyan]")
def success(msg: str) -> None:
"""Print a success message without using logging.
Args:
msg: Message to print
"""
console.print(f"[green]SUCCESS | {msg}[/green]")
def warning(msg: str) -> None:
"""Print a warning message without using logging.
Args:
msg: Message to print
"""
console.print(f"[yellow]WARNING | {msg}[/yellow]")
def error(msg: str) -> None:
"""Print an error message without using logging.
Args:
msg: Message to print
"""
console.print(f"[red]ERROR | {msg}[/red]")
def notice(msg: str) -> None:
"""Print a notice message without using logging.
Args:
msg: Message to print
"""
console.print(f"[bold]NOTICE | {msg}[/bold]")
def info(msg: str) -> None:
"""Print a notice message without using logging.
Args:
msg: Message to print
"""
console.print(f"INFO | {msg}")
def usage(msg: str, width: int = 80) -> None:
"""Print a usage message without using logging.
Args:
msg: Message to print
width (optional): Width of the message
"""
for _n, line in enumerate(wrap(msg, width=width)):
if _n == 0:
console.print(f"[dim]USAGE | {line}")
else:
console.print(f"[dim] | {line}")
def debug(msg: str) -> None:
"""Print a debug message without using logging.
Args:
msg: Message to print
"""
console.print(f"[blue]DEBUG | {msg}[/blue]")
def dim(msg: str) -> None:
"""Print a message in dimmed color.
Args:
msg: Message to print
"""
console.print(f"[dim]{msg}[/dim]")
# Load the pyproject.toml file
pyproject = Path(__file__).parents[1] / "pyproject.toml"
if not pyproject.exists():
console.print("pyproject.toml file not found")
raise SystemExit(1)
with pyproject.open("rb") as f:
try:
data = tomllib.load(f)
except tomllib.TOMLDecodeError as e:
raise SystemExit(1) from e
# Get the latest versions of all dependencies
info("Getting latest versions of dependencies...")
packages: dict = {}
for line in sh.poetry("--no-ansi", "show", "--outdated").splitlines():
package, current, latest = line.split()[:3]
packages[package] = {"current_version": current, "new_version": latest}
if not packages:
success("All dependencies are up to date")
raise SystemExit(0)
dependencies = data["tool"]["poetry"]["dependencies"]
groups = data["tool"]["poetry"]["group"]
for p in dependencies:
if p in packages:
notice(
f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
)
sh.poetry("add", f"{p}@latest", _fg=True)
for group in groups:
for p in groups[group]["dependencies"]:
if p in packages:
notice(
f"Updating {p} from {packages[p]['current_version']} to {packages[p]['new_version']}"
)
sh.poetry("add", f"{p}@latest", "--group", group, _fg=True)
sh.poetry("update", _fg=True)
success("All dependencies are up to date")
raise SystemExit(0)
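
The script shells out to poetry via the sh library (added as a dev dependency in pyproject.toml) and prints with rich (not pinned in the shown diff, presumably available transitively), so it is meant to run inside the project environment; a plausible invocation:

$ poetry run python scripts/update_dependencies.py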

tasks/backups.yml

@@ -6,42 +6,42 @@
# 1. Copies a backup and restore shellscript to /usr/local/bin
# 2. Edits the sudoers file to allow the script to be invoked with sudo privileges
- name: copy backup shellscript to server
- name: Copy backup shellscript to server
become: true
ansible.builtin.template:
src: scripts/service_backups.sh.j2
dest: /usr/local/bin/service_backups
mode: 0755
src: scripts/service_backups.sh.j2
dest: /usr/local/bin/service_backups
mode: 0755
when:
- is_nomad_client or is_nomad_server
- is_nomad_client or is_nomad_server
- name: copy restore shellscript to server
- name: Copy restore shellscript to server
become: true
ansible.builtin.template:
src: scripts/service_restore.sh.j2
dest: /usr/local/bin/service_restore
mode: 0755
src: scripts/service_restore.sh.j2
dest: /usr/local/bin/service_restore
mode: 0755
when:
- is_nomad_client or is_nomad_server
- is_nomad_client or is_nomad_server
- name: ensure nomad user can run sudo with the restore script
- name: Ensure nomad user can run sudo with the restore script
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nomad ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
path: /etc/sudoers
state: present
line: "nomad ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
when:
- is_nomad_client or is_nomad_server
- "'pis' in group_names"
- is_nomad_client or is_nomad_server
- "'pis' in group_names"
- name: ensure my user can run sudo with the restore script
- name: Ensure my user can run sudo with the restore script
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "{{ ansible_user }} ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
path: /etc/sudoers
state: present
line: "{{ ansible_user }} ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
when:
- is_nomad_client or is_nomad_server
- "'pis' in group_names"
- is_nomad_client or is_nomad_server
- "'pis' in group_names"

tasks/cluster_storage.yml

@@ -6,159 +6,159 @@
- name: "Mount storage on Raspberry Pis"
when: "'pis' in group_names"
block:
- name: ensure local mount points exist
become: true
ansible.builtin.file:
path: "{{ item.local }}"
state: directory
mode: 0777
# owner: "{{ ansible_user_uid }}"
# group: "{{ ansible_user_gid }}"
loop: "{{ rpi_nfs_mounts_list }}"
- name: Ensure local mount points exist
become: true
ansible.builtin.file:
path: "{{ item.local }}"
state: directory
mode: 0777
# owner: "{{ ansible_user_uid }}"
# group: "{{ ansible_user_gid }}"
loop: "{{ rpi_nfs_mounts_list }}"
- name: remove old nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: absent
loop: "{{ rpi_nfs_mounts_remove }}"
- name: Remove old nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: absent
loop: "{{ rpi_nfs_mounts_remove }}"
- name: mount all nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: mounted
boot: true
loop: "{{ rpi_nfs_mounts_list }}"
- name: Mount all nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: mounted
boot: true
loop: "{{ rpi_nfs_mounts_list }}"
# --------------------------------- Mount on Macs
# https://gist.github.com/l422y/8697518
- name: "Mount storage on Macs"
when: "'macs' in group_names"
block:
- name: create mount_point
become: true
ansible.builtin.file:
path: "{{ mac_storage_mount_point }}"
state: directory
mode: 0755
- name: Create mount_point
become: true
ansible.builtin.file:
path: "{{ mac_storage_mount_point }}"
state: directory
mode: 0755
# I ran into problems getting this to run successfully. If errors occur, add the line manually using:
# $ sudo nano /private/etc/auto_master
# I ran into problems getting this to run successfully. If errors occur, add the line manually using:
# $ sudo nano /private/etc/auto_master
- name: add NFS shared drives to macs
when: mac_autofs_type == 'nfs'
block:
- name: add auto_nfs to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_nfs"
line: "/- auto_nfs -nobrowse,nosuid"
unsafe_writes: true
- name: Add NFS shared drives to macs
when: mac_autofs_type == 'nfs'
block:
- name: Add auto_nfs to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_nfs"
line: "/- auto_nfs -nobrowse,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_nfs_mounts_list if mac_nfs_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: Add mounts to /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_nfs_mounts_list if mac_nfs_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: remove old mounts from /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: absent
unsafe_writes: true
mode: 0644
notify: mac_run_automount_unmount
loop: "{{ mac_nfs_mounts_remove if mac_nfs_mounts_remove is iterable else [] }}"
- name: Remove old mounts from /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: absent
unsafe_writes: true
mode: 0644
notify: mac_run_automount_unmount
loop: "{{ mac_nfs_mounts_remove if mac_nfs_mounts_remove is iterable else [] }}"
- name: add AFP shared drives to macs
when: mac_autofs_type == 'afp'
block:
- name: add auto_afp to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_afp"
line: "/- auto_afp -nobrowse,nosuid"
unsafe_writes: true
- name: Add AFP shared drives to macs
when: mac_autofs_type == 'afp'
block:
- name: Add auto_afp to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_afp"
line: "/- auto_afp -nobrowse,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: Add mounts to /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: remove mounts from /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
notify: mac_run_automount_unmount
- name: Remove mounts from /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
notify: mac_run_automount_unmount
- name: add SMB shared drives to macs
when: mac_autofs_type == 'smb'
block:
- name: add auto_smb to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_smb"
line: "/- auto_smb -noowners,nosuid"
unsafe_writes: true
- name: Add SMB shared drives to macs
when: mac_autofs_type == 'smb'
block:
- name: Add auto_smb to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_smb"
line: "/- auto_smb -noowners,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_smb
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_smb
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=smbfs,soft,noowners,nosuid,rw ://{{ smb_username }}:{{ smb_password }}@{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: Add mounts to /etc/auto_smb
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_smb
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=smbfs,soft,noowners,nosuid,rw ://{{ smb_username }}:{{ smb_password }}@{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: Remove mounts from /etc/auto_smb
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_smb
regexp: "{{ item.src }}"
state: absent
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
notify: mac_run_automount_unmount
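# A minimal sketch of what the SMB tasks above render, assuming hypothetical
# values item.src: "nas.local/media", item.local: "/mnt/media",
# smb_username: "user", smb_password: "pass":
#
#   /private/etc/auto_master gains:  /- auto_smb -noowners,nosuid
#   /private/etc/auto_smb gains:     /mnt/media -fstype=smbfs,soft,noowners,nosuid,rw ://user:pass@nas.local/media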

View File

@@ -4,356 +4,362 @@
- name: Set variables needed to install consul
block:
- name: "Set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "Set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "Set variable: Use USB drive for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "{{ rpi_usb_drive_mount_point }}/opt/consul"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "Set variable: Use USB drive for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "{{ rpi_usb_drive_mount_point }}/opt/consul"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "Set variable: Use root disk for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "/opt/consul"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "Set variable: Use root disk for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "/opt/consul"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "Set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
consul_opt_dir: "/Users/{{ ansible_user }}/Library/consul"
when:
- mac_intel or mac_arm
- name: "Set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
consul_opt_dir: "/Users/{{ ansible_user }}/Library/consul"
when:
- mac_intel or mac_arm
- name: "Set variable: Use ~/volume1/docker/consul/data for /opt files (synology)"
ansible.builtin.set_fact:
consul_opt_dir: "/volume1/docker/consul/data"
when:
- inventory_hostname == 'synology'
- name: "Set variable: Use ~/volume1/docker/consul/data for /opt files (synology)"
ansible.builtin.set_fact:
consul_opt_dir: "/volume1/docker/consul/data"
when:
- inventory_hostname == 'synology'
- name: "Set variable: Set Consul download Binary (armv7l)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Set variable: Set Consul download Binary (armv7l)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Set variable: Set Consul download Binary (aarch64)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm64.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "Set variable: Set Consul download Binary (aarch64)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm64.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "Set variable: Set Consul download Binary (MacOSX)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_amd64.zip"
when: mac_intel
- name: "Set variable: Set Consul download Binary (MacOSX)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_amd64.zip"
when: mac_intel
- name: "Set variable: Set Consul download Binary (MacOSX)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_arm64.zip"
when: mac_arm
- name: "Set variable: Set Consul download Binary (MacOSX)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_arm64.zip"
when: mac_arm
- name: Assert that we can install Consul
ansible.builtin.assert:
that:
- consul_download_uri is defined
- consul_opt_dir is defined
fail_msg: "Unable to install consul on this host"
when: inventory_hostname != 'synology'
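# As a sketch, with a hypothetical consul_version of "1.15.6" on an aarch64 Pi
# whose USB drive is mounted at /mnt/usb, the assert above passes with:
#   consul_opt_dir: /mnt/usb/opt/consul
#   consul_download_uri: https://releases.hashicorp.com/consul/1.15.6/consul_1.15.6_linux_arm64.zip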
- name: "Stop Consul"
block:
- name: "Stop consul systemd service (Debian)"
become: true
ansible.builtin.systemd:
name: consul
state: stopped
when:
- ansible_os_family == 'Debian'
- ansible_facts.services["consul.service"] is defined
- name: "Stop consul systemd service (Debian)"
become: true
ansible.builtin.systemd:
name: consul
state: stopped
when:
- ansible_os_family == 'Debian'
- ansible_facts.services["consul.service"] is defined
- name: "Check if plist file exists (MacOSX)"
ansible.builtin.stat:
path: "{{ consul_plist_macos }}"
register: consul_file
when:
- ansible_os_family == 'Darwin'
- name: "Check if plist file exists (MacOSX)"
ansible.builtin.stat:
path: "{{ consul_plist_macos }}"
register: consul_file
when:
- ansible_os_family == 'Darwin'
- name: "Unload consul agent (MacOSX)"
become: true
ansible.builtin.command:
cmd: "launchctl unload {{ consul_plist_macos }}"
when:
- ansible_os_family == 'Darwin'
- consul_file.stat.exists
- name: "Unload consul agent (MacOSX)"
become: true
ansible.builtin.command:
cmd: "launchctl unload {{ consul_plist_macos }}"
register: consul_unload
failed_when: consul_unload.rc != 0
changed_when: consul_unload.rc == 0
when:
- ansible_os_family == 'Darwin'
- consul_file.stat.exists
- name: "Create 'consul' user and group"
when:
- ansible_os_family == 'Debian'
block:
- name: "Ensure group 'consul' exists (Debian)"
become: true
ansible.builtin.group:
name: consul
state: present
- name: "Ensure group 'consul' exists (Debian)"
become: true
ansible.builtin.group:
name: consul
state: present
- name: "Add the user 'consul' with group 'consul' (Debian)"
become: true
ansible.builtin.user:
name: consul
group: consul
- name: "Add the user 'consul' with group 'consul' (Debian)"
become: true
ansible.builtin.user:
name: consul
group: consul
- name: "Create Consul /opt storage and copy certificates"
block:
- name: "Create {{ consul_opt_dir }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ consul_opt_dir }}"
- "{{ consul_opt_dir }}/logs"
- "{{ consul_opt_dir }}/plugins"
- "{{ consul_opt_dir }}/certs"
- name: "Create {{ consul_opt_dir }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ consul_opt_dir }}"
- "{{ consul_opt_dir }}/logs"
- "{{ consul_opt_dir }}/plugins"
- "{{ consul_opt_dir }}/certs"
- name: Copy certs to servers
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: "certs/consul/consul-agent-ca.pem", dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem" }
- { src: "certs/consul/{{ datacenter_name }}-server-consul-0.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0.pem" }
- { src: "certs/consul/{{ datacenter_name }}-server-consul-0-key.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem" }
when:
- is_consul_server
- name: Copy certs to clients
become: true
ansible.builtin.copy:
src: certs/consul/consul-agent-ca.pem
dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
mode: 0755
when:
- is_consul_client
- not is_consul_server
- name: "Set owner of files to consul:consul (debian)"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Set owner of files to consul:consul (debian)"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- mac_intel or mac_arm or inventory_hostname == 'synology'
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- mac_intel or mac_arm or inventory_hostname == 'synology'
- name: "Template out Consul configuration file"
block:
- name: "Create {{ interpolated_consul_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
state: directory
mode: 0755
- name: "Create {{ interpolated_consul_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
state: directory
mode: 0755
- name: Copy consul base config file
become: true
ansible.builtin.template:
src: consul.hcl.j2
dest: "{{ interpolated_consul_configuration_dir }}/consul.hcl"
mode: 0644
- name: "Set owner of files to consul:consul (Debian)"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Set owner of files to consul:consul (Debian)"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- mac_intel or mac_arm or inventory_hostname == 'synology'
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }}"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- mac_intel or mac_arm or inventory_hostname == 'synology'
- name: "Set owner of root consul dir to {{ ansible_user_uid }}:{{ ansible_user_gid }} (synology)"
become: true
ansible.builtin.file:
path: /volume1/docker/consul/
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- inventory_hostname == 'synology'
- name: "Set owner of root consul dir to {{ ansible_user_uid }}:{{ ansible_user_gid }} (synology)"
become: true
ansible.builtin.file:
path: /volume1/docker/consul/
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- inventory_hostname == 'synology'
- name: "Install Consul binary"
block:
- name: "Set fact: need install consul?"
ansible.builtin.set_fact:
need_consul_install: false
when:
- consul_download_uri is defined
- name: "Set fact: need install consul?"
ansible.builtin.set_fact:
need_consul_install: false
when:
- consul_download_uri is defined
- name: Check if Consul is installed
ansible.builtin.stat:
path: /usr/local/bin/consul
register: consul_binary_file_location
when:
- consul_download_uri is defined
- name: "Set fact: need consul install?"
ansible.builtin.set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not consul_binary_file_location.stat.exists
- name: "Set fact: need consul install?"
ansible.builtin.set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not consul_binary_file_location.stat.exists
- name: Check current version of Consul
ansible.builtin.shell:
cmd: /usr/local/bin/consul --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
changed_when: false
register: installed_consul_version
check_mode: false
when:
- consul_download_uri is defined
- not need_consul_install
- name: "Set fact: need consul install?"
ansible.builtin.set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not need_consul_install
- installed_consul_version.stdout is version(consul_version, '<')
- name: "Set fact: need consul install?"
ansible.builtin.set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not need_consul_install
- installed_consul_version.stdout is version(consul_version, '<')
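# The reinstall decision hinges on Jinja's version() test; with hypothetical
# versions, {{ '1.14.3' is version('1.15.6', '<') }} evaluates true, so
# need_consul_install flips to true and the unarchive task below runs.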
- name: Install Consul
become: true
ansible.builtin.unarchive:
src: "{{ consul_download_uri }}"
dest: /usr/local/bin
remote_src: true
when:
- consul_download_uri is defined
- need_consul_install
- name: "Validate consul config"
ansible.builtin.command:
cmd: "/usr/local/bin/consul validate {{ interpolated_consul_configuration_dir }}"
cmd: "/usr/local/bin/consul validate {{ interpolated_consul_configuration_dir }}"
register: consul_config_valid
changed_when: false
failed_when: consul_config_valid.rc != 0
when:
- inventory_hostname != 'synology'
- name: "Copy system.d or launchctl service files"
block:
- name: Ensure /Library/LaunchAgents exists (MacOSX)
ansible.builtin.file:
path: "{{ consul_plist_macos | dirname }}"
state: directory
mode: 0755
when:
- ansible_os_family == 'Darwin'
- name: Create Consul launchd service (MacOSX)
ansible.builtin.template:
src: consul.launchd.j2
dest: "{{ consul_plist_macos }}"
mode: 0644
when:
- ansible_os_family == 'Darwin'
- name: Create Consul service (Debian)
become: true
ansible.builtin.template:
src: consul.service.j2
dest: /etc/systemd/system/consul.service
mode: 0644
when:
- ansible_os_family == 'Debian'
- name: "Start Consul"
block:
- name: Load the Consul agent (MacOSX)
ansible.builtin.command:
cmd: "launchctl load -w {{ consul_plist_macos }}"
register: consul_loaded
changed_when: consul_loaded.rc == 0
failed_when: consul_loaded.rc > 0
when:
- mac_intel or mac_arm
- "'nostart' not in ansible_run_tags"
- name: Start Consul (Debian)
become: true
ansible.builtin.systemd:
name: consul
enabled: true
state: started
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
- name: Make sure Consul service is really running
ansible.builtin.command:
cmd: systemctl is-active consul
register: is_consul_really_running
changed_when: false
failed_when: is_consul_really_running.rc != 0
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
- name: "Copy Consul service checks to synology"
when:
- inventory_hostname == 'synology'
block:
- name: Copy config file
ansible.builtin.template:
src: consul_services/consul_synology_checks.json.j2
dest: "{{ interpolated_consul_configuration_dir }}/service_checks.json"
mode: 0644
- name: Reload configuration file
ansible.builtin.uri:
url: "http://{{ synology_second_ip }}:8500/v1/agent/reload"
method: PUT
status_code: 200
ignore_errors: true
check_mode: false
register: consul_agent_reload_http_response
failed_when: consul_agent_reload_http_response.status != 200
- name: Debug when consul agent reload fails
ansible.builtin.debug:
var: consul_agent_reload_http_response.msg
check_mode: false
when: consul_agent_reload_http_response.status != 200
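# Outside Ansible, the same reload can be triggered directly against Consul's
# agent HTTP API (address hypothetical):
#   curl -X PUT http://192.168.1.10:8500/v1/agent/reload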

View File

@@ -33,5 +33,5 @@
# when:
# - ansible_facts['system_vendor'] is search("Synology")
- name: "end play"
- name: "End play"
ansible.builtin.meta: end_play

View File

@@ -4,85 +4,91 @@
- name: Check if Docker is already present
ansible.builtin.command:
cmd: docker --version
register: docker_command_result
changed_when: docker_command_result.rc == 1
failed_when: false
- name: Install docker on Debian
when: ansible_os_family == 'Debian'
block:
- name: "Add docker local filesystem storage directory"
ansible.builtin.file:
path: "{{ rpi_localfs_service_storage }}"
mode: 0755
state: directory
- name: "Add docker local filesystem storage directory"
ansible.builtin.file:
path: "{{ rpi_localfs_service_storage }}"
mode: 0755
state: directory
- name: Download Docker install convenience script
ansible.builtin.get_url:
url: "https://get.docker.com/"
dest: /tmp/get-docker.sh
mode: 0775
when: docker_command_result.rc == 1
- name: Run Docker install convenience script
ansible.builtin.command: /tmp/get-docker.sh
environment:
CHANNEL: stable
register: docker_install
failed_when: docker_install.rc > 0
changed_when: docker_install.rc == 0
when: docker_command_result.rc == 1
- name: Make sure Docker CE is the version specified
ansible.builtin.apt:
name: "docker-ce"
state: present
when: docker_command_result.rc == 1
- name: Ensure Docker is started
ansible.builtin.service:
name: docker
state: started
enabled: true
- name: Ensure docker users are added to the docker group
become: true
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: docker
append: true
when: docker_command_result.rc == 1
- name: Install docker on macOS
when: "'macs' in group_names"
block:
- name: "Add docker directory to ~/Library"
ansible.builtin.file:
path: "{{ mac_localfs_service_storage }}"
mode: 0755
state: directory
- name: "Add docker directory to ~/Library"
ansible.builtin.file:
path: "{{ mac_localfs_service_storage }}"
mode: 0755
state: directory
- name: Install base homebrew packages
community.general.homebrew:
name: docker
state: present
update_homebrew: false
upgrade_all: false
when: docker_command_result.rc == 1
- name: Open docker application
ansible.builtin.command:
cmd: open /Applications/Docker.app
register: docker_open_app
failed_when: docker_open_app.rc > 0
changed_when: docker_open_app.rc == 0
when: docker_command_result.rc == 1
- name: Must install Docker manually
ansible.builtin.debug:
msg: |
Docker must be installed manually on MacOS. Log in to the Mac to install it, then rerun this playbook.
Be certain to configure the following:
- run on login
- add '{{ mac_storage_mount_point }}' to mountable file system directories
when: docker_command_result.rc == 1
- name: End play
ansible.builtin.meta: end_play
when: docker_command_result.rc == 1
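# For reference, the convenience-script path above is roughly this shell
# sketch (get.docker.com honors the CHANNEL variable):
#   curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
#   CHANNEL=stable sh /tmp/get-docker.sh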

View File

@@ -8,46 +8,46 @@
- name: "Set local filesystem location (pis)"
ansible.builtin.set_fact:
interpolated_localfs_service_storage: "{{ rpi_localfs_service_storage }}"
changed_when: false
when:
- "'pis' in group_names"
- "'pis' in group_names"
- name: "Set local filesystem location (macs)"
ansible.builtin.set_fact:
interpolated_localfs_service_storage: "{{ mac_localfs_service_storage }}"
changed_when: false
when:
- "'macs' in group_names"
- "'macs' in group_names"
- name: "Set NFS mount location (pis)"
ansible.builtin.set_fact:
interpolated_nfs_service_storage: "{{ rpi_nfs_mount_point }}"
changed_when: false
when:
- "'pis' in group_names"
- "'pis' in group_names"
- name: "Set NFS mount location location (macs)"
ansible.builtin.set_fact:
interpolated_nfs_service_storage: "{{ mac_storage_mount_point }}"
changed_when: false
when:
- "'macs' in group_names"
- "'macs' in group_names"
- name: "set consul configuration directory (synology)"
- name: "Set consul configuration directory (synology)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ synology_consul_configuration_dir }}"
when:
- inventory_hostname == 'synology'
- name: "set consul configuration directory (pis)"
- name: "Set consul configuration directory (pis)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ rpi_consul_configuration_dir }}"
when:
- "'pis' in group_names"
- "'pis' in group_names"
- name: "set consul configuration directory (macs)"
- name: "Set consul configuration directory (macs)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ mac_consul_configuration_dir }}"
when:
- "'macs' in group_names"
- "'macs' in group_names"

View File

@@ -4,29 +4,29 @@
#
# NOTE: This task exists because the arillso.logrotate role fails completely on macOS
- name: Add service_backups.log to logrotate
become: true
vars:
logrotate_applications:
- name: service_backups
definitions:
- logs:
- "{{ rpi_nfs_mount_point }}/pi-cluster/logs/service_backups.log"
options:
- rotate 1
- size 100k
- missingok
- notifempty
- su root root
- extension .log
- compress
- nodateext
- nocreate
- delaycompress
ansible.builtin.import_role:
name: arillso.logrotate
failed_when: false
ignore_errors: true
when:
- "'macs' not in group_names"
- is_cluster_leader
- "'macs' not in group_names"
- is_cluster_leader
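# Under these options the role should render a definition roughly like the
# following ("/mnt/nfs" stands in for rpi_nfs_mount_point):
#
#   /mnt/nfs/pi-cluster/logs/service_backups.log {
#       rotate 1
#       size 100k
#       missingok
#       notifempty
#       su root root
#       extension .log
#       compress
#       nodateext
#       nocreate
#       delaycompress
#   }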

View File

@@ -4,243 +4,243 @@
- name: "Set variables needed to install Nomad"
block:
- name: "set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "Set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "set variable: Use USB drive for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "{{ rpi_usb_drive_mount_point }}/opt/nomad"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "Set variable: Use USB drive for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "{{ rpi_usb_drive_mount_point }}/opt/nomad"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "set variable: Use root dist for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/opt/nomad"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "Set variable: Use root dist for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/opt/nomad"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/Users/{{ ansible_user }}/Library/nomad"
when:
- ansible_os_family == 'Darwin'
- name: "Set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/Users/{{ ansible_user }}/Library/nomad"
when:
- ansible_os_family == 'Darwin'
- name: "set variable: Set Nomad download Binary (armv7l)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Set variable: Set Nomad download Binary (armv7l)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set Nomad download Binary (aarch64)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "Set variable: Set Nomad download Binary (aarch64)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "set variable: Set Nomad download Binary (MacOSX)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_darwin_amd64.zip"
when:
- mac_intel
- name: "Set variable: Set Nomad download Binary (MacOSX)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_darwin_amd64.zip"
when:
- mac_intel
- name: Assert that we can install Nomad
ansible.builtin.assert:
that:
- nomad_download_file_uri is defined
- nomad_opt_dir_location is defined
fail_msg: "Unable to install Nomad on this host"
- name: "Create Nomad user and group (Debian)"
when: ansible_os_family == 'Debian'
block:
- name: "Ensure group 'nomad' exists (Debian)"
become: true
ansible.builtin.group:
name: nomad
state: present
- name: "Ensure group 'nomad' exists (Debian)"
become: true
ansible.builtin.group:
name: nomad
state: present
- name: "Add the user 'nomad' with group 'nomad' (Debian)"
become: true
ansible.builtin.user:
name: nomad
group: nomad
- name: "Add the user 'nomad' with group 'nomad' (Debian)"
become: true
ansible.builtin.user:
name: nomad
group: nomad
- name: "Add user 'nomad' to docker and sudo groups (Debian)"
become: true
ansible.builtin.user:
user: nomad
groups: docker,sudo
append: true
- name: "Add user 'nomad' to docker and sudo groups (Debian)"
become: true
ansible.builtin.user:
user: nomad
groups: docker,sudo
append: true
- name: "Create Nomad /opt storage"
block:
- name: "create {{ nomad_opt_dir_location }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ nomad_opt_dir_location }}/logs"
- "{{ nomad_opt_dir_location }}/plugins"
- "{{ nomad_opt_dir_location }}/certs"
- name: "Create {{ nomad_opt_dir_location }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ nomad_opt_dir_location }}/logs"
- "{{ nomad_opt_dir_location }}/plugins"
- "{{ nomad_opt_dir_location }}/certs"
- name: Copy server certs
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
- { src: certs/nomad/server.pem, dest: "{{ nomad_opt_dir_location }}/certs/server.pem" }
- { src: certs/nomad/server-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/server-key.pem" }
notify: "restart nomad"
when: is_nomad_server
- name: Copy client certs
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
- { src: certs/nomad/client.pem, dest: "{{ nomad_opt_dir_location }}/certs/client.pem" }
- { src: certs/nomad/client-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/client-key.pem" }
notify: "restart nomad"
when: is_nomad_client
- name: "set owner of files to nomad:nomad (debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: nomad
group: nomad
recurse: true
when: ansible_os_family == 'Debian'
- name: "Set owner of files to nomad:nomad (debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: nomad
group: nomad
recurse: true
when: ansible_os_family == 'Debian'
- name: "set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when: ansible_os_family != 'Debian'
- name: "Set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when: ansible_os_family != 'Debian'
- name: "Template out the configuration file"
block:
- name: "create {{ nomad_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
state: directory
mode: 0755
- name: "Create {{ nomad_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
state: directory
mode: 0755
- name: Copy base config file
become: true
ansible.builtin.template:
src: nomad.hcl.j2
dest: "{{ nomad_configuration_dir }}/nomad.hcl"
mode: 0644
notify: "restart nomad"
- name: "set owner of files to nomad:nomad (Debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
owner: nomad
group: nomad
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Set owner of files to nomad:nomad (Debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
owner: nomad
group: nomad
recurse: true
when:
- ansible_os_family == 'Debian'
- name: Install or Update Nomad
block:
- name: "set fact: do we need a nomad install?"
ansible.builtin.set_fact:
need_nomad_install: false
- name: "Set fact: do we need a nomad install?"
ansible.builtin.set_fact:
need_nomad_install: false
- name: Check if nomad is installed
ansible.builtin.stat:
path: /usr/local/bin/nomad
register: nomad_binary_file_location
- name: "set fact: do we need a nomad install"
ansible.builtin.set_fact:
need_nomad_install: true
when:
- not nomad_binary_file_location.stat.exists
- name: "Set fact: do we need a nomad install"
ansible.builtin.set_fact:
need_nomad_install: true
when:
- not nomad_binary_file_location.stat.exists
- name: Check current version of Nomad
ansible.builtin.shell: /usr/local/bin/nomad --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_nomad_version
check_mode: false
changed_when: false
when:
- not need_nomad_install
- name: "set fact: do we need a nomad install"
ansible.builtin.set_fact:
need_nomad_install: true
when:
- not need_nomad_install
- current_nomad_version.stdout is version(nomad_version, '<')
- name: "Set fact: do we need a nomad install"
ansible.builtin.set_fact:
need_nomad_install: true
when:
- not need_nomad_install
- current_nomad_version.stdout is version(nomad_version, '<')
- name: Install Nomad
become: true
ansible.builtin.unarchive:
src: "{{ nomad_download_file_uri }}"
dest: /usr/local/bin
remote_src: true
notify: "restart nomad"
when:
- need_nomad_install
- name: "Copy system.d or launchctrl service files"
block:
- name: Ensure /Library/LaunchAgents exists (MacOSX)
ansible.builtin.file:
path: "{{ nomad_plist_macos | dirname }}"
state: directory
mode: 0755
when:
- ansible_os_family == 'Darwin'
- name: Create nomad launchd service (MacOSX)
ansible.builtin.template:
src: nomad.launchd.j2
dest: "{{ nomad_plist_macos }}"
mode: 0644
notify: "restart nomad"
when:
- ansible_os_family == 'Darwin'
- name: Create nomad service (Debian)
become: true
ansible.builtin.template:
src: nomad.service.j2
dest: /etc/systemd/system/nomad.service
mode: 0644
notify: "restart nomad"
when:
- ansible_os_family == 'Debian'
- name: "start nomad, if stopped"
- name: "Start nomad, if stopped"
ansible.builtin.shell:
cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
register: node_status_response
ignore_errors: true
failed_when: false

View File

@@ -10,67 +10,79 @@
- name: "Sync Nomad Jobs"
block:
- name: Remove nomad jobs directory
ansible.builtin.file:
path: "{{ nomad_jobfile_location }}"
state: absent
when:
- is_nomad_client or is_nomad_server or ("'macs' in group_names")
- clean_nomad_jobs
- name: (Re)Create nomad jobs directory
ansible.builtin.file:
path: "{{ nomad_jobfile_location }}"
state: directory
mode: 0755
when:
- is_nomad_client or is_nomad_server or ("'macs' in group_names")
- "'nas' not in group_names"
- name: Synchronize nomad job templates (jinja)
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ nomad_jobfile_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
mode: 0644
with_fileglob: "templates/nomad_jobs/*.j2"
when:
- is_nomad_client or is_nomad_server or ("'macs' in group_names")
- "'nas' not in group_names"
- name: Synchronize nomad job templates (hcl)
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ nomad_jobfile_location }}/{{ item | basename }}"
mode: 0644
with_fileglob: "templates/nomad_jobs/*.hcl"
when:
- is_nomad_client or is_nomad_server or ("'macs' in group_names")
- "'nas' not in group_names"
- name: Ensure we have local storage folders
become: true
ansible.builtin.file:
path: "{{ interpolated_localfs_service_storage }}/{{ item }}"
state: directory
mode: 0777
group: "{{ ansible_user_gid }}"
owner: "{{ ansible_user_uid }}"
when:
- is_nomad_client or is_nomad_server
loop: "{{ service_localfs_dirs }}"
- name: "Sync docker compose files"
- name: Sync docker compose files
when: is_docker_compose_client
block:
- name: Confirm compose file dir exists
ansible.builtin.file:
path: "{{ docker_compose_file_location }}"
state: directory
mode: 0755
- name: Synchronize docker-compose files
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ docker_compose_file_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
mode: 0644
with_fileglob: "../templates/docker_compose_files/*.j2"
- name: "Prune docker caches"
community.docker.docker_prune:
containers: true
images: true
images_filters:
dangling: false
networks: true
volumes: true
builder_cache: true
when:
- is_docker_compose_client or is_nomad_client or is_nomad_server
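# With dangling: false in images_filters this prunes unused images, not just
# dangling ones; a rough CLI equivalent (a sketch, not an exact mapping):
#   docker system prune --all --volumes --force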

View File

@@ -4,64 +4,64 @@
- name: "Update and install APT packages"
when:
- ansible_os_family != 'Darwin'
- manage_apt_packages_list
block:
- name: Update APT package cache
become: true
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
- name: "upgrade APT to the latest packages (this may take a while)"
become: true
ansible.builtin.apt:
upgrade: safe
- name: "Upgrade APT to the latest packages (this may take a while)"
become: true
ansible.builtin.apt:
upgrade: safe
- name: "install/upgrade APT packages (this may take a while)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop: "{{ apt_packages_list }}"
register: apt_output
- name: "Install/upgrade APT packages (this may take a while)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop: "{{ apt_packages_list }}"
register: apt_output
- name: "Update and install Homebrew packages"
when:
- manage_homebrew_package_list
- ansible_os_family == 'Darwin'
block:
- name: Upgrade homebrew and all packages
community.general.homebrew:
update_homebrew: true
upgrade_all: true
register: homebrew_output
ignore_errors: true
- name: Install base homebrew packages
community.general.homebrew:
name: "{{ homebrew_package_list | join(',') }}"
state: present
update_homebrew: false
upgrade_all: false
register: homebrew_output
- name: Homebrew packages updated or installed
ansible.builtin.debug:
msg: "{{ homebrew_output.changed_pkgs }}"
- name: Unchanged homebrew packages
ansible.builtin.debug:
msg: "{{ homebrew_output.unchanged_pkgs }}"
- name: Install homebrew casks
community.general.homebrew_cask:
name: "{{ item }}"
state: present
install_options: "appdir=/Applications"
accept_external_apps: true
upgrade_all: false
update_homebrew: false
greedy: false
loop: "{{ homebrew_casks_list }}"
ignore_errors: true

View File

@@ -5,36 +5,37 @@
- name: "Check if pull_all_repos exists"
ansible.builtin.stat:
path: "~/bin/pull_all_repos"
path: "~/bin/pull_all_repos"
check_mode: false
register: pull_script_check
- name: "Check if ~/repos exists"
ansible.builtin.stat:
path: "~/repos"
path: "~/repos"
check_mode: false
register: repos_directory_check
- name: "run pull_all_repos script"
- name: "Run pull_all_repos script"
ansible.builtin.command:
cmd: "~/bin/pull_all_repos --directory ~/repos"
cmd: "~/bin/pull_all_repos --directory ~/repos"
register: pull_script_output
when:
- not ansible_check_mode
- pull_script_check.stat.exists
- pull_script_check.stat.executable
- repos_directory_check.stat.isdir is defined
- repos_directory_check.stat.isdir
- repos_directory_check.stat.writeable
failed_when: pull_script_output.rc > 1
changed_when: pull_script_output.rc == 0
- name: "Output from pull_all_repos"
ansible.builtin.debug:
msg: "{{ pull_script_output.stdout }}"
msg: "{{ pull_script_output.stdout }}"
when:
- not ansible_check_mode
- pull_script_check.stat.exists
- pull_script_check.stat.executable
- repos_directory_check.stat.isdir is defined
- repos_directory_check.stat.isdir
- repos_directory_check.stat.writeable

View File

@@ -1,12 +1,12 @@
---
# TASK DESCRIPTION:
# Always runs first. Confirms we can actually use Ansible.
- name: Sanity - user mode
become: false
ansible.builtin.debug:
msg: "sanity check: user mode"
msg: "Sanity check: user mode"
- name: Sanity - become mode
become: true
ansible.builtin.debug:
msg: "sanity check: become mode"
msg: "Sanity check: become mode"

View File

@@ -4,90 +4,92 @@
#
# NOTE: This is deprecated; I no longer use Prometheus and have migrated to Telegraf
- name: Populate service facts
ansible.builtin.service_facts:
- name: Stop node_exporter
become: true
ansible.builtin.systemd:
name: node_exporter
state: stopped
when: ansible_facts.services["node_exporter.service"] is defined
- name: Ensure group "prometheus" exists
become: true
ansible.builtin.group:
name: prometheus
state: present
- name: Add the user 'prometheus' with group 'prometheus'
become: true
ansible.builtin.user:
name: prometheus
group: prometheus
groups: docker
append: true
# --------------- Install or Update Prometheus
- name: "set fact: need to install Prometheus?"
- name: "Set fact: need to install Prometheus?"
ansible.builtin.set_fact:
need_prometheus_install: false
- name: Check if node_exporter is installed
ansible.builtin.stat:
path: /usr/local/bin/node_exporter
register: prometheus_binary_file_location
- name: "set fact: need to install Prometheus?"
- name: "Set fact: need to install Prometheus?"
ansible.builtin.set_fact:
need_prometheus_install: true
when:
- not prometheus_binary_file_location.stat.exists
- name: Check current version of Prometheus
ansible.builtin.shell: /usr/local/bin/node_exporter --version 3>&1 1>&2 2>&3 | head -n1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_prometheus_version
failed_when: false
changed_when: false
check_mode: false
when:
- need_prometheus_install is false
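# The 3>&1 1>&2 2>&3 dance swaps stdout and stderr (fd3 saves stdout, stdout
# takes stderr's place, stderr gets the saved fd3), presumably because
# node_exporter prints its version banner to stderr:
#   /usr/local/bin/node_exporter --version 3>&1 1>&2 2>&3 | head -n1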
- name: "set fact: need to install Prometheus?"
- name: "Set fact: need to install Prometheus?"
ansible.builtin.set_fact:
need_prometheus_install: true
when:
- need_prometheus_install is false
- current_prometheus_version.stdout != prometheus_verssion
- name: Install node_exporter
become: true
ansible.builtin.unarchive:
src: "https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_verssion }}/node_exporter-{{ prometheus_verssion }}.linux-armv7.tar.gz"
dest: /usr/local/bin
group: prometheus
owner: prometheus
# reference for extra_opts: https://github.com/ansible/ansible/issues/27081
extra_opts:
- --strip=1
- --no-anchored
- "node_exporter"
remote_src: true
src: "https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_verssion }}/node_exporter-{{ prometheus_verssion }}.linux-armv7.tar.gz"
dest: /usr/local/bin
group: prometheus
owner: prometheus
# reference for extra_opts: https://github.com/ansible/ansible/issues/27081
extra_opts:
- --strip=1
- --no-anchored
- "node_exporter"
remote_src: true
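# A note on extra_opts (passed through to tar): --strip=1 drops the versioned
# top-level directory and --no-anchored lets the trailing "node_exporter"
# pattern match the binary anywhere in the archive, so only the executable
# lands in /usr/local/bin.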
when:
- need_prometheus_install is true
- name: Create node_exporter service
become: true
ansible.builtin.template:
src: node_exporter.service.j2
dest: /etc/systemd/system/node_exporter.service
mode: 0644
- name: Start node_exporter
become: true
ansible.builtin.systemd:
name: node_exporter
daemon_reload: true
enabled: true
state: started
when:
- "'nostart' not in ansible_run_tags"
- "'nostart' not in ansible_run_tags"

View File

@@ -4,186 +4,187 @@
- name: "Set variables"
block:
- name: "Set tdarr local filesystem location (pis)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ rpi1_tdarr_file_location }}"
changed_when: false
when:
- "'pis' in group_names"
- name: "Set tdarr local filesystem location (pis)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ rpi1_tdarr_file_location }}"
changed_when: false
when:
- "'pis' in group_names"
- name: "Set tdarr local filesystem location (macs)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ mac_tdarr_file_location }}"
changed_when: false
when:
- "'macs' in group_names"
- name: "Set tdarr local filesystem location (macs)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ mac_tdarr_file_location }}"
changed_when: false
when:
- "'macs' in group_names"
- name: "set variable: Set tdarr download Binary (armv7l)"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/linux_arm/Tdarr_Updater.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Set variable: Set tdarr download Binary (armv7l)"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/linux_arm/Tdarr_Updater.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set tdarr download Binary (MacOSX) - Intel"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_x64/Tdarr_Updater.zip"
when:
- mac_intel
- name: "Set variable: Set tdarr download Binary (MacOSX) - Intel"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_x64/Tdarr_Updater.zip"
when:
- mac_intel
- name: "set variable: Set tdarr download Binary (MacOSX) - ARM"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_arm64/Tdarr_Updater.zip"
when:
- mac_arm
- name: "Set variable: Set tdarr download Binary (MacOSX) - ARM"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_arm64/Tdarr_Updater.zip"
when:
- mac_arm
- name: "set fact: do we need a tdarr install?"
ansible.builtin.set_fact:
need_tdarr_install: false
- name: "Set fact: do we need a tdarr install?"
ansible.builtin.set_fact:
need_tdarr_install: false
- name: Assert that we can install Tdarr
ansible.builtin.assert:
that:
- tdarr_download_uri is defined
- interpolated_tdarr_dir is defined
fail_msg: "Unable to install Tdarr on this host"
- name: "Install ffmpeg and HandbrakeCLI"
block:
- name: "ensure ffmpeg and handbrake are installed (Debian)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop:
- ffmpeg
- handbrake
when: "'pis' in group_names"
- name: "Ensure ffmpeg and handbrake are installed (Debian)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop:
- ffmpeg
- handbrake
when: "'pis' in group_names"
- name: "ensure ffmpeg and handbrake are installed (MacOS)"
community.general.homebrew:
name: "{{ item }}"
state: present
update_homebrew: false
upgrade_all: false
loop:
- ffmpeg
- handbrake
when: "'macs' in group_names"
- name: "Ensure ffmpeg and handbrake are installed (MacOS)"
community.general.homebrew:
name: "{{ item }}"
state: present
update_homebrew: false
upgrade_all: false
loop:
- ffmpeg
- handbrake
when: "'macs' in group_names"
- name: "ensure tdarr directory exists"
- name: "Ensure tdarr directory exists"
become: true
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
state: directory
path: "{{ interpolated_tdarr_dir }}"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
state: directory
- name: "Install tdarr"
block:
- name: "set_fact: need Tdarr install?"
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/configs"
register: tdarr_exists
changed_when: false
failed_when: false
- name: "Set fact: need Tdarr install?"
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/configs"
register: tdarr_exists
changed_when: false
failed_when: false
- name: "set fact: do we need a tdarr install?"
ansible.builtin.set_fact:
need_tdarr_install: true
when: not tdarr_exists.stat.exists
- name: "Set fact: do we need a tdarr install?"
ansible.builtin.set_fact:
need_tdarr_install: true
when: not tdarr_exists.stat.exists
- name: Download tdarr
ansible.builtin.unarchive:
src: "{{ tdarr_download_uri }}"
dest: "{{ interpolated_tdarr_dir }}"
remote_src: true
when: need_tdarr_install
- name: Download tdarr
ansible.builtin.unarchive:
src: "{{ tdarr_download_uri }}"
dest: "{{ interpolated_tdarr_dir }}"
remote_src: true
when: need_tdarr_install
- name: Did tdarr download?
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_installer_exists
failed_when: not tdarr_installer_exists.stat.exists
when: need_tdarr_install
- name: Did tdarr download?
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_installer_exists
failed_when: not tdarr_installer_exists.stat.exists
when: need_tdarr_install
- name: Ensure correct permissions on Tdarr_Updater
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
mode: 0755
when: need_tdarr_install
- name: Ensure correct permissions on Tdarr_Updater
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
mode: 0755
when: need_tdarr_install
- name: Install tdarr
ansible.builtin.command:
cmd: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_install
failed_when: tdarr_install.rc > 0
when: need_tdarr_install
- name: Install tdarr
ansible.builtin.command:
cmd: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_install
failed_when: tdarr_install.rc > 0
changed_when: tdarr_install.rc == 0
when: need_tdarr_install
- name: Ensure correct permissions on server/node executables
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/{{ item }}"
mode: 0755
loop:
- Tdarr_Server/Tdarr_Server
- Tdarr_Node/Tdarr_Node
when: need_tdarr_install
- name: Ensure correct permissions on server/node executables
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/{{ item }}"
mode: 0755
loop:
- Tdarr_Server/Tdarr_Server
- Tdarr_Node/Tdarr_Node
when: need_tdarr_install
- name: "configure tdarr"
- name: "Configure tdarr"
block:
- name: update server configuration file
ansible.builtin.template:
src: Tdarr_Server_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Server_Config.json"
mode: 0644
when: is_tdarr_server
- name: Update server configuration file
ansible.builtin.template:
src: Tdarr_Server_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Server_Config.json"
mode: 0644
when: is_tdarr_server
- name: update node configuration file
ansible.builtin.template:
src: Tdarr_Node_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Node_Config.json"
mode: 0644
when: is_tdarr_node
- name: Update node configuration file
ansible.builtin.template:
src: Tdarr_Node_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Node_Config.json"
mode: 0644
when: is_tdarr_node
- name: check if consul is installed?
ansible.builtin.stat:
path: "{{ interpolated_consul_configuration_dir }}"
register: consul_installed
changed_when: false
failed_when: false
when:
- is_tdarr_server
- name: Check if consul is installed?
ansible.builtin.stat:
path: "{{ interpolated_consul_configuration_dir }}"
register: consul_installed
changed_when: false
failed_when: false
when:
- is_tdarr_server
- name: move consul service config into place
become: true
ansible.builtin.template:
src: consul_services/tdarr_service.json.j2
dest: "{{ interpolated_consul_configuration_dir }}/tdarr_service.json"
mode: 0644
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: Move consul service config into place
become: true
ansible.builtin.template:
src: consul_services/tdarr_service.json.j2
dest: "{{ interpolated_consul_configuration_dir }}/tdarr_service.json"
mode: 0644
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: Reload consul agent
ansible.builtin.uri:
url: "http://{{ ansible_host }}:8500/v1/agent/reload"
method: PUT
status_code: 200
ignore_errors: true
register: consul_agent_reload_http_response
failed_when: consul_agent_reload_http_response.status != 200
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: Reload consul agent
ansible.builtin.uri:
url: "http://{{ ansible_host }}:8500/v1/agent/reload"
method: PUT
status_code: 200
ignore_errors: true
register: consul_agent_reload_http_response
failed_when: consul_agent_reload_http_response.status != 200
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: debug when consul agent reload fails
ansible.builtin.debug:
var: consul_agent_reload_http_response.msg
when:
- is_tdarr_server
- consul_installed.stat.exists
- consul_agent_reload_http_response.status != 200
- name: Debug when consul agent reload fails
ansible.builtin.debug:
var: consul_agent_reload_http_response.msg
when:
- is_tdarr_server
- consul_installed.stat.exists
- consul_agent_reload_http_response.status != 200
- name: mount shared storage
- name: Mount shared storage
ansible.builtin.import_tasks: cluster_storage.yml
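
For reference, the agent reload endpoint targeted by the tasks above can be exercised by hand when debugging; a minimal sketch, assuming a Consul agent listening on the default HTTP port 8500:

    # Ask the local agent to re-read its configuration and service definitions
    curl -X PUT http://127.0.0.1:8500/v1/agent/reload

A 200 response means the reload succeeded, which is why the play treats any other status as a failure.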

View File

@@ -5,146 +5,146 @@
# --------------------------------- Set variables depending on system type
- name: "Configure variables"
block:
- name: "set variable: telegraph_binary_location (Debian)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/bin/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "Set variable: telegraph_binary_location (Debian)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/bin/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "set variable: telegraph_binary_location (MacOS)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/local/bin/telegraf"
when:
- ansible_os_family == 'Darwin'
- name: "Set variable: telegraph_binary_location (MacOS)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/local/bin/telegraf"
when:
- ansible_os_family == 'Darwin'
- name: "set fact: telegraph_config_location (Debian)"
ansible.builtin.set_fact:
telegraph_config_location: "/etc/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "Set fact: telegraph_config_location (Debian)"
ansible.builtin.set_fact:
telegraph_config_location: "/etc/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/usr/local/etc"
when:
- ansible_os_family == 'Darwin'
- name: "Set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/usr/local/etc"
when:
- ansible_os_family == 'Darwin'
- name: "set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/volume1/docker/telegraf/config"
when:
- inventory_hostname == 'synology'
- name: "Set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/volume1/docker/telegraf/config"
when:
- inventory_hostname == 'synology'
- name: "Fail if arm Mac (need to update task) or variables not defined"
ansible.builtin.assert:
that:
- telegraph_binary_location is defined
- telegraph_config_location is defined
- not mac_arm
fail_msg: "Unable to install Telegraf on this host"
- name: "Fail if arm Mac (need to update task) or variables not defined"
ansible.builtin.assert:
that:
- telegraph_binary_location is defined
- telegraph_config_location is defined
- not mac_arm
fail_msg: "Unable to install Telegraf on this host"
- name: "set variable: Set speedtest download Binary (armv7l)"
ansible.builtin.set_fact:
speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-armhf.tgz"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Set variable: Set speedtest download Binary (armv7l)"
ansible.builtin.set_fact:
speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-armhf.tgz"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set speedtest download Binary (aarch64)"
ansible.builtin.set_fact:
speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-aarch64.tgz"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "Set variable: Set speedtest download Binary (aarch64)"
ansible.builtin.set_fact:
speedtest_download_file_uri: "https://install.speedtest.net/app/cli/ookla-speedtest-{{ speedtest_cli_version }}-linux-aarch64.tgz"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'aarch64'
- name: "Install/upgrade Telegraf"
block:
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: false
when: telegraph_binary_location is defined
- name: "Set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: false
when: telegraph_binary_location is defined
- name: Check if telegraf is installed
ansible.builtin.stat:
path: "{{ telegraph_binary_location }}"
check_mode: false
register: telegraf_binary_exists
when: telegraph_binary_location is defined
- name: Check if telegraf is installed
ansible.builtin.stat:
path: "{{ telegraph_binary_location }}"
check_mode: false
register: telegraf_binary_exists
when: telegraph_binary_location is defined
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
check_mode: false
when:
- telegraph_binary_location is defined
- not telegraf_binary_exists.stat.exists
- name: "Set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
check_mode: false
when:
- telegraph_binary_location is defined
- not telegraf_binary_exists.stat.exists
- name: Check current version of telegraf
ansible.builtin.shell: "{{ telegraph_binary_location }} --version | grep -oE '[0-9]+\\.[0-9]+\\.[0-9]+'"
ignore_errors: true
register: current_telegraf_version
check_mode: false
changed_when: false
when:
- not need_telegraf_install
- telegraph_binary_location is defined
- name: Check current version of telegraf
ansible.builtin.shell: "{{ telegraph_binary_location }} --version | grep -oE '[0-9]+\\.[0-9]+\\.[0-9]+'"
ignore_errors: true
register: current_telegraf_version
check_mode: false
changed_when: false
when:
- not need_telegraf_install
- telegraph_binary_location is defined
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
when:
- telegraph_binary_location is defined
- not need_telegraf_install
- current_telegraf_version.stdout is version(telegraf_version, '<')
- name: "Set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
when:
- telegraph_binary_location is defined
- not need_telegraf_install
- current_telegraf_version.stdout is version(telegraf_version, '<')
- name: install telegraf (MacOS)
community.general.homebrew:
name: telegraf
state: present
notify: restart_telegraf
when:
- ansible_os_family == 'Darwin'
- need_telegraf_install
- name: Install telegraf (MacOS)
community.general.homebrew:
name: telegraf
state: present
notify: restart_telegraf
when:
- ansible_os_family == 'Darwin'
- need_telegraf_install
- name: install base apt-transport (Debian)
become: true
ansible.builtin.apt:
pkg: apt-transport-https
state: present
update_cache: true
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Install base apt-transport (Debian)
become: true
ansible.builtin.apt:
pkg: apt-transport-https
state: present
update_cache: true
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Download telegraf GPG key (Debian)
become: true
ansible.builtin.apt_key:
state: present
url: "https://repos.influxdata.com/influxdb.key"
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Download telegraf GPG key (Debian)
become: true
ansible.builtin.apt_key:
state: present
url: "https://repos.influxdata.com/influxdb.key"
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Add telegraf repository to apt (Debian)
become: true
ansible.builtin.apt_repository:
repo: deb https://repos.influxdata.com/debian bullseye stable
state: present
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Add telegraf repository to apt (Debian)
become: true
ansible.builtin.apt_repository:
repo: deb https://repos.influxdata.com/debian bullseye stable
state: present
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: install telegraf (Debian)
become: true
ansible.builtin.apt:
pkg: telegraf
state: latest
update_cache: true
only_upgrade: true
notify: restart_telegraf
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Install telegraf (Debian)
become: true
ansible.builtin.apt:
pkg: telegraf
state: latest
update_cache: true
only_upgrade: true
notify: restart_telegraf
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
# - name: give telegraf access to docker
# become: true
@@ -162,115 +162,115 @@
- name: "Install speedtest"
when: "'pis' in group_names"
block:
- name: "set fact: do we need speedtest installed?"
ansible.builtin.set_fact:
need_speedtest_install: false
- name: "Set fact: do we need speedtest installed?"
ansible.builtin.set_fact:
need_speedtest_install: false
- name: Check if speedtest is installed
ansible.builtin.stat:
path: /usr/local/bin/speedtest
register: speedtest_binary_file_location
- name: Check if speedtest is installed
ansible.builtin.stat:
path: /usr/local/bin/speedtest
register: speedtest_binary_file_location
- name: "set fact: do we need a speedtest install"
ansible.builtin.set_fact:
need_speedtest_install: true
when:
- not speedtest_binary_file_location.stat.exists
- name: "Set fact: do we need a speedtest install"
ansible.builtin.set_fact:
need_speedtest_install: true
when:
- not speedtest_binary_file_location.stat.exists
- name: Check current version of speedtest
ansible.builtin.shell: /usr/local/bin/speedtest --version | head -n1 | awk '{print $4}' | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_speedtest_version
check_mode: false
changed_when: false
when:
- not need_speedtest_install
- name: Check current version of speedtest
ansible.builtin.shell: /usr/local/bin/speedtest --version | head -n1 | awk '{print $4}' | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_speedtest_version
check_mode: false
changed_when: false
when:
- not need_speedtest_install
- name: "set fact: do we need a speedtest install"
ansible.builtin.set_fact:
need_speedtest_install: true
when:
- not need_speedtest_install
- current_speedtest_version.stdout is version(speedtest_cli_version, '<')
- name: "Set fact: do we need a speedtest install"
ansible.builtin.set_fact:
need_speedtest_install: true
when:
- not need_speedtest_install
- current_speedtest_version.stdout is version(speedtest_cli_version, '<')
- name: "Install speedtest (pi)"
become: true
ansible.builtin.unarchive:
src: "{{ speedtest_download_file_uri }}"
dest: /usr/local/bin
remote_src: true
when:
- need_speedtest_install
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Install speedtest (pi)"
become: true
ansible.builtin.unarchive:
src: "{{ speedtest_download_file_uri }}"
dest: /usr/local/bin
remote_src: true
when:
- need_speedtest_install
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "Configure Telegraf"
block:
- name: "Ensure {{ telegraph_config_location }} exists"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
loop:
- "{{ telegraph_config_location }}"
- "{{ telegraph_config_location }}/telegraf.d"
- name: "Ensure {{ telegraph_config_location }} exists"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
loop:
- "{{ telegraph_config_location }}"
- "{{ telegraph_config_location }}/telegraf.d"
- name: template config files to server
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/base_config.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.conf" }
- { src: "telegraf/custom_metrics.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/custom_metrics.conf" }
- { src: "telegraf/nomad.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/nomad.conf" }
- { src: "telegraf/docker.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/docker.conf" }
notify: restart_telegraf
- name: Template config files to server
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/base_config.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.conf" }
- { src: "telegraf/custom_metrics.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/custom_metrics.conf" }
- { src: "telegraf/nomad.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/nomad.conf" }
- { src: "telegraf/docker.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/docker.conf" }
notify: restart_telegraf
- name: template leader configs (ie, configs that should be placed on a single server)
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/leader.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/leader.conf" }
- { src: "telegraf/speedtest.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/speedtest.conf" }
- { src: "telegraf/pingHosts.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/pingHosts.conf" }
when:
- is_cluster_leader
notify: restart_telegraf
- name: Template leader configs (ie, configs that should be placed on a single server)
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/leader.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/leader.conf" }
- { src: "telegraf/speedtest.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/speedtest.conf" }
- { src: "telegraf/pingHosts.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/pingHosts.conf" }
when:
- is_cluster_leader
notify: restart_telegraf
- name: Copy custom metrics script
become: true
ansible.builtin.template:
src: "scripts/telegraf_custom_metrics.sh.j2"
dest: "/usr/local/bin/telegraf_custom_metrics.sh"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
when:
- inventory_hostname != 'synology'
- name: Copy custom metrics script
become: true
ansible.builtin.template:
src: "scripts/telegraf_custom_metrics.sh.j2"
dest: "/usr/local/bin/telegraf_custom_metrics.sh"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
when:
- inventory_hostname != 'synology'
- name: Copy speedtest script
become: true
ansible.builtin.template:
src: "scripts/telegraf_speedtest.sh.j2"
dest: "/usr/local/bin/telegraf_speedtest.sh"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
when:
- is_cluster_leader
- name: Copy speedtest script
become: true
ansible.builtin.template:
src: "scripts/telegraf_speedtest.sh.j2"
dest: "/usr/local/bin/telegraf_speedtest.sh"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
when:
- is_cluster_leader
- name: Reset file ownership
become: true
ansible.builtin.file:
path: "{{ telegraph_config_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- (ansible_os_family == 'Darwin') or (inventory_hostname == 'synology')
- name: Reset file ownership
become: true
ansible.builtin.file:
path: "{{ telegraph_config_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- (ansible_os_family == 'Darwin') or (inventory_hostname == 'synology')
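
Once the templates are in place, the resulting configuration can be exercised without restarting the service; a minimal sketch, assuming the Debian paths set earlier in this file:

    # Gather metrics once and print them to stdout instead of shipping them
    telegraf --config /etc/telegraf/telegraf.conf \
      --config-directory /etc/telegraf/telegraf.d --test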

View File

@@ -5,11 +5,11 @@
{% if is_consul_server %}
"server" = true
"ui_config" = {
"enabled" = true
"enabled" = true
}
{% else %}
"ui_config" = {
"enabled" = false
"enabled" = false
}
{% endif %}
@@ -28,15 +28,15 @@
# ----------------------------------------- Networking
"addresses" = {
"dns" = "0.0.0.0"
"grpc" = "0.0.0.0"
"http" = "0.0.0.0"
"https" = "0.0.0.0"
"dns" = "0.0.0.0"
"grpc" = "0.0.0.0"
"http" = "0.0.0.0"
"https" = "0.0.0.0"
}
"ports" = {
"dns" = 8600
"http" = 8500
"server" = 8300
"dns" = 8600
"http" = 8500
"server" = 8300
}
{% if 'linode' in group_names %}
@@ -57,7 +57,7 @@
{% if 'linode' in group_names %}
"retry_join" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].linode_private_ip }}"{% if not loop.last %}, {% endif %}{% endfor %}]
{% else %}
"retry_join" = [{% for h in groups['lan'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
"retry_join" = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
{% if is_consul_server %}
{% if 'linode' in group_names %}
"join_wan" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
@@ -81,7 +81,7 @@
"key_file" = "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem"
{% endif %}
"auto_encrypt" = {
"allow_tls" = true
"allow_tls" = true
}
{% else %} {# Consul Clients #}
"verify_incoming" = false
@@ -93,14 +93,14 @@
"ca_file" = "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
{% endif %}
"auto_encrypt" = {
"tls" = true
"tls" = true
}
{% endif %}
"acl" = {
enabled = false
default_policy = "allow"
enable_token_persistence = true
default_policy = "allow"
enable_token_persistence = true
enabled = false
}
# ----------------------------------------- Cluster Operations
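
A rendered agent configuration can be sanity-checked before the agent is restarted; a minimal sketch, assuming the templated files land in /etc/consul.d:

    # Validate every configuration file in the directory
    consul validate /etc/consul.d/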

View File

@@ -1,5 +1,5 @@
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Description="hashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target

View File

@@ -9,8 +9,7 @@
"traefik.http.routers.sabnzbd.entryPoints=web,websecure",
"traefik.http.routers.sabnzbd.service=sabnzbd",
"traefik.http.routers.sabnzbd.tls=true",
"traefik.http.routers.sabnzbd.tls.certresolver=cloudflare",
"traefik.http.routers.sabnzbd.middlewares=authelia@file"
"traefik.http.routers.sabnzbd.tls.certresolver=cloudflare"
],
"checks": [{
"id": "sabnzbd-http-check",
@@ -21,6 +20,27 @@
"failures_before_critical": 3
}]
},
{
"name": "jellyfin",
"id": "jellyfin",
"tags": [
"traefik.enable=true",
"traefik.http.services.jellyfin.loadbalancer.server.port=8096",
"traefik.http.routers.jellyfin.rule=Host(`jellyfin.{{ homelab_domain_name }}`)",
"traefik.http.routers.jellyfin.entryPoints=web,websecure",
"traefik.http.routers.jellyfin.service=jellyfin",
"traefik.http.routers.jellyfin.tls=true",
"traefik.http.routers.jellyfin.tls.certresolver=cloudflare"
],
"checks": [{
"id": "jellyfin-http-check",
"http": "http://{{ synology_second_ip }}:8096",
"interval": "30s",
"timeout": "5s",
"success_before_passing": 3,
"failures_before_critical": 3
}]
},
{
"name": "synology",
"id": "synology",

View File

@@ -1,11 +0,0 @@
version: '3.9'
services:
asn-to-ip:
image: ddimick/asn-to-ip:latest
hostname: asn-to-ip
container_name: asn-to-ip
network_mode: "bridge"
ports:
- 5151:5000
restart: unless-stopped

View File

@@ -2,7 +2,7 @@ version: '3.9'
services:
consul:
image: consul:{{ consul_version }}
image: hashicorp/consul:{{ consul_version }}
hostname: consul
container_name: consul
network_mode: "host"

View File

@@ -0,0 +1,17 @@
version: '3.9'
services:
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
hostname: jellyfin
container_name: jellyfin
network_mode: "host"
environment:
- "TZ=America/New_York"
- "PGID=101"
- "PUID={{ ansible_user_uid }}"
volumes:
- /volume1/pi-cluster/jellyfin:/config
- /volume1/media/media/movies:/data/movies
- /volume1/media/media/tv:/data/tv
restart: unless-stopped
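
Assuming this file is saved as jellyfin.yml on the Synology host (the filename is illustrative), it can be brought up with the usual Compose workflow; use docker-compose instead of docker compose on hosts still running Compose v1:

    docker compose -f jellyfin.yml up -d
    docker compose -f jellyfin.yml logs -f jellyfin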

View File

@@ -2,7 +2,7 @@ version: '3.9'
services:
sabnzbd:
image: ghcr.io/linuxserver/sabnzbd
image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }}
hostname: sabnzbd
container_name: sabnzbd
network_mode: "bridge"
@@ -10,13 +10,15 @@ services:
- "TZ=America/New_York"
- "PGID=101"
- "PUID={{ ansible_user_uid }}"
#- "DOCKER_MODS=linuxserver/mods:universal-cron"
volumes:
- /var/services/homes/{{ my_username }}:/{{ my_username }}
- /volume1/nate:/nate
- /volume1/media/downloads/nzb:/nzbd
- /volume1/media/downloads/temp:/incomplete-downloads
- /volume1/media/downloads/complete:/downloads
- /volume1/docker/sabnzbd:/config
- /volume1/pi-cluster/sabnzbd:/config
- /volume1/pi-cluster/sabnzbd/startup-scripts:/custom-cont-init.d
ports:
- 8080:8080
- 9090:9090

View File

@@ -5,28 +5,28 @@ datacenter = "{{ datacenter_name }}"
# ----------------------------------------- Files and Logs
data_dir = "{{ nomad_opt_dir_location }}"
plugin_dir = "{{ nomad_opt_dir_location }}/plugins"
log_level = "warn"
log_file = "{{ nomad_opt_dir_location }}/logs/nomad.log"
log_rotate_max_files = 5
enable_syslog = false
log_file = "{{ nomad_opt_dir_location }}/logs/nomad.log"
log_level = "warn"
log_rotate_max_files = 5
plugin_dir = "{{ nomad_opt_dir_location }}/plugins"
# ----------------------------------------- Networking
bind_addr = "0.0.0.0" # the default
advertise {
{% if 'linode' in group_names %}
http = "{{ linode_private_ip }}:4646"
rpc = "{{ linode_private_ip }}:4647"
serf = "{{ linode_private_ip }}:4648" # non-default ports may be specified
http = "{{ linode_private_ip }}:4646"
rpc = "{{ linode_private_ip }}:4647"
serf = "{{ linode_private_ip }}:4648" # non-default ports may be specified
{% elif 'synology' in group_names %}
http = "{{ synology_second_ip }}:4646"
rpc = "{{ synology_second_ip }}:4647"
serf = "{{ synology_second_ip }}:4648" # non-default ports may be specified
http = "{{ synology_second_ip }}:4646"
rpc = "{{ synology_second_ip }}:4647"
serf = "{{ synology_second_ip }}:4648" # non-default ports may be specified
{% else %}
http = "{{ ansible_host }}:4646"
rpc = "{{ ansible_host }}:4647"
serf = "{{ ansible_host }}:4648" # non-default ports may be specified
http = "{{ ansible_host }}:4646"
rpc = "{{ ansible_host }}:4647"
serf = "{{ ansible_host }}:4648" # non-default ports may be specified
{% endif %}
}
@@ -48,170 +48,171 @@ consul {
{% if is_nomad_server %}
tags = [
"traefik.enable=true",
"traefik.http.routers.nomad-server.entryPoints=web,websecure",
"traefik.http.routers.nomad-server.service=nomad-server",
"traefik.http.routers.nomad-server.rule=Host(`nomad.{{ homelab_domain_name }}`)",
"traefik.http.routers.nomad-server.tls=true",
"traefik.http.routers.nomad-server.middlewares=authelia@file,redirectScheme@file",
"traefik.http.services.nomad-server.loadbalancer.server.port=4646"
"traefik.enable=true",
"traefik.http.routers.nomad-server.entryPoints=web,websecure",
"traefik.http.routers.nomad-server.service=nomad-server",
"traefik.http.routers.nomad-server.rule=Host(`nomad.{{ homelab_domain_name }}`)",
"traefik.http.routers.nomad-server.tls=true",
"traefik.http.routers.nomad-server.middlewares=redirectScheme@file",
"traefik.http.services.nomad-server.loadbalancer.server.port=4646"
]
{% endif %}
}
# ----------------------------------------- CLient Config
# ----------------------------------------- Client Config
client {
enabled = true
enabled = true
{% if 'pis' in group_names %}
node_class = "rpi"
node_class = "rpi"
{% elif 'macs' in group_names %}
node_class = "mac"
node_class = "mac"
{% elif 'synology' in group_names %}
node_class = "synology"
node_class = "synology"
{% endif %}
reserved {
cpu = 250
memory = 100
reserved_ports = "22"
}
reserved {
cpu = 250
memory = 100
reserved_ports = "22"
}
{% if not is_nomad_server %}
{% if 'linode' in group_names %}
server_join {
retry_join = [{% for h in groups['linode'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
server_join {
retry_join = [{% for h in groups['linode'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
{% else %}
server_join {
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
servers = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
server_join {
retry_join = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
retry_max = 3
retry_interval = "15s"
}
{% endif %}
{% endif %}
meta {
# These are variables that can be used in Nomad job files
PUID = "{{ ansible_user_uid }}"
PGID = "{{ ansible_user_gid }}"
nfsStorageRoot = "{{ interpolated_nfs_service_storage }}"
localStorageRoot = "{{ interpolated_localfs_service_storage }}"
{% if 'macs' in group_names %}
restoreCommand = "/usr/local/bin/service_restore"
restoreCommand1 = "--verbose"
restoreCommand2 = "--job"
restoreCommand3 = ""
backupCommand = "/usr/local/bin/service_backups"
backupCommandArg1 = "--verbose"
backupCommandArg2 = "--loglevel=INFO"
backupCommandArg3 = ""
backupAllocArg1 = "--verbose"
backupAllocArg2 = "--loglevel=INFO"
backupAllocArg3 = "--allocation"
backupAllocArg4 = "--delete"
backupAllocArg5 = "--job"
backupAllocArg6 = ""
{% else %}
restoreCommand = "sudo"
restoreCommand1 = "/usr/local/bin/service_restore"
restoreCommand2 = "--job"
restoreCommand3 = "--verbose"
backupCommand = "sudo"
backupCommandArg1 = "/usr/local/bin/service_backups"
backupCommandArg2 = "--verbose"
backupCommandArg3 = "--loglevel=INFO"
backupAllocArg1 = "/usr/local/bin/service_backups"
backupAllocArg2 = "--verbose"
backupAllocArg3 = "--loglevel=INFO"
backupAllocArg4 = "--allocation"
backupAllocArg5 = "--job"
backupAllocArg6 = "--delete"
{% endif %}
}
meta {
# These are variables that can be used in Nomad job files
PUID = "{{ ansible_user_uid }}"
PGID = "{{ ansible_user_gid }}"
nfsStorageRoot = "{{ interpolated_nfs_service_storage }}"
localStorageRoot = "{{ interpolated_localfs_service_storage }}"
{% if 'macs' in group_names %}
restoreCommand = "/usr/local/bin/service_restore"
restoreCommand1 = "--verbose"
restoreCommand2 = "--job"
restoreCommand3 = ""
backupCommand = "/usr/local/bin/service_backups"
backupCommandArg1 = "--verbose"
backupCommandArg2 = "--loglevel=INFO"
backupCommandArg3 = ""
backupAllocArg1 = "--verbose"
backupAllocArg2 = "--loglevel=INFO"
backupAllocArg3 = "--allocation"
backupAllocArg4 = "--delete"
backupAllocArg5 = "--job"
backupAllocArg6 = ""
{% else %}
restoreCommand = "sudo"
restoreCommand1 = "/usr/local/bin/service_restore"
restoreCommand2 = "--job"
restoreCommand3 = "--verbose"
backupCommand = "sudo"
backupCommandArg1 = "/usr/local/bin/service_backups"
backupCommandArg2 = "--verbose"
backupCommandArg3 = "--loglevel=INFO"
backupAllocArg1 = "/usr/local/bin/service_backups"
backupAllocArg2 = "--verbose"
backupAllocArg3 = "--loglevel=INFO"
backupAllocArg4 = "--allocation"
backupAllocArg5 = "--job"
backupAllocArg6 = "--delete"
{% endif %}
}
} # /client
{% if is_nomad_server %}
# ----------------------------------------- Server Config
server {
enabled = true
encrypt = "{{ nomad_encryption_key }}"
enabled = true
encrypt = "{{ nomad_encryption_key }}"
{% if 'linode' in group_names %}
bootstrap_expect = 1
bootstrap_expect = 1
{% else %}
bootstrap_expect = 3
bootstrap_expect = 3
{% endif %}
node_gc_threshold = "15m"
job_gc_interval = "15m"
job_gc_threshold = "6h"
heartbeat_grace = "60s"
min_heartbeat_ttl = "20s"
raft_protocol = "3"
node_gc_threshold = "15m"
job_gc_interval = "15m"
job_gc_threshold = "6h"
heartbeat_grace = "60s"
min_heartbeat_ttl = "20s"
raft_protocol = "3"
server_join {
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
server_join {
retry_join = ["{{ rpi1_ip_address }}", "{{ rpi2_ip_address }}", "{{ rpi3_ip_address }}"]
retry_max = 3
retry_interval = "15s"
}
}
autopilot {
cleanup_dead_servers = true
last_contact_threshold = "200ms"
max_trailing_logs = 250
server_stabilization_time = "10s"
enable_redundancy_zones = false
disable_upgrade_migration = false
enable_custom_upgrades = false
cleanup_dead_servers = true
disable_upgrade_migration = false
enable_custom_upgrades = false
enable_redundancy_zones = false
last_contact_threshold = "200ms"
max_trailing_logs = 250
server_stabilization_time = "10s"
}
{% endif %}
{% if is_nomad_server and is_nomad_client %}
client {
enabled = true
enabled = true
}
{% endif %}
# ----------------------------------------- Telemetry
telemetry = {
publish_allocation_metrics = true
publish_node_metrics = true
collection_interval = "10s"
filter_default = false
datadog_address = "localhost:8125"
prefix_filter = [
"+nomad.client.allocations.running",
"+nomad.client.allocations.terminal",
"+nomad.client.allocs.cpu.allocated",
"+nomad.client.allocs.cpu.total_percent",
"+nomad.client.allocs.memory.allocated",
"+nomad.client.allocs.memory.swap",
"+nomad.client.allocs.memory.usage",
"+nomad.nomad.job_status.dead",
"+nomad.nomad.job_status.running",
"+nomad.nomad.job_status.pending",
"+nomad.nomad.job_summary.running",
"+nomad.nomad.job_summary.complete",
"+nomad.nomad.job_summary.lost",
"+nomad.nomad.job_summary.failed"]
collection_interval = "10s"
datadog_address = "localhost:8125"
filter_default = false
publish_allocation_metrics = true
publish_node_metrics = true
prefix_filter = [
"+nomad.client.allocations.running",
"+nomad.client.allocations.terminal",
"+nomad.client.allocs.cpu.allocated",
"+nomad.client.allocs.cpu.total_percent",
"+nomad.client.allocs.memory.allocated",
"+nomad.client.allocs.memory.swap",
"+nomad.client.allocs.memory.usage",
"+nomad.nomad.job_status.dead",
"+nomad.nomad.job_status.running",
"+nomad.nomad.job_status.pending",
"+nomad.nomad.job_summary.running",
"+nomad.nomad.job_summary.complete",
"+nomad.nomad.job_summary.lost",
"+nomad.nomad.job_summary.failed"
]
}
# ----------------------------------------- Plugins
plugin "raw_exec" {
config {
enabled = true
}
config {
enabled = true
}
}
plugin "docker" {
config {
allow_caps = [ "ALL" ]
allow_privileged = true
volumes {
enabled = true
}
allow_caps = ["all"]
allow_privileged = true
extra_labels = ["job_name", "job_id", "task_group_name", "task_name", "namespace", "node_name", "node_id"]
volumes {
enabled = true
}
}
}

View File

@@ -7,9 +7,16 @@ ConditionFileNotEmpty={{ nomad_configuration_dir }}/nomad.hcl
[Service]
{# {% if 'linode' in group_names %} #}
User=nomad
Group=nomad
{# User=nomad #}
{# Group=nomad #}
{# {% endif %} #}
{# NOTE: Nomad is running as root rather than the Nomad user due to the Docker driver not being started when cgroups v2 are enabled.
https://github.com/hashicorp/nomad/pull/16063
#}
User=root
Group=root
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config {{ nomad_configuration_dir }}
KillMode=process

View File

@@ -1,21 +1,21 @@
job "backup_local_filesystems" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "sysbatch"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "sysbatch"
periodic {
cron = "0 */8 * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
task "do_backups" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
periodic {
cron = "0 */8 * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
} // /task do_backups
task "do_backups" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
}
} // /task do_backups
} //job

View File

@@ -57,6 +57,7 @@ job "changedetection" {
service {
port = "webUI"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`changes.{{ homelab_domain_name }}`)",
@@ -75,7 +76,6 @@ job "changedetection" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
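
The provider = "nomad" line added here registers the service with Nomad's native service discovery rather than Consul; registrations can be inspected from the CLI (a sketch, using the service name from the job above):

    nomad service list
    nomad service info changedetection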

View File

@@ -73,6 +73,7 @@ job "chronograf" {
service {
port = "chronografPort"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -91,7 +92,6 @@ job "chronograf" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -63,6 +63,7 @@ job "code" {
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -82,7 +83,6 @@ job "code" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -1,109 +1,110 @@
job "diagnostics" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi1"
}
group "diagnostics" {
count = 1
restart {
attempts = 0
delay = "30s"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}
network {
port "whoami" {
to = 80
}
}
group "diagnostics" {
task "diagnostics" {
count = 1
// env {
// KEY = "VALUE"
// }
driver = "docker"
config {
image = "alpine:latest"
hostname = "${NOMAD_JOB_NAME}"
args = [
"/bin/sh",
"-c",
"chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/backups/config_backups:/backups",
"${meta.localStorageRoot}:/docker"
]
} // docker config
template {
destination = "local/bootstrap.sh"
data = <<EOH
#!/bin/sh
apk update
apk add --no-cache bash
apk add --no-cache bind-tools
apk add --no-cache curl
apk add --no-cache git
apk add --no-cache jq
apk add --no-cache openssl
apk add --no-cache iperf3
apk add --no-cache nano
apk add --no-cache wget
tail -f /dev/null # Keep container running
EOH
}
} // task diagnostics
task "whoami" {
driver = "docker"
config {
image = "containous/whoami:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["whoami"]
} // /docker config
service {
port = "whoami"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
resources {
cpu = 25 # MHz
memory = 10 # MB
restart {
attempts = 0
delay = "30s"
}
} // /task whoami
network {
port "whoami" {
to = 80
}
}
} // group
task "diagnostics" {
// env {
// KEY = "VALUE"
// }
driver = "docker"
config {
image = "alpine:latest"
hostname = "${NOMAD_JOB_NAME}"
args = [
"/bin/sh",
"-c",
"chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/tmp:/diagnostics",
"${meta.localStorageRoot}:/docker"
]
} // docker config
template {
destination = "local/bootstrap.sh"
data = <<EOH
#!/bin/sh
apk update
apk add --no-cache bash
apk add --no-cache bind-tools
apk add --no-cache curl
apk add --no-cache git
apk add --no-cache jq
apk add --no-cache openssl
apk add --no-cache iperf3
apk add --no-cache nano
apk add --no-cache wget
tail -f /dev/null # Keep container running
EOH
}
} // task diagnostics
// task "whoami" {
// driver = "docker"
// config {
// image = "containous/whoami:latest"
// hostname = "${NOMAD_TASK_NAME}"
// ports = ["whoami"]
// } // /docker config
// service {
// port = "whoami"
// name = "${NOMAD_JOB_NAME}"
// provider = "nomad"
// tags = [
// "traefik.enable=true",
// "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
// "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
// "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
// "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
// "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
// ]
// check {
// type = "http"
// path = "/"
// interval = "90s"
// timeout = "15s"
// }
// check_restart {
// limit = 2
// grace = "1m"
// }
// }
// resources {
// cpu = 25 # MHz
// memory = 10 # MB
// }
// } // /task whoami
} // group
} // job

View File

@@ -54,6 +54,7 @@ job "freshrss" {
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`rss.{{ homelab_domain_name }}`)",
@@ -73,7 +74,6 @@ job "freshrss" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -0,0 +1,404 @@
job "gitea" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
constraint {
distinct_hosts = true
}
group "gitea" {
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "webui" {
to = "3000"
}
port "ssh" {
to = "22"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. sonarr will not work with
// its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = [
"${meta.restoreCommand1}",
"${meta.restoreCommand2}",
"${NOMAD_JOB_NAME}",
"${meta.restoreCommand3}"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "gitea" {
env {
GITEA__mailer__ENABLED = true
GITEA__mailer__FROM = "gitea@{{ homelab_domain_name }}"
GITEA__mailer__PASSWD = "{{ gitea_smtp_password }}"
GITEA__mailer__PROTOCOL = "smtp+starttls"
GITEA__mailer__SMTP_ADDR = "{{ email_smtp_host }}"
GITEA__mailer__SMTP_PORT = "{{ email_smtp_port_starttls }}"
GITEA__mailer__SUBJECT_PREFIX = "[Gitea]"
GITEA__mailer__USER = "{{ email_smtp_account }}"
GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages" # add `repo.actions` to the list if enabling actions
GITEA__server__DOMAIN = "{{ homelab_domain_name }}"
GITEA__server__ROOT_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
GITEA__server__SSH_DOMAIN = "${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
GITEA__server__SSH_PORT = "2222" # Traefik gitea-ssh entrypoint
GITEA__server__START_SSH_SERVER = false
GITEA__service__ENABLE_NOTIFY_MAIL = true
GITEA__time__DEFAULT_UI_LOCATION = "America/New_York"
TZ = "America/New_York"
USER_GID = "${meta.PGID}"
USER_UID = "${meta.PUID}"
}
driver = "docker"
config {
image = "gitea/gitea:{{ gitea_version }}"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["webui", "ssh"]
} // docker config
service {
port = "webui"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "webui"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
service {
port = "ssh"
name = "gitea-ssh-svc"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.tcp.routers.gitea-ssh.rule=HostSNI(`*`)",
"traefik.tcp.routers.gitea-ssh.entrypoints=gitea-ssh",
"traefik.tcp.routers.gitea-ssh.service=gitea-ssh-svc"
]
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task gitea
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = [
"${meta.backupAllocArg1}",
"${meta.backupAllocArg2}",
"${meta.backupAllocArg3}",
"${meta.backupAllocArg4}",
"${meta.backupAllocArg5}",
"${NOMAD_JOB_NAME}",
"${meta.backupAllocArg6}"
]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
// group "action-runners" {
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "macmini"
// }
// constraint {
// distinct_hosts = true
// }
// count = 1
// restart {
// attempts = 0
// delay = "30s"
// }
// network {
// port "cache" {
// to = "8088"
// }
// }
// task "await-gitea" {
// lifecycle {
// hook = "prestart"
// sidecar = false
// }
// driver = "docker"
// config {
// image = "busybox:latest"
// command = "/bin/sh"
// args = [
// "-c",
// "chmod 755 /local/ping.sh && /local/ping.sh"
// ]
// network_mode = "host"
// }
// template {
// destination = "local/ping.sh"
// change_mode = "restart"
// data = <<-EOH
// #!/bin/sh
// {% raw -%}
// {{ range nomadService "gitea" }}
// IP="{{ .Address }}"
// PORT="{{ .Port }}"
// {{ end }}
// {% endraw -%}
// until [ -n "${IP}" ] && [ -n "${PORT}" ]; do
// echo "Waiting for Nomad to populate the service information..."
// sleep 1
// done
// echo "Waiting for Gitea to start..."
// until nc -z "${IP}" "${PORT}"; do
// echo "'nc -z ${IP} ${PORT}' is unavailable..."
// sleep 1
// done
// echo "Gitea is up! Found at ${IP}:${PORT}"
// EOH
// }
// }
// task "gitea-action-runner" {
// env {
// CONFIG_FILE = "/local/config.yml"
// GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
// GITEA_RUNNER_NAME = "${node.unique.name}-action-runner"
// GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
// PGID = "${meta.PGID}"
// PUID = "${meta.PUID}"
// TZ = "America/New_York"
// }
// driver = "docker"
// config {
// image = "gitea/act_runner:latest"
// image_pull_timeout = "10m"
// hostname = "${NOMAD_TASK_NAME}"
// volumes = [
// "${meta.nfsStorageRoot}/pi-cluster/gitea-action-runners:/data",
// "/var/run/docker.sock:/var/run/docker.sock"
// ]
// ports = ["cache"]
// } // docker config
// template {
// destination = "local/config.yml"
// env = false
// change_mode = "noop"
// data = <<-EOH
// log:
// # The level of logging, can be trace, debug, info, warn, error, fatal
// level: info
// runner:
// # Where to store the registration result.
// {% raw %}file: .runner-{{ env "node.unique.name" }}{% endraw +%}
// # Execute how many tasks concurrently at the same time.
// capacity: 1
// # Extra environment variables to run jobs.
// envs:
// A_TEST_ENV_NAME_1: a_test_env_value_1
// A_TEST_ENV_NAME_2: a_test_env_value_2
// # Extra environment variables to run jobs from a file.
// # It will be ignored if it's empty or the file doesn't exist.
// env_file: .env
// # The timeout for a job to be finished.
// # Please note that the Gitea instance also has a timeout (3h by default) for the job.
// # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
// timeout: 3h
// # Whether skip verifying the TLS certificate of the Gitea instance.
// insecure: false
// # The timeout for fetching the job from the Gitea instance.
// fetch_timeout: 5s
// # The interval for fetching the job from the Gitea instance.
// fetch_interval: 2s
// # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
// # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
// # If it's empty when registering, it will ask for inputting labels.
// # If it's empty when execute `daemon`, will use labels in `.runner` file.
// labels: []
// cache:
// # Enable cache server to use actions/cache.
// enabled: false
// # The directory to store the cache data.
// # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
// dir: ""
// # The host of the cache server.
// # It's not for the address to listen, but the address to connect from job containers.
// # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
// {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
// # The port of the cache server.
// {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
// # The external cache server URL. Valid only when enable is true.
// # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
// # The URL should generally end with "/".
// external_server: ""
// container:
// # Specifies the network to which the container will connect.
// # Could be host, bridge or the name of a custom network.
// # If it's empty, act_runner will create a network automatically.
// network: ""
// # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
// privileged: false
// # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
// options:
// # The parent directory of a job's working directory.
// # If it's empty, /workspace will be used.
// workdir_parent:
// # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
// # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
// # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
// # valid_volumes:
// # - data
// # - /src/*.json
// # If you want to allow any volume, please use the following configuration:
// # valid_volumes:
// # - '**'
// valid_volumes:
// - '**'
// # overrides the docker client host with the specified one.
// # If it's empty, act_runner will find an available docker host automatically.
// # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
// # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
// docker_host: ""
// # Pull docker image(s) even if already present
// force_pull: false
// host:
// # The parent directory of a job's working directory.
// # If it's empty, $HOME/.cache/act/ will be used.
// workdir_parent:
// EOH
// }
// // service {
// // port = "cache"
// // name = "${NOMAD_TASK_NAME}"
// // provider = "nomad"
// // tags = [
// // "traefik.enable=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
// // ]
// // check {
// // type = "tcp"
// // port = "cache"
// // interval = "30s"
// // timeout = "4s"
// // }
// // check_restart {
// // limit = 0
// // grace = "1m"
// // }
// // } // service
// resources {
// cpu = 400 # MHz
// memory = 600 # MB
// } // resources
// } // task gitea-action-runner
// } // group action-runners
} // job

View File

@@ -87,6 +87,7 @@ job "grafana" {
service {
port = "http"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -109,7 +110,6 @@ job "grafana" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -52,6 +52,7 @@ job "headless-chrome" {
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`chrome.{{ homelab_domain_name }}`)",
@@ -70,7 +71,6 @@ job "headless-chrome" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -0,0 +1,101 @@
job "hishtory" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
# README
# https://github.com/linuxserver/docker-hishtory-server
# https://github.com/ddworken/hishtory/blob/master/README.md
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "hishtory" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
to = "8080"
}
}
task "hishtory" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
HISHTORY_SQLITE_DB = "/config/hishtory.db"
}
driver = "docker"
config {
image = "lscr.io/linuxserver/hishtory-server:latest"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // job
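
Rendered job files such as this one can be checked before being submitted to the cluster; a minimal sketch, assuming the template was rendered to hishtory.nomad:

    # Syntax and schema check only
    nomad job validate hishtory.nomad
    # Dry-run scheduling against the running cluster
    nomad job plan hishtory.nomad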

View File

@@ -3,143 +3,149 @@ job "icloud_backup" {
datacenters = ["{{ datacenter_name }}"]
type = "service"
// Need to authenticate within the container by running
// icloud --username=<icloud-username> --session-directory=/app/session_data
// and then entering the 2FA code that is sent to the user associated with the iCloud account.
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "icloud_backup" {
count = 1
restart {
attempts = 0
delay = "30s"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
task "icloud_backup" {
group "icloud_backup" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
// ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
}
count = 1
driver = "docker"
config {
image = "mandarons/icloud-drive"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
"${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
"local/icloud_backup.yaml:/app/config.yaml",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
} // docker config
restart {
attempts = 0
delay = "30s"
}
template {
destination = "local/icloud_backup.yaml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
app:
logger:
# level - debug, info (default), warning, or error
level: "info"
# log filename icloud.log (default)
filename: "icloud.log"
credentials:
# iCloud drive username
username: "{{ icloud_backup_username }}"
# Retry login interval
retry_login_interval: 3600 # 1 hour
# Drive destination
root: "icloud"
smtp:
# If you want to recieve email notifications about expired/missing 2FA credentials then uncomment
email: "{{ email_smtp_account }}"
# optional, to email address. Default is sender email.
#to: "receiver@test.com"
password: "{{ icloud_backup_smtp_password }}"
host: "{{ email_smtp_host }}"
port: {{ email_smtp_port_starttls }}
# If your email provider doesn't handle TLS
no_tls: false
drive:
destination: "drive"
remove_obsolete: true
sync_interval: 172800 # 2 days
filters:
# File filters to be included in syncing iCloud drive content
folders:
- "Scanner By Readdle"
- "Documents by Readdle"
# - "folder3"
file_extensions:
# File extensions to be included
- "pdf"
- "png"
- "jpg"
- "jpeg"
- "xls"
- "xlsx"
- "docx"
- "pptx"
- "txt"
- "md"
- "html"
- "htm"
- "css"
- "js"
- "json"
- "xml"
- "yaml"
- "yml"
- "csv"
- "mp3"
- "mp4"
- "mov"
- "wav"
- "mkv"
- "m4a"
photos:
destination: "photos"
remove_obsolete: true
sync_inteval: 172800 # 2 days
filters:
albums:
# - "album1"
file_sizes: # valid values are original, medium and/or thumb
- "original"
# - "medium"
# - "thumb"
EOH
} // template data
task "icloud_backup" {
resources {
cpu = 900 # MHz
memory = 100 # MB
} // resources
env {
ENV_CONFIG_FILE_PATH = "/local/icloud_backup.yaml"
PGID = "${meta.PGID}"
PUID = "${meta.PUID}"
TZ = "America/New_York"
// ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
}
} // task
driver = "docker"
config {
image = "mandarons/icloud-drive"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
"${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
} // docker config
template {
destination = "local/icloud_backup.yaml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
app:
logger:
# level - debug, info (default), warning, or error
level: "info"
# log filename icloud.log (default)
filename: "icloud.log"
credentials:
# iCloud drive username
username: "{{ icloud_backup_username }}"
# Retry login interval
retry_login_interval: 3600 # 1 hour
root: "icloud"
smtp:
# If you want to receive email notifications about expired/missing 2FA credentials then uncomment
email: "{{ email_smtp_account }}"
# optional, to email address. Default is sender email.
#to: "receiver@test.com"
password: "{{ icloud_backup_smtp_password }}"
host: "{{ email_smtp_host }}"
port: {{ email_smtp_port_starttls }}
# If your email provider doesn't handle TLS
no_tls: false
drive:
destination: "drive"
remove_obsolete: true
sync_interval: 172800 # 2 days
filters:
# File filters to be included in syncing iCloud drive content
folders:
- "Scanner By Readdle"
- "Documents by Readdle"
# - "folder3"
file_extensions:
# File extensions to be included
- "pdf"
- "png"
- "jpg"
- "jpeg"
- "xls"
- "xlsx"
- "docx"
- "pptx"
- "txt"
- "md"
- "html"
- "htm"
- "css"
- "js"
- "json"
- "xml"
- "yaml"
- "yml"
- "csv"
- "mp3"
- "mp4"
- "mov"
- "wav"
- "mkv"
- "m4a"
photos:
destination: "photos"
remove_obsolete: true
sync_interval: 172800 # 2 days
all_albums: false # Optional, default false. If true preserve album structure. If same photo is in multiple albums creates duplicates on filesystem
folder_format: "%Y-%m" # optional, if set put photos in subfolders according to format. Cheatsheet - https://strftime.org
filters:
albums:
# - "album1"
file_sizes: # valid values are original, medium and/or thumb
- "original"
# - "medium"
# - "thumb"
EOH
} // template data
resources {
cpu = 900 # MHz
memory = 100 # MB
} // resources
} // task
} // group
} // group
} // job

View File

@@ -78,6 +78,7 @@ job "influxdb" {
service {
port = "httpAPI"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
check {
type = "tcp"
@@ -89,7 +90,6 @@ job "influxdb" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}

View File

@@ -0,0 +1,98 @@
job "jellyfin" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "jellyfin" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "webui" {
static = "8096"
to = "8096"
}
port "udp1" {
static = "7359"
to = "7359"
}
}
task "jellyfin" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "lscr.io/linuxserver/jellyfin:latest"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
"${meta.nfsStorageRoot}/media/media/movies:/data/movies",
"${meta.nfsStorageRoot}/media/media/tv:/data/tv"
]
ports = ["webui", "udp1"]
} // docker config
service {
port = "webui"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "webui"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 2500 # MHz
memory = 750 # MB
} // resources
} // task
} // group
} // job

View File

@@ -0,0 +1,94 @@
job "ladder" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "ladder" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
to = "8080"
}
}
task "ladder" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
PORT = "8080"
}
driver = "docker"
config {
image = "ghcr.io/kubero-dev/ladder:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["port1"]
image_pull_timeout = "10m"
// volumes = [
// "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/"
// ]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // job

View File

@@ -82,6 +82,7 @@ job "lidarr" {
service {
port = "lidarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "lidarr" {
check_restart {
limit = 0
grace = "10m"
ignore_warnings = true
}
} // service

View File

@@ -47,6 +47,7 @@ job "loki" {
service {
port = "loki_port"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -66,7 +67,6 @@ job "loki" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -67,6 +67,7 @@ job "mealie" {
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -85,7 +86,6 @@ job "mealie" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -57,6 +57,7 @@ job "nginx" {
service {
port = "web"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -75,7 +76,6 @@ job "nginx" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -55,6 +55,7 @@ job "nzbhydra" {
service {
port = "hydra_port"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`hydra.{{ homelab_domain_name }}`)",
@@ -73,7 +74,6 @@ job "nzbhydra" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -1,92 +1,92 @@
job "overseerr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "overseerr" {
count = 1
restart {
attempts = 0
delay = "30s"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
network {
port "overseerr" {
to = "5055"
}
}
group "overseerr" {
task "overseerr" {
count = 1
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/overseerr"
hostname = "${NOMAD_JOB_NAME}"
ports = ["overseerr"]
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/overseerr:/config" ]
} // docker config
service {
port = "overseerr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=overseerr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "overseerr"
interval = "30s"
timeout = "4s"
restart {
attempts = 0
delay = "30s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
network {
port "overseerr" {
to = "5055"
}
}
} // service
resources {
cpu = 1600 # MHz
memory = 300 # MB
} // resources
task "overseerr" {
} // task
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "lscr.io/linuxserver/overseerr:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["overseerr"]
image_pull_timeout = "10m"
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/overseerr:/config" ]
} // docker config
service {
port = "overseerr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=overseerr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "overseerr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 1600 # MHz
memory = 300 # MB
} // resources
} // task
} // group
} // group
} // job

View File

@@ -37,7 +37,7 @@ job "pihole" {
// }
}
task "await_filesytem" {
task "await_filesystem" {
driver = "docker"
config {
@@ -109,6 +109,7 @@ job "pihole" {
service {
name = "${NOMAD_JOB_NAME}"
port = "web"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`p.{{ homelab_domain_name }}`)",
@@ -118,7 +119,7 @@ job "pihole" {
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.piholeRedirect.redirectregex.regex=^(https?://p\\.{{ homelab_domain_name }})/?$",
"traefik.http.middlewares.piholeRedirect.redirectregex.replacement=$${1}/admin/",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,piholeRedirect"
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=piholeRedirect"
]
check {
type = "http"
@@ -130,13 +131,13 @@ job "pihole" {
check_restart {
limit = 3
grace = "10m"
ignore_warnings = false
}
}
service {
name = "piholeDNStcp"
port = "dns"
provider = "nomad"
check {
type = "tcp"
port = "dns"

View File

@@ -51,7 +51,7 @@ job "promtail-syslogs" {
{% raw -%}
clients:
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
- url: http://{{ range nomadService "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
{% endraw %}
scrape_configs:

View File

@@ -84,6 +84,7 @@ job "prowlarr" {
service {
port = "prowlarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -103,7 +104,6 @@ job "prowlarr" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -1,136 +1,136 @@
job "radarr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi3"
// }
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi3"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "radarrGroup" {
restart {
attempts = 0
delay = "10m"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
network {
port "radarr" {
to = "7878"
}
}
group "radarrGroup" {
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. radarr will not work with
// its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = [
"${meta.restoreCommand1}",
"${meta.restoreCommand2}",
"${NOMAD_JOB_NAME}",
"${meta.restoreCommand3}"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "radarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/radarr:develop"
hostname = "${NOMAD_JOB_NAME}"
force_pull = true
ports = ["radarr"]
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
"${meta.nfsStorageRoot}/media:/media"
]
} // docker config
service {
port = "radarr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "radarr"
interval = "30s"
timeout = "4s"
restart {
attempts = 0
delay = "10m"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
network {
port "radarr" {
to = "7878"
}
}
} // service
resources {
cpu = 2000 # MHz
memory = 400 # MB
} // resources
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. radarr will not work with
// its database in an NFS share
} // /task radarr
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = [
"${meta.restoreCommand1}",
"${meta.restoreCommand2}",
"${NOMAD_JOB_NAME}",
"${meta.restoreCommand3}"
]
}
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = [
"${meta.backupAllocArg1}",
"${meta.backupAllocArg2}",
"${meta.backupAllocArg3}",
"${meta.backupAllocArg4}",
"${meta.backupAllocArg5}",
"${NOMAD_JOB_NAME}",
"${meta.backupAllocArg6}"
]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
lifecycle {
hook = "prestart"
sidecar = false
}
} // group
} // /task create_filesystem
task "radarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/radarr:develop"
hostname = "${NOMAD_JOB_NAME}"
force_pull = true
ports = ["radarr"]
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
"${meta.nfsStorageRoot}/media:/media"
]
} // docker config
service {
port = "radarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "radarr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 2000 # MHz
memory = 400 # MB
} // resources
} // /task radarr
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = [
"${meta.backupAllocArg1}",
"${meta.backupAllocArg2}",
"${meta.backupAllocArg3}",
"${meta.backupAllocArg4}",
"${meta.backupAllocArg5}",
"${NOMAD_JOB_NAME}",
"${meta.backupAllocArg6}"
]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job

View File

@@ -81,6 +81,7 @@ job "readarr" {
service {
port = "readarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "readarr" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -32,104 +32,275 @@ job "recyclarr" {
task "recyclarr" {
env {
TZ = "America/New_York"
TZ = "America/New_York"
RECYCLARR_APP_DATA = "/local"
}
// user = "${meta.PUID}:${meta.PGID}"
driver = "docker"
config {
image = "ghcr.io/recyclarr/recyclarr:2"
image = "ghcr.io/recyclarr/recyclarr:{{ recyclarr_version }}"
hostname = "${NOMAD_TASK_NAME}"
init = true
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
]
} // docker config
// template {
// destination = "local/recyclarr.yml"
// env = false
// change_mode = "restart"
// perms = "644"
// data = <<-EOH
// ---
// # yaml-language-server: $schema=https://raw.githubusercontent.com/recyclarr/recyclarr/master/schemas/config-schema.json
template {
destination = "local/recyclarr.yml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
# yaml-language-server: $schema=https://raw.githubusercontent.com/recyclarr/recyclarr/master/schemas/config-schema.json
// # A starter config to use with Recyclarr. Most values are set to "reasonable defaults". Update the
// # values below as needed for your instance. You will be required to update the API Key and URL for
// # each instance you want to use.
// #
// # Many optional settings have been omitted to keep this template simple.
// #
// # For more details on the configuration, see the Configuration Reference on the wiki here:
// # https://github.com/recyclarr/recyclarr/wiki/Configuration-Reference
# A starter config to use with Recyclarr. Most values are set to "reasonable defaults". Update the
# values below as needed for your instance. You will be required to update the API Key and URL for
# each instance you want to use.
#
# Many optional settings have been omitted to keep this template simple. Note that there's no "one
# size fits all" configuration. Please refer to the guide to understand how to build the appropriate
# configuration based on your hardware setup and capabilities.
#
# For any lines that mention uncommenting YAML, you simply need to remove the leading hash (`#`).
# The YAML comments will already be at the appropriate indentation.
#
# For more details on the configuration, see the Configuration Reference on the wiki here:
# https://recyclarr.dev/wiki/reference/config-reference
// # Configuration specific to Sonarr
// sonarr:
// # Set the URL/API Key to your actual instance
# Configuration specific to Sonarr
sonarr:
series:
base_url: https://sonarr.{{ homelab_domain_name }}/
api_key: "{{ sonarr_api_key }}"
delete_old_custom_formats: true
replace_existing_custom_formats: true
// {% raw -%}
// - base_url: http://{{ range service "sonarr" }}{{ .Address }}:{{ .Port }}{{ end }}
// api_key: f7e74ba6c80046e39e076a27af5a8444
// {% endraw -%}
# Quality definitions from the guide to sync to Sonarr. Choices: series, anime
quality_definition:
type: series
// # Quality definitions from the guide to sync to Sonarr. Choice: anime, series, hybrid
// quality_definition: series
quality_profiles:
- name: "HD - 720p/1080p"
reset_unmatched_scores:
enabled: true
upgrade:
allowed: true
until_quality: WEB-1080p
qualities:
- name: Bluray-2160p Remux
enabled: false
- name: Bluray-2160p
enabled: false
- name: WEB-2160p
enabled: false
qualities:
- WEBRip-2160p
- WEBDL-2160p
- name: HDTV-2160p
enabled: false
- name: Bluray-1080p Remux
enabled: false
- name: Bluray-1080p
- name: WEB-1080p
qualities:
- WEBRip-1080p
- WEBDL-1080p
- name: HDTV-1080p
- name: Bluray-720p
enabled: false
- name: WEB-720p
qualities:
- WEBRip-720p
- WEBDL-720p
- name: HDTV-720p
custom_formats:
- trash_ids:
- 85c61753df5da1fb2aab6f2a47426b09 # BR-DISK
- 9c11cd3f07101cdba90a2d81cf0e56b4 # LQ
- e2315f990da2e2cbfc9fa5b7a6fcfe48 # LQ Release Title
# - 47435ece6b99a0b477caf360e79ba0bb # X265
- fbcb31d8dabd2a319072b84fc0b7249c # Extras
- 32b367365729d530ca1c124a0b180c64 # Bad dual lingual groups
- 82d40da2bc6923f41e14394075dd4b03 # No-RlsGroup
quality_profiles:
- name: "HD - 720p/1080p"
score: -1000
- trash_ids:
- ec8fa7296b64e8cd390a1600981f3923 # Repack
quality_profiles:
- name: "HD - 720p/1080p"
score: 5
- trash_ids:
- eb3d5cc0a2be0db205fb823640db6a3c # Repack2
quality_profiles:
- name: "HD - 720p/1080p"
score: 6
- trash_ids:
- 44e7c4de10ae50265753082e5dc76047 # Repack3
quality_profiles:
- name: "HD - 720p/1080p"
score: 7
- trash_ids: # Streaming services, Low Tier
- bbcaf03147de0f73be2be4a9078dfa03 # 40D
- fcc09418f67ccaddcf3b641a22c5cfd7 # ALL4
- 77a7b25585c18af08f60b1547bb9b4fb # CC
- f27d46a831e6b16fa3fee2c4e5d10984 # CANALPlus
- 4e9a630db98d5391aec1368a0256e2fe # CRAV
- 36b72f59f4ea20aad9316f475f2d9fbb # DCU
- 7be9c0572d8cd4f81785dacf7e85985e # FOD
- 7a235133c87f7da4c8cccceca7e3c7a6 # HBO
- f6cce30f1733d5c8194222a7507909bb # HULU
- dc503e2425126fa1d0a9ad6168c83b3f # IP
- 0ac24a2a68a9700bcb7eeca8e5cd644c # iT
- b2b980877494b560443631eb1f473867 # NLZ
- fb1a91cdc0f26f7ca0696e0e95274645 # OViD
- c30d2958827d1867c73318a5a2957eb1 # Red
- ae58039e1319178e6be73caab5c42166 # Sho
- d100ea972d1af2150b65b1cffb80f6b5 # TVer
- 0e99e7cc719a8a73b2668c3a0c3fe10c # U-next
- 5d2317d99af813b6529c7ebf01c83533 # VDL
quality_profiles:
- name: "HD - 720p/1080p"
score: 50
- trash_ids: # Streaming services, second tier
- d660701077794679fd59e8bdf4ce3a29 # AMZN
- a880d6abc21e7c16884f3ae393f84179 # HMAX
- d34870697c9db575f17700212167be23 # NF
- 1656adc6d7bb2c8cca6acfb6592db421 # PCOK
- c67a75ae4a1715f2bb4d492755ba4195 # PMTP
- 3ac5d84fce98bab1b531393e9c82f467 # QIBI
- 1efe8da11bfd74fbbcd4d8117ddb9213 # STAN
quality_profiles:
- name: "HD - 720p/1080p"
score: 80
- trash_ids: # Streaming services, Top tier
- f67c9ca88f463a48346062e8ad07713f # ATVP
- 89358767a60cc28783cdc3d0be9388a4 # DSNP
- 81d1fbf600e2540cee87f3a23f9d3c1c # MAX
quality_profiles:
- name: "HD - 720p/1080p"
score: 100
- trash_ids: # HQ Source Groups: Tier 1
- e6258996055b9fbab7e9cb2f75819294
quality_profiles:
- name: "HD - 720p/1080p"
score: 1700
- trash_ids: # HQ Source Groups: Tier 2
- 58790d4e2fdcd9733aa7ae68ba2bb503
quality_profiles:
- name: "HD - 720p/1080p"
score: 1650
- trash_ids: # HQ Source Groups: Tier 3
- d84935abd3f8556dcd51d4f27e22d0a6
quality_profiles:
- name: "HD - 720p/1080p"
score: 1600
// # Release profiles from the guide to sync to Sonarr.
// # You can optionally add tags and make negative scores strictly ignored
// release_profiles:
// # Series
// - trash_ids:
// - EBC725268D687D588A20CBC5F97E538B # Low Quality Groups
// - 1B018E0C53EC825085DD911102E2CA36 # Release Sources (Streaming Service)
// - 71899E6C303A07AF0E4746EFF9873532 # P2P Groups + Repack/Proper
// # Anime (Uncomment below if you want it)
// # - trash_ids:
// # - d428eda85af1df8904b4bbe4fc2f537c # Anime - First release profile
// # - 6cd9e10bb5bb4c63d2d7cd3279924c7b # Anime - Second release profile
# Configuration specific to Radarr.
radarr:
movies:
base_url: https://radarr.{{ homelab_domain_name }}/
api_key: "{{ radarr_api_key }}"
delete_old_custom_formats: true
replace_existing_custom_formats: true
// # Configuration specific to Radarr.
// radarr:
// # Set the URL/API Key to your actual instance
// {% raw -%}
// - base_url: http://{{ range service "radarr" }}{{ .Address }}:{{ .Port }}{{ end }}
// api_key: f7e74ba6c80046e39e076a27af5a8444
// {% endraw -%}
# Which quality definition in the guide to sync to Radarr. Only choice right now is 'movie'
quality_definition:
type: movie
preferred_ratio: 0.5
// # Which quality definition in the guide to sync to Radarr. Only choice right now is 'movie'
// quality_definition:
// type: movie
quality_profiles:
- name: "720p/1080p"
reset_unmatched_scores:
enabled: true
- name: "720p/1080p Remux"
reset_unmatched_scores:
enabled: true
// # Set to 'true' to automatically remove custom formats from Radarr when they are removed from
// # the guide or your configuration. This will NEVER delete custom formats you manually created!
// delete_old_custom_formats: false
custom_formats:
# Use `recyclarr list custom-formats radarr` for values you can put here.
# https://trash-guides.info/Radarr/Radarr-collection-of-custom-formats/
// custom_formats:
// # A list of custom formats to sync to Radarr. Must match the "trash_id" in the guide JSON.
// - trash_ids:
// - ed38b889b31be83fda192888e2286d83 # BR-DISK
// - 90cedc1fea7ea5d11298bebd3d1d3223 # EVO (no WEBDL)
// - 90a6f9a284dff5103f6346090e6280c8 # LQ
// - dc98083864ea246d05a42df0d05f81cc # x265 (720/1080p)
// - b8cd450cbfa689c0259a01d9e29ba3d6 # 3D
- trash_ids:
# Movie versions
- eca37840c13c6ef2dd0262b141a5482f # 4K Remaster
- 570bc9ebecd92723d2d21500f4be314c # Remaster
- 0f12c086e289cf966fa5948eac571f44 # Hybrid
- 9d27d9d2181838f76dee150882bdc58c # Masters of Cinema
- e0c07d59beb37348e975a930d5e50319 # Criterion Collection
- 957d0f44b592285f26449575e8b1167e # Special Edition
- eecf3a857724171f968a66cb5719e152 # IMAX
- 9f6cbff8cfe4ebbc1bde14c7b7bec0de # IMAX Enhanced
# Unwanted
- b8cd450cbfa689c0259a01d9e29ba3d6 # 3D
- ed38b889b31be83fda192888e2286d83 # BR-DISK
- 90a6f9a284dff5103f6346090e6280c8 # LQ
- bfd8eb01832d646a0a89c4deb46f8564 # Upscaled
- 90cedc1fea7ea5d11298bebd3d1d3223 # EVO (no WEBDL)
- 923b6abef9b17f937fab56cfcf89e1f1 # DV (WEBDL)
- b6832f586342ef70d9c128d40c07b872 # Bad Dual Groups
- ae9b7c9ebde1f3bd336a8cbd1ec4c5e5 # No-RlsGroup
- 7357cf5161efbf8c4d5d0c30b4815ee2 # Obfuscated
- 5c44f52a8714fdd79bb4d98e2673be1f # Retags
- c465ccc73923871b3eb1802042331306 # Line/Mic Dubbed
# Misc
- e7718d7a3ce595f289bfee26adc178f5 # Repack/Proper
- ae43b294509409a6a13919dedd4764c4 # Repack2
# HQ Release Groups
- ed27ebfef2f323e964fb1f61391bcb35 # HD Bluray Tier 01
- c20c8647f2746a1f4c4262b0fbbeeeae # HD Bluray Tier 02
- c20f169ef63c5f40c2def54abaf4438e # WEB Tier 01
- 403816d65392c79236dcb6dd591aeda4 # WEB Tier 02
- af94e0fe497124d1f9ce732069ec8c3b # WEB Tier 03
quality_profiles:
- name: "720p/1080p"
- name: "720p/1080p Remux"
// # Uncomment the below properties to specify one or more quality profiles that should be
// # updated with scores from the guide for each custom format. Without this, custom formats
// # are synced to Radarr but no scores are set in any quality profiles.
// # quality_profiles:
// # - name: Quality Profile 1
// # - name: Quality Profile 2
// # #score: -9999 # Optional score to assign to all CFs. Overrides scores in the guide.
// # #reset_unmatched_scores: true # Optionally set other scores to 0 if they are not listed in 'names' above.
// EOH
// }
# HDR FORMATS
# ########################
- trash_ids:
- 3a3ff47579026e76d6504ebea39390de # Remux Tier 01
- 9f98181fe5a3fbeb0cc29340da2a468a # Remux Tier 02
- e61e28db95d22bedcadf030b8f156d96 # HDR
- 2a4d9069cc1fe3242ff9bdaebed239bb # HDR (undefined)
quality_profiles:
- name: "720p/1080p"
score: -100
- name: "720p/1080p Remux"
# AUDIO FORMATS
# ########################
- trash_ids:
- 6fd7b090c3f7317502ab3b63cc7f51e3 # 6.1 Surround
- e77382bcfeba57cb83744c9c5449b401 # 7.1 Surround
- f2aacebe2c932337fe352fa6e42c1611 # 9.1 Surround
quality_profiles:
- name: "720p/1080p"
score: -50
- name: "720p/1080p Remux"
score: -50
- trash_ids:
- 89dac1be53d5268a7e10a19d3c896826 # 2.0 Stereo
quality_profiles:
- name: "720p/1080p"
score: 120
- trash_ids:
- 77ff61788dfe1097194fd8743d7b4524 # 5.1 Surround
quality_profiles:
- name: "720p/1080p"
score: 80
- name: "720p/1080p Remux"
score: 80
EOH
}
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
resources {
cpu = 100 # MHz
memory = 300 # MB
} // resources
} // task

View File

@@ -0,0 +1,27 @@
job "remove_nzbs" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "batch"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi"
}
periodic {
cron = "*/15 * * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
task "remove_nzbs" {
driver = "raw_exec"
config {
command = "/home/pi/.pyenv/shims/python"
args = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"]
}
} // /task remove_nzbs
} //job
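
The batch job above shells out to a local removeNZBs.py, which is not included in this changeset. Purely as a sketch of what such a script could look like — the URL, API key, and the "paused at 0%" heuristic are assumptions, not the contents of the real script — SABnzbd's JSON API (mode=queue) is enough to prune stalled queue entries:

#!/usr/bin/env python3
"""Hypothetical sketch of a SABnzbd queue-cleanup script (the real
removeNZBs.py is not shown in this diff). Assumes SABnzbd's JSON API."""

import json
import urllib.parse
import urllib.request

SAB_API = "http://localhost:8080/sabnzbd/api"  # assumed local SABnzbd address
API_KEY = "changeme"                           # your SABnzbd API key

def call(params):
    # Every SABnzbd API call is mode=... plus output/apikey query parameters.
    query = urllib.parse.urlencode({**params, "output": "json", "apikey": API_KEY})
    with urllib.request.urlopen(f"{SAB_API}?{query}") as resp:
        return json.load(resp)

def main():
    # Delete queue entries that are paused and have made no progress.
    queue = call({"mode": "queue"})["queue"]
    for slot in queue.get("slots", []):
        if slot.get("status") == "Paused" and slot.get("percentage") == "0":
            call({"mode": "queue", "name": "delete", "value": slot["nzo_id"]})

if __name__ == "__main__":
    main()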

View File

@@ -1,496 +1,529 @@
job "reverse-proxy" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
value = "rpi1"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "authelia-group" {
group "reverse-proxy-group" {
restart {
attempts = 0
delay = "30s"
}
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi"
}
network {
port "authelia-port" {
static = {{ authelia_port }}
to = 9091
}
port "whoami" {
to = 80
}
port "dashboard" {
static = 8080
to = 8080
}
port "web" {
static = 80
to = 80
}
port "websecure" {
static = 443
to = 443
}
port "externalwebsecure" {
static = 4430
to = 4430
}
}
restart {
attempts = 0
delay = "30s"
}
task "authelia" {
network {
port "authelia-port" {
to = 9091
}
}
env {
TZ = "America/New_York"
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
}
task "authelia" {
driver = "docker"
config {
image = "authelia/authelia:{{ authelia_version }}"
hostname = "authelia"
ports = ["authelia-port"]
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/authelia:/config" ]
args = [
"--config",
"/local/authelia/config.yml"
]
} // docker config
env {
TZ = "America/New_York"
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
}
template {
destination = "local/authelia/users.yml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
###############################################################
# Users Database #
###############################################################
driver = "docker"
config {
image = "authelia/authelia:{{ authelia_version }}"
hostname = "authelia"
ports = ["authelia-port"]
image_pull_timeout = "10m"
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/authelia:/config" ]
args = [
"--config",
"/local/authelia/config.yml"
]
} // docker config
# This file can be used if you do not have an LDAP set up.
users:
{{ authelia_user1_name }}:
displayname: "{{ authelia_user1_name }}"
password: "$argon2id$v=19$m=65536,t=1,p={{ authelia_user1_password }}"
email: {{ authelia_user1_email }}
groups:
- admins
- dev
EOH
}
template {
destination = "local/authelia/users.yml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
###############################################################
# Users Database #
###############################################################
template {
destination = "local/authelia/config.yml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
## The theme to display: light, dark, grey, auto.
theme: auto
# This file can be used if you do not have an LDAP set up.
users:
{{ authelia_user1_name }}:
displayname: "{{ authelia_user1_name }}"
password: "$argon2id$v=19$m=65536,t=1,p={{ authelia_user1_password }}"
email: {{ authelia_user1_email }}
groups:
- admins
- dev
EOH
}
jwt_secret: {{ authelia_jwt_secret }}
default_redirection_url: https://authelia.{{ homelab_domain_name }}
template {
destination = "local/authelia/config.yml"
env = false
change_mode = "restart"
perms = "644"
data = <<-EOH
---
## The theme to display: light, dark, grey, auto.
theme: auto
server:
host: 0.0.0.0
port: 9091
path: ""
read_buffer_size: 4096
write_buffer_size: 4096
enable_pprof: false
enable_expvars: false
disable_healthcheck: false
jwt_secret: {{ authelia_jwt_secret }}
default_redirection_url: https://authelia.{{ homelab_domain_name }}
log:
level: info
format: text
# file_path: "/config/log.txt"
keep_stdout: false
server:
host: 0.0.0.0
port: 9091
path: ""
buffers:
read: 4096
write: 4096
timeouts:
read: 15s
write: 15s
idle: 30s
enable_pprof: false
enable_expvars: false
disable_healthcheck: false
totp:
issuer: authelia.com
log:
level: info
format: text
# file_path: "/config/log.txt"
keep_stdout: false
authentication_backend:
disable_reset_password: false
file:
path: /local/authelia/users.yml
password:
algorithm: argon2id
iterations: 1
salt_length: 16
parallelism: 8
memory: 64
totp:
issuer: authelia.com
access_control:
default_policy: deny
networks:
- name: internal
networks:
- 10.0.0.0/8
#- 172.16.0.0/12
#- 192.168.0.0/18
rules:
# Rules applied to everyone
- domain: "*.{{ homelab_domain_name }}"
policy: two_factor
authentication_backend:
password_reset:
disable: false
file:
path: /local/authelia/users.yml
password:
algorithm: argon2id
iterations: 1
salt_length: 16
parallelism: 8
memory: 64
access_control:
default_policy: deny
networks:
- internal
- name: internal
networks:
- 10.0.0.0/8
#- 172.16.0.0/12
#- 192.168.0.0/18
rules:
# Rules applied to everyone
- domain: "*.{{ homelab_domain_name }}"
policy: two_factor
networks:
- internal
session:
name: authelia_session
domain: {{ homelab_domain_name }}
same_site: lax
secret: {{ authelia_session_secret }}
expiration: 1h
inactivity: 15m
remember_me_duration: 1w
session:
name: authelia_session
domain: {{ homelab_domain_name }}
same_site: lax
secret: {{ authelia_session_secret }}
expiration: 1h
inactivity: 15m
remember_me_duration: 1w
regulation:
max_retries: 5
find_time: 10m
ban_time: 15m
regulation:
max_retries: 5
find_time: 10m
ban_time: 15m
storage:
encryption_key: {{ authelia_sqlite_encryption_key }}
local:
path: /config/db.sqlite3
storage:
encryption_key: {{ authelia_sqlite_encryption_key }}
local:
path: /config/db.sqlite3
notifier:
smtp:
username: {{ email_smtp_account }}
password: {{ authelia_smtp_password }}
host: {{ email_smtp_host }}
port: {{ email_smtp_port }}
sender: "Authelia <{{ my_email_address }}>"
subject: "[Authelia] {title}"
startup_check_address: {{ my_email_address }}
notifier:
smtp:
username: {{ email_smtp_account }}
password: {{ authelia_smtp_password }}
host: {{ email_smtp_host }}
port: {{ email_smtp_port }}
sender: "Authelia <{{ my_email_address }}>"
subject: "[Authelia] {title}"
startup_check_address: {{ my_email_address }}
ntp:
address: "time.cloudflare.com:123"
version: 3
max_desync: 3s
disable_startup_check: true
disable_failure: true
EOH
}
ntp:
address: "time.cloudflare.com:123"
version: 3
max_desync: 3s
disable_startup_check: true
disable_failure: true
EOH
}
service {
port = "authelia-port"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`authelia.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Cache-Control=no-store",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Pragma=no-cache",
"traefik.http.routers.authelia.middlewares=authelia-headers"
service {
port = "authelia-port"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`authelia.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Cache-Control=no-store",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Pragma=no-cache",
"traefik.http.routers.authelia.middlewares=authelia-headers"
]
check {
type = "tcp"
port = "authelia-port"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 200 # MHz
memory = 1000 # MB
}
} // task authelia
} // authelia-group
group "reverse-proxy-group" {
constraint {
attribute = "${node.unique.name}"
value = "rpi1"
}
restart {
attempts = 0
delay = "30s"
}
network {
port "whoami" {
to = 80
}
port "dashboard" {
static = 8080
to = 8080
}
port "web" {
static = 80
to = 80
}
port "websecure" {
static = 443
to = 443
}
port "externalwebsecure" {
static = 4430
to = 4430
}
port "ssh" { # Used for gitea
static = 2222
to = 2222
}
}
task "whoami" {
driver = "docker"
config {
image = "containous/whoami:latest"
hostname = "${NOMAD_TASK_NAME}"
image_pull_timeout = "10m"
ports = ["whoami"]
} // /docker config
service {
port = "whoami"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 2
grace = "1m"
}
}
resources {
cpu = 25 # MHz
memory = 10 # MB
}
} // /task whoami
task "traefik" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
CF_API_EMAIL = "{{ my_email_address }}"
CF_DNS_API_TOKEN = "{{ traefik_cf_api_token }}"
}
driver = "docker"
config {
image = "traefik:v{{ traefik_version }}"
hostname = "traefik"
ports = ["dashboard", "web", "websecure","externalwebsecure", "ssh"]
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/traefik/acme:/acme" ]
image_pull_timeout = "10m"
args = [
"--global.sendAnonymousUsage=false",
"--global.checkNewVersion=false",
"--entryPoints.gitea-ssh.address=:2222",
"--entryPoints.web.address=:80",
"--entryPoints.websecure.address=:443",
"--entryPoints.externalwebsecure.address=:4430",
"--entrypoints.web.http.redirections.entryPoint.to=websecure",
"--entrypoints.web.http.redirections.entryPoint.scheme=https",
"--entrypoints.web.http.redirections.entryPoint.permanent=true",
"--providers.file.filename=/local/traefik/siteconfigs.toml",
"--providers.file.watch=true",
"--providers.consulcatalog=true",
"--providers.consulcatalog.endpoint.address=http://${NOMAD_IP_web}:8500",
"--providers.consulcatalog.prefix=traefik",
"--providers.consulcatalog.exposedbydefault=false",
"--providers.nomad=true",
"--providers.nomad.endpoint.address=http://${NOMAD_IP_web}:4646",
// "--metrics=true",
// "--metrics.influxdb=true",
// "--metrics.influxdb.address=influxdb.service.consul:{{ influxdb_port }}",
// "--metrics.influxdb.protocol=http",
// "--metrics.influxdb.pushinterval=10s",
// "--metrics.influxdb.database=homelab",
// "--metrics.influxdb.retentionpolicy=2day",
// "--metrics.influxdb.addentrypointslabels=true",
// "--metrics.influxdb.addserviceslabels=true",
"--accesslog=true",
"--log=true",
"--log.level=ERROR",
"--api=true",
"--api.dashboard=true",
"--api.insecure=true",
"--certificatesresolvers.cloudflare.acme.email={{ my_email_address }}",
"--certificatesresolvers.cloudflare.acme.storage=/acme/acme-${node.unique.name}.json",
"--certificatesresolvers.cloudflare.acme.dnschallenge=true",
"--certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare",
"--certificatesresolvers.cloudflare.acme.dnschallenge.delaybeforecheck=10",
"--certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53"
]
} // docker config
template {
destination = "local/traefik/httpasswd"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
template {
destination = "local/traefik/httpasswdFamily"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
template {
destination = "local/traefik/siteconfigs.toml"
env = false
change_mode = "noop"
data = <<-EOH
[http]
[http.middlewares]
[http.middlewares.compress.compress]
[http.middlewares.localIPOnly.ipWhiteList]
sourceRange = ["10.0.0.0/8"]
[http.middlewares.redirectScheme.redirectScheme]
scheme = "https"
permanent = true
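# Forward-auth middleware: Traefik sends each request to Authelia's /api/verify
# endpoint and redirects unauthenticated users to the rd= URL below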
[http.middlewares.authelia.forwardAuth]
address = {% raw %}"http://{{ range nomadService "authelia" }}{{ .Address }}:{{ .Port }}{{ end }}{% endraw %}/api/verify?rd=https://authelia.{{ homelab_domain_name }}"
trustForwardHeader = true
authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]
[http.middlewares.basicauth.basicauth]
usersfile = "/local/traefik/httpasswd"
removeHeader = true
[http.middlewares.basicauth-family.basicauth]
usersfile = "/local/traefik/httpasswdFamily"
removeHeader = true
[http.middlewares.allowFrame.headers]
customFrameOptionsValue = "allow-from https://home.{{ homelab_domain_name }}"
[http.routers]
[http.routers.consul]
rule = "Host(`consul.{{ homelab_domain_name }}`)"
service = "consul"
entrypoints = ["web","websecure"]
[http.routers.consul.tls]
certResolver = "cloudflare" # From static configuration
[http.services]
[http.services.consul]
[http.services.consul.loadBalancer]
passHostHeader = true
[[http.services.consul.loadBalancer.servers]]
url = "http://consul.service.consul:8500"
EOH
}
service {
port = "dashboard"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file,redirectScheme@file"
]
check {
type = "tcp"
port = "authelia-port"
interval = "30s"
timeout = "4s"
}
check {
type = "tcp"
port = "dashboard"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 200 # MHz
memory = 110 # MB
}
resources {
cpu = 140 # MHz
memory = 100 # MB
} // resources
} // task authelia
} // task traefik
task "whoami" {
driver = "docker"
config {
image = "containous/whoami:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["whoami"]
// task "promtail-traefik" {
} // /docker config
// driver = "docker"
// config {
// image = "grafana/promtail"
// hostname = "promtail-traefik"
// volumes = [
// "/mnt/pi-cluster/logs:/traefik"
// ]
// args = [
// "-config.file",
// "/local/promtail-config.yaml",
// "-print-config-stderr",
// ]
// } // docker config
service {
port = "whoami"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
resources {
cpu = 25 # MHz
memory = 10 # MB
}
// template {
// destination = "local/promtail-config.yaml"
// env = false
// data = <<-EOH
// server:
// http_listen_port: 9080
// grpc_listen_port: 0
} // /task whoami
// positions:
// filename: /alloc/positions.yaml
task "traefik" {
// {% raw -%}
// clients:
// - url: http://{{ range nomadService "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
// {% endraw %}
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
CF_API_EMAIL = "{{ my_email_address }}"
CF_DNS_API_TOKEN = "{{ traefik_cf_api_token }}"
}
// scrape_configs:
// - job_name: traefik
// static_configs:
// - targets:
// - localhost
// labels:
// job: traefik_access
// {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
// __path__: "/alloc/logs/traefik.std*.0"
// pipeline_stages:
// - regex:
// expression: '^(?P<remote_addr>[\w\.]+) - (?P<remote_user>[^ ]*) \[(?P<time_local>.*)\] "(?P<method>[^ ]*) (?P<request>[^ ]*) (?P<protocol>[^ ]*)" (?P<status>[\d]+) (?P<body_bytes_sent>[\d]+) "(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)" (?P<request_number>[^ ]+) "(?P<router>[^ ]+)" "(?P<server_URL>[^ ]+)" (?P<response_time_ms>[^ ]+)ms$'
// - labels:
// method:
// status:
// router:
// response_time_ms:
driver = "docker"
config {
image = "traefik:{{ traefik_version }}"
hostname = "traefik"
ports = ["dashboard", "web", "websecure","externalwebsecure"]
volumes = [ "${meta.nfsStorageRoot}/pi-cluster/traefik/acme:/acme" ]
args = [
"--global.sendAnonymousUsage=false",
"--global.checkNewVersion=false",
"--entryPoints.web.address=:80",
"--entryPoints.websecure.address=:443",
"--entryPoints.externalwebsecure.address=:4430",
"--entrypoints.web.http.redirections.entryPoint.to=websecure",
"--entrypoints.web.http.redirections.entryPoint.scheme=https",
"--entrypoints.web.http.redirections.entryPoint.permanent=true",
"--providers.file.filename=/local/traefik/siteconfigs.toml",
"--providers.file.watch=true",
"--providers.consulcatalog=true",
"--providers.consulcatalog.endpoint.address=http://consul.service.consul:8500",
"--providers.consulcatalog.prefix=traefik",
"--providers.consulcatalog.exposedbydefault=false",
"--metrics=true",
"--metrics.influxdb=true",
"--metrics.influxdb.address=influxdb.service.consul:{{ influxdb_port }}",
"--metrics.influxdb.protocol=http",
"--metrics.influxdb.pushinterval=10s",
"--metrics.influxdb.database=homelab",
"--metrics.influxdb.retentionpolicy=2day",
"--metrics.influxdb.addentrypointslabels=true",
"--metrics.influxdb.addserviceslabels=true",
"--accesslog=true",
"--log=true",
"--log.level=ERROR",
"--api=true",
"--api.dashboard=true",
"--api.insecure=true",
"--certificatesresolvers.cloudflare.acme.email={{ my_email_address }}",
"--certificatesresolvers.cloudflare.acme.storage=/acme/acme-${node.unique.name}.json",
"--certificatesresolvers.cloudflare.acme.dnschallenge=true",
"--certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare",
"--certificatesresolvers.cloudflare.acme.dnschallenge.delaybeforecheck=10",
"--certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53"
]
} // docker config
// EOH
// } // template
template {
destination = "local/traefik/httpasswd"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
// lifecycle {
// hook = "poststart"
// sidecar = true
// }
template {
destination = "local/traefik/httpasswdFamily"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
// resources {
// cpu = 30 # MHz
// memory = 30 # MB
// } // resources
template {
destination = "local/traefik/siteconfigs.toml"
env = false
change_mode = "noop"
data = <<-EOH
[http]
[http.middlewares]
[http.middlewares.compress.compress]
// } // promtail sidecar task
[http.middlewares.localIPOnly.ipWhiteList]
sourceRange = ["10.0.0.0/8"]
[http.middlewares.redirectScheme.redirectScheme]
scheme = "https"
permanent = true
[http.middlewares.authelia.forwardAuth]
address = "http://authelia.service.consul:{{ authelia_port }}/api/verify?rd=https://authelia.{{ homelab_domain_name }}"
trustForwardHeader = true
authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]
[http.middlewares.basicauth.basicauth]
usersfile = "/local/traefik/httpasswd"
removeHeader = true
[http.middlewares.basicauth-family.basicauth]
usersfile = "/local/traefik/httpasswdFamily"
removeHeader = true
[http.middlewares.allowFrame.headers]
customFrameOptionsValue = "allow-from https://home.{{ homelab_domain_name }}"
[http.routers]
[http.routers.consul]
rule = "Host(`consul.{{ homelab_domain_name }}`)"
service = "consul"
entrypoints = ["web","websecure"]
[http.routers.consul.tls]
certResolver = "cloudflare" # From static configuration
[http.services]
[http.services.consul]
[http.services.consul.loadBalancer]
passHostHeader = true
[[http.services.consul.loadBalancer.servers]]
url = "http://consul.service.consul:8500"
EOH
}
service {
port = "dashboard"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file,redirectScheme@file"
]
check {
type = "tcp"
port = "dashboard"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
//cpu = 40 # MHz
memory = 64 # MB
} // resources
} // task traefik
// task "promtail-traefik" {
// driver = "docker"
// config {
// image = "grafana/promtail"
// hostname = "promtail-traefik"
// volumes = [
// "/mnt/pi-cluster/logs:/traefik"
// ]
// args = [
// "-config.file",
// "/local/promtail-config.yaml",
// "-print-config-stderr",
// ]
// } // docker config
// template {
// destination = "local/promtail-config.yaml"
// env = false
// data = <<-EOH
// server:
// http_listen_port: 9080
// grpc_listen_port: 0
// positions:
// filename: /alloc/positions.yaml
// {% raw -%}
// clients:
// - url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
// {% endraw %}
// scrape_configs:
// - job_name: traefik
// static_configs:
// - targets:
// - localhost
// labels:
// job: traefik_access
// {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
// __path__: "/alloc/logs/traefik.std*.0"
// pipeline_stages:
// - regex:
// expression: '^(?P<remote_addr>[\w\.]+) - (?P<remote_user>[^ ]*) \[(?P<time_local>.*)\] "(?P<method>[^ ]*) (?P<request>[^ ]*) (?P<protocol>[^ ]*)" (?P<status>[\d]+) (?P<body_bytes_sent>[\d]+) "(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)" (?P<request_number>[^ ]+) "(?P<router>[^ ]+)" "(?P<server_URL>[^ ]+)" (?P<response_time_ms>[^ ]+)ms$'
// - labels:
// method:
// status:
// router:
// response_time_ms:
// EOH
// } // template
// lifecycle {
// hook = "poststart"
// sidecar = true
// }
// resources {
// cpu = 30 # MHz
// memory = 30 # MB
// } // resources
// } // promtail sidecar task
} // reverse-proxy-group
} // reverse-proxy-group
}

View File

@@ -0,0 +1,101 @@
job "sabnzbd" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "sabnzbd" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "http" {
static = "8080"
to = "8080"
}
}
task "sabnzbd" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
DOCKER_MODS = "linuxserver/mods:universal-cron"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/sabnzbd"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
"${meta.nfsStorageRoot}/media/downloads/nzb:/nzbd",
"${meta.nfsStorageRoot}/media/downloads/temp:/incomplete-downloads",
"${meta.nfsStorageRoot}/media/downloads/complete:/downloads",
"${meta.nfsStorageRoot}/nate:/nate",
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}/startup-scripts:/custom-cont-init.d"
]
ports = ["http"]
} // docker config
service {
port = "http"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`sab.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
// "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "http"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 5000 # MHz
memory = 1000 # MB
} // resources
} // task
} // group
} // job

View File

@@ -82,6 +82,7 @@ job "sonarr" {
service {
port = "sonarr"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -100,7 +101,6 @@ job "sonarr" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

View File

@@ -0,0 +1,97 @@
job "speedtest" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "speedtest" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
to = "80"
}
}
task "speedtest" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
DB_CONNECTION = "sqlite"
APP_KEY = "{{ speedtest_app_key }}"
}
driver = "docker"
config {
image = "lscr.io/linuxserver/speedtest-tracker:latest"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
resources {
cpu = 1000 # MHz
memory = 200 # MB
} // resources
} // task
} // group
} // job

View File

@@ -40,6 +40,7 @@ job "stash" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
STASH_STASH = "/data/"
STASH_GENERATED = "/generated/"
STASH_METADATA = "/metadata/"
@@ -58,6 +59,7 @@ job "stash" {
"${meta.nfsStorageRoot}/nate/.stash/generated:/generated",
"${meta.nfsStorageRoot}/nate/.stash/media:/data",
"${meta.nfsStorageRoot}/nate/.stash/metadata:/metadata",
"${meta.nfsStorageRoot}/nate/.stash/blobs:/blobs",
"/etc/timezone:/etc/timezone:ro"
]
ports = ["port1"]
@@ -66,6 +68,7 @@ job "stash" {
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -73,7 +76,6 @@ job "stash" {
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]
check {
@@ -85,12 +87,11 @@ job "stash" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 4500 # MHz
cpu = 3000 # MHz
memory = 400 # MB
} // resources

@@ -70,6 +70,7 @@ job "syncthing" {
service {
port = "webGUI"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -89,7 +90,6 @@ job "syncthing" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -108,7 +108,7 @@ job "TEMPLATE" {
}
}
task "await-TEMPLATEdb" {
task "await-TEMPLATEEdb" {
driver = "docker"
config {
@@ -158,6 +158,7 @@ job "TEMPLATE" {
service {
name = "${NOMAD_TASK_NAME}"
port = "port2"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
@@ -178,7 +179,6 @@ job "TEMPLATE" {
check_restart {
limit = 3
grace = "1m"
ignore_warnings = true
}
} // service

@@ -9,87 +9,89 @@ job "TEMPLATE" {
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "TEMPLATE" {
count = 1
restart {
attempts = 0
delay = "30s"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
network {
port "port1" {
static = "80"
to = "80"
group "TEMPLATE" {
count = 1
restart {
attempts = 0
delay = "30s"
}
}
task "TEMPLATE" {
network {
port "port1" {
static = "80"
to = "80"
}
}
// env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// }
task "TEMPLATE" {
driver = "docker"
config {
image = ""
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["port1"]
} // docker config
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
driver = "docker"
config {
image = ""
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/"
]
ports = ["port1"]
} // docker config
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
} // task
check_restart {
limit = 0
grace = "1m"
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // group
} // job

@@ -82,6 +82,7 @@ job "TEMPLATE" {
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -102,7 +103,6 @@ job "TEMPLATE" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -70,6 +70,7 @@ job "uptimekuma" {
service {
port = "web"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`uptime.{{ homelab_domain_name }}`)",
@@ -88,7 +89,6 @@ job "uptimekuma" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -0,0 +1,154 @@
job "valentina" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "valentina" {
count = 1
restart {
attempts = 0
delay = "30s"
}
task "valentina" {
env {
PGID = "${meta.PGID}"
PUID = "${meta.PUID}"
TZ = "America/New_York"
VALENTINA_AWS_ACCESS_KEY_ID = "{{ valentina_aws_access_key_id }}"
VALENTINA_AWS_SECRET_ACCESS_KEY = "{{ valentina_aws_secret_access_key }}"
VALENTINA_DISCORD_TOKEN = "{{ valentina_discord_token }}"
VALENTINA_GUILDS = "{{ valentina_guids }}"
VALENTINA_LOG_LEVEL = "INFO"
VALENTINA_LOG_LEVEL_AWS = "INFO"
VALENTINA_LOG_LEVEL_HTTP = "ERROR"
VALENTINA_MONGO_DATABASE_NAME = "{{ valentina_mongo_database_name }}"
VALENTINA_MONGO_URI = "{{ valentina_mongo_uri }}"
VALENTINA_OWNER_CHANNELS = "{{ valentina_owner_channels }}"
VALENTINA_OWNER_IDS = "{{ valentina_owner_ids }}"
VALENTINA_S3_BUCKET_NAME = "{{ valentina_s3_bucket_name }}"
VALENTINA_GITHUB_TOKEN = "{{ valentina_github_token }}"
VALENTINA_GITHUB_REPO = "{{ valentina_github_repo }}"
}
driver = "docker"
config {
image = "ghcr.io/natelandau/valentina:v{{ valentina_version }}"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/valentina",
]
} // docker config
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
group "mongobackup" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
to = "80"
}
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
task "mongobackup" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
AWS_ACCESS_KEY_ID = "{{ valentina_aws_access_key_id }}"
AWS_S3_BUCKET_NAME = "{{ valentina_s3_bucket_name }}"
AWS_S3_BUCKET_PATH = "db_backups"
AWS_SECRET_ACCESS_KEY = "{{ valentina_aws_secret_access_key }}"
BACKUP_DIR = "/data/db_backups"
CRON_SCHEDULE = "0 2 * * *" # 2am daily
// CRON_SCHEDULE = "*/1 * * * *" # Every minute
DAILY_RETENTION = "7"
DB_NAME = "{{ backup_mongo_db_name }}"
LOG_FILE = "/data/backup_mongodb.log"
LOG_LEVEL = "INFO"
MONGODB_URI = "{{ backup_mongo_mongodb_uri }}"
MONTHLY_RETENTION = "12"
PORT = "80"
STORAGE_LOCATION = "BOTH"
WEEKLY_RETENTION = "4"
YEARLY_RETENTION = "2"
}
driver = "docker"
config {
image = "ghcr.io/natelandau/backup-mongodb:v{{ backup_mongodb_version }}"
image_pull_timeout = "10m"
hostname = "${NOMAD_TASK_NAME}"
ports = ["port1"]
volumes = ["${meta.nfsStorageRoot}/pi-cluster/valentina:/data"]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
]
check {
type = "tcp"
port = "port1"
interval = "1m"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
}
} // service
} // task
} // group
} // job
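
Worth noting: the mongobackup group is pinned by CPU architecture rather than by node name, presumably because the backup-mongodb image is only built for amd64, while the speedtest job earlier matches a node name with a regexp instead. The two constraint styles used in this changeset, side by side:

// pin by architecture (mongobackup group above)
constraint {
  attribute = "${attr.cpu.arch}"
  value     = "amd64"
}

// pin by node name (speedtest job earlier)
constraint {
  attribute = "${node.unique.name}"
  operator  = "regexp"
  value     = "macmini"
}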

@@ -58,6 +58,7 @@ job "whoogle" {
service {
port = "whoogle"
name = "${NOMAD_JOB_NAME}"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
@@ -77,7 +78,6 @@ job "whoogle" {
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service

@@ -28,7 +28,7 @@ job "wikijs" {
}
}
task "await_db_filesytem" {
task "await_db_filesystem" {
constraint {
attribute = "${node.unique.name}"
@@ -56,7 +56,7 @@ job "wikijs" {
}
} // /task
task "await_backup_filesytem" {
task "await_backup_filesystem" {
constraint {
attribute = "${node.unique.name}"
@@ -122,6 +122,7 @@ job "wikijs" {
service {
port = "db"
name = "wikijsdb"
provider = "nomad"
check {
type = "tcp"
port = "db"
@@ -131,7 +132,6 @@ job "wikijs" {
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
@@ -180,7 +180,7 @@ group "wikijs_app_group" {
}
} // /task
task "await_filesytem" {
task "await_filesystem" {
driver = "docker"
config {
@@ -225,6 +225,7 @@ group "wikijs_app_group" {
service {
port = "http"
name = "wikijs"
provider = "nomad"
tags = [
"traefik.enable=true",
"traefik.http.routers.wikijs.rule=Host(`wiki.{{ homelab_domain_name }}`)",
@@ -241,7 +242,6 @@ group "wikijs_app_group" {
check_restart {
limit = 3
grace = "30s"
ignore_warnings = true
}
} // /service

@@ -20,6 +20,7 @@ _mainScript_() {
readarr
sonarr
uptimekuma
gitea
)
fi
@@ -131,7 +132,7 @@ _mainScript_() {
BACKUP_RETENTION_HOURLY=2
BACKUP_RETENTION_DAILY=6
BACKUP_RETENTION_WEEKLY=3
BACKUP_RETENTION_MONTHLY=2
BACKUP_RETENTION_MONTHLY=4
;;
esac

vault.yml

@@ -1,118 +1,222 @@
$ANSIBLE_VAULT;1.1;AES256
[encrypted Ansible Vault payload — AES256 ciphertext omitted; not human-readable]