Initial commit

Nathaniel Landau
2022-02-05 16:22:33 -05:00
parent 43e9f4fc59
commit 84958e0ef8
103 changed files with 10138 additions and 23 deletions

21
.ansible-lint.yml Normal file

@@ -0,0 +1,21 @@
---
# Full documentation: https://ansible-lint.readthedocs.io/en/latest/index.html
exclude_paths:
- .cache/
- .github/
- ../../.cache/
- archived_data/
skip_list:
- unnamed-task
- var-naming
- command-instead-of-shell
- meta-no-info
- meta-incorrect
- role-name
- ignore-errors
warn_list:
- experimental
- risky-file-permissions
- command-instead-of-module

16
.gitignore vendored Normal file

@@ -0,0 +1,16 @@
# Ignore macOS junk
.DS_Store
# Ignore Synology junk
*@eaDir*
# Don't push customized information, Galaxy roles, or collections
galaxy-roles
ansible_collections
# Don't sync vault password file
password_file
.password_file
# Ignore caches
.cache

3
.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule ".hooks"]
path = .hooks
url = https://github.com/natelandau/githooks

1
.hooks Submodule

Submodule .hooks added at 144f2bb8c4

80
.vscode/shellscript.code-snippets vendored Normal file

@@ -0,0 +1,80 @@
{
// Place your workspace snippets here. Each snippet is defined under a snippet name and has a scope, prefix, body and
// description. Add comma separated ids of the languages where the snippet is applicable in the scope field. If scope
// is left empty or omitted, the snippet gets applied to all languages. The prefix is what is
// used to trigger the snippet and the body will be expanded and inserted. Possible variables are:
// $1, $2 for tab stops, $0 for the final cursor position, and ${1:label}, ${2:another} for placeholders.
// Placeholders with the same ids are connected.
// Example:
// "Print to console": {
// "scope": "javascript,typescript",
// "prefix": "log",
// "body":
// "console.log('$1');",
// "$2"
// ],
// "description": "Log output to console"
// }
"Comment block": {
"scope": "shellscript",
"prefix": "_c",
"body": [
"\t\t# DESC:",
"\t\t#\t\t\t\t\t$1",
"\t\t# ARGS:",
"\t\t#\t\t\t\t\t\\$1 (Required):\t",
"\t\t#\t\t\t\t\t\\$2 (Optional):\t",
"\t\t# OUTS:",
"\t\t#\t\t\t\t\t 0: Success",
"\t\t#\t\t\t\t\t 1: Failure",
"\t\t#\t\t\t\t\tstdout: ",
"\t\t# USAGE:",
"\t\t#\t\t\t\t\t_nameOfFunc_ \"@\""
],
"description": "Comment block for a function"
},
"value": {
"scope": "shellscript",
"prefix": "_",
"body": ["\"\\${$0}\""],
"description": ""
},
"subshell": {
"scope": "shellscript",
"prefix": "__",
"body": ["\"\\$($0)\""],
"description": ""
},
"_function_() {}": {
"scope": "shellscript",
"prefix": "_f",
"body": [
"_${1:name}_() {",
"\t\t# DESC:",
"\t\t#\t\t\t\t\t$2",
"\t\t# ARGS:",
"\t\t#\t\t\t\t\t\\$1 (Required):\t",
"\t\t#\t\t\t\t\t\\$2 (Optional):\t",
"\t\t# OUTS:",
"\t\t#\t\t\t\t\t 0: Success",
"\t\t#\t\t\t\t\t 1: Failure",
"\t\t#\t\t\t\t\tstdout: ",
"\t\t# USAGE:",
"\t\t#\t\t\t\t\t_${1:name}_ \"@\"",
"\t\t",
"\t\t [[ $# == 0 ]] && fatal \"Missing required argument to ${FUNCNAME[0]}\"",
"\t\t",
"}"
],
"description": "Add a new function"
},
"while read loop": {
"scope": "shellscript",
"prefix": "_w",
"body": [
"while read -r ${1:VARIABLE}; do",
"\t\techo \"\\${${1:VARIABLE}}\"",
"done < <(COMMAND)"
]
}
}

28
.yamllint.yml Normal file

@@ -0,0 +1,28 @@
---
# Find full documentation at: https://yamllint.readthedocs.io/en/stable/index.html
extends: default
locale: en_US.UTF-8
rules:
  braces:
    level: error
    max-spaces-inside: 1
    min-spaces-inside: 1
  comments-indentation: disable
  indentation:
    spaces: consistent
    indent-sequences: true
    check-multi-line-strings: false
  line-length: disable
  quoted-strings:
    quote-type: any
    required: false
    extra-required:
      - '^http://'
      - '^https://'
      - 'ftp://'
      - 'ssh \w.*'
      - '{{'
    extra-allowed: []
  truthy:
    level: error

21
LICENSE

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2022 Nathaniel Landau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

176
README.md

@@ -1,2 +1,174 @@
# homelab-ansible
# Homelab Ansible Playbook
Ansible scripts to configure my homelab using Consul, Nomad, Docker, Traefik, and more...
Repository for managing computers, services, and orchestration on my home LAN via Ansible. **These files are heavily customized for my unique set-up and preferences** and are published in the hopes they are helpful to someone as a reference. Do not expect them to work without heavy customization for your own use.
## Infrastructure
- **[Protectli FW4B](https://protectli.com/vault-4-port/)** running [OPNsense](https://opnsense.org)
- **Cisco SG250-26P** - 26 port managed POE switch
- **Four Raspberry Pi 4B** boards running Raspbian Lite
- **Mac Mini** (2018) used for media conversion and serving, backups, and amd64-only Docker containers (why can't we have multi-arch everywhere, people? Why?)
- **Synology DS16+II** - 8TB in SHR with BTRFS
## Backups
- Most jobs use NFS storage on the NAS for volume mounts. Jobs that require their storage to be available on a local machine are backed up to the NAS using custom shell scripts, which are called as pre/post tasks in their Nomad job files (see the sketch below). These custom scripts are written using these [shell script templates](https://github.com/natelandau/shell-scripting-templates)
- Offsite backups are performed by [Arq Backup](https://www.arqbackup.com) which runs on the Mac Mini and performs nightly backups to B2. Backup restores are tested twice a year based on reminders in my to-do app. _This is NOT managed by this playbook._
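In a Nomad job file, such a script is typically wired in as a lifecycle task. A minimal sketch, with hypothetical task and script names (not taken from this repo):

```hcl
# Hypothetical prestart task that restores service data before the main task starts.
# A matching task with hook = "poststop" would back the data up on shutdown.
task "restore_backup" {
  lifecycle {
    hook    = "prestart" # use "poststop" for a post-task backup
    sidecar = false      # run once to completion, not as a long-lived sidecar
  }

  driver = "raw_exec"

  config {
    command = "/usr/local/bin/service_backup.sh"
    args    = ["--restore"]
  }
}
```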
## Service Architecture
- [Hashicorp Consul](https://www.consul.io) provides a service mesh that allows intra-service discovery via DNS in the form `[service_name].service.consul` (see the example below).
- [Hashicorp Nomad](https://www.nomadproject.io) provides container and service orchestration across all the Raspberry Pis and the Mac Mini
- [Traefik](https://traefik.io/traefik/) reverse proxies requests to services
- [Authelia](https://www.authelia.com/) provides SSO
- Traefik and Authelia are bundled in a single Nomad job named `reverse_proxy.hcl`
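For example, any host running a Consul agent can resolve a service through Consul's DNS interface on its default port 8600 (the service name here is illustrative):

```sh
dig +short @127.0.0.1 -p 8600 influxdb.service.consul
```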
## Ansible Playbook
This playbook adds storage, services, applications, and configurations to a previously bootstrapped server. Configuring server access, users, security, basic packages, generic networking, etc. is out of scope. Once a server is bootstrapped, this playbook will:
- **Update servers**: Packages via Homebrew (macOS) or apt (Debian)
- **Configure shared storage**: Adds shared NFS/SMB storage from a NAS
- **Install and configure specific services** that run on bare metal:
- [Hashicorp Nomad](https://www.nomadproject.io) for service orchestration
- [Hashicorp Consul](https://www.consul.io) for a service mesh
- [Docker](https://www.docker.com) for containerization
- [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) for telemetry
- [Tdarr](https://tdarr.io) for automated video conversion
- Custom shell scripts for backups and housekeeping
- **Sync Nomad and Docker Compose job files** to servers:
- [ASN-to-IP](https://hub.docker.com/r/ddimick/asn-to-ip) - Used by Opnsense to build firewall aliases
- [Authelia](https://www.authelia.com/) - Open-source full-featured authentication server
- [Changedetection.io](https://github.com/dgtlmoon/changedetection.io) - Website change detection monitoring and notification service
- [Diun](https://crazymax.dev/diun/) - Docker Image Update Notifier is a CLI application
- [Grafana](https://grafana.com/) - Operational dashboards
- [Grafana Loki](https://grafana.com/oss/loki/) - Log aggregation system
- [Headless Trunk](https://github.com/alpeware/chrome-headless-trunk) - Headless Chromium
- [InfluxDB](https://www.influxdata.com/) - Time series database
- [Lidarr](https://lidarr.audio/) - Music collection manager
- [nginx](https://www.nginx.com/) - Web server
- [OpenVSCode Server](https://github.com/gitpod-io/openvscode-server) - Run VS Code on a remote machine
- [Overseerr](https://overseerr.dev/) - Media discovery and request management
- [Pi-Hole](https://pi-hole.net/) - Network-wide ad blocking
- [Plex](https://www.plex.tv/) - Media streaming
- [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/) - Log shipping agent
- [Prowlarr](https://github.com/Prowlarr/Prowlarr) - Indexer manager/proxy
- [Radarr](https://radarr.video/) - Movie collection manager
- [SABnzbd](https://sabnzbd.org/) - Binary newsreader
- [Sonarr](https://sonarr.tv/) - TV collection manager
- [Syncthing](https://syncthing.net/) - Continuous file synchronization
- [Traefik](https://traefik.io/traefik/) - Reverse proxy
- [Uptime Kuma](https://github.com/louislam/uptime-kuma) - Monitoring tool
- [Whoogle](https://github.com/benbusby/whoogle-search) - Privacy-respecting metasearch engine
- [WikiJS](https://js.wiki/) - Powerful and extensible open source Wiki software
## Running the playbook
1. Install the required roles: `ansible-galaxy install -r requirements.yml --force`
2. Add the vault password to `./.password_file`
3. Run the playbook: `ansible-playbook main.yml`
### Notes
- Run only specific tags by appending `--tags [tag1],[tag2]`
- Skip specific tags by using `--skip-tags [tag1],[tag2]`
- For a dry run, use `--check --diff`, as in the example below
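Putting these together, a dry run of only the Nomad and job-sync tasks looks like this (tag choice illustrative):

```sh
ansible-playbook main.yml --tags nomad,jobs --check --diff
```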
### Available Ansible Tags
The following tags are available for this playbook:
| Flag | Usage |
| --------------------- | ----------------------------------------------------------- |
| `backup` | Copies backup scripts and configures cron |
| `clean` | Removes nomad_jobs prior to syncing folder |
| `consul` | Installs, upgrades, and provisions Consul |
| `docker` | Installs Docker |
| `jobs` | Syncs orchestration job files (Nomad, Docker-Compose, etc.) |
| `logrotate` | Configures logrotate on the cluster leader |
| `nomad` | Installs, upgrades, and provisions Nomad |
| `packages` | Ensures base packages are up-to-date |
| `prometheus_exporter` | Provisions Prometheus Node Exporter on hosts |
| `repos` | Runs `pull_all_repos` against `~/repos` |
| `sanity` | Confirms we can connect to the target computer |
| `storage` | Mounts NFS storage for cluster |
| `tdarr` | Installs and configures Tdarr |
| `telegraf` | Installs and configures Telegraf |
| `update` | Shorthand for `packages`, `repos`, and `nomad jobs` |
## Variables and Configuration
Variables are contained in two files:
- `inventory.yml` - Server-specific flags
- `default_variables.yml` - Primary variables files
Additionally, a task named `interpolated_variables.yml` creates variables whose values differ based on logical checks (see the sketch below).
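A minimal sketch of that pattern (the task shown is illustrative, not the repo's actual file, though both source variables appear in `default_variables.yml`):

```yaml
# Hypothetical interpolated variable: choose the Consul config dir per OS family
- name: Set consul_configuration_dir based on OS family
  ansible.builtin.set_fact:
    consul_configuration_dir: >-
      {{ mac_consul_configuration_dir
         if ansible_os_family == 'Darwin'
         else rpi_consul_configuration_dir }}
```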
### inventory.yml
Server-specific flags are managed in `inventory.yml`. All flags default to false for all hosts. To enable a flag for a specific host, add the variable and set its value to `true`. Available flags are:
```yaml
# Used to stagger cron jobs
cron_start_minute: "0"
# Run software which needs to run on a single device
is_cluster_leader: false
# Install and configure Consul
is_consul_client: false
# Run this server as a consul server
is_consul_server: false
# Install Docker compose and sync compose files
is_docker_compose_client: false
# Install and configure Nomad
is_nomad_client: false
# Run this server as a Nomad server
is_nomad_server: false
# Install Prometheus on this server
is_prometheus_node: false
# Install Telegraf on this server
is_telegraf_client: false
# Run this node as the Tdarr server
is_tdarr_server: false
# Run Tdarr client on this server
is_tdarr_node: false
# Mount NFS shared storage
is_shared_storage_client: false
# Manage apt-packages
manage_apt_packages_list: false
# Manage Homebrew (MacOS) packages
manage_homebrew_package_list: false
# If true, will always delete dir before syncing new jobs. (run '--tags clean' )
clean_nomad_jobs: false
# Mac computer with an Arm chip
mac_arm: false
# Mac computer with an Intel chip
mac_intel: false
```
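For example, a host entry that enables several of these flags might look like the following (hostname and group name are hypothetical, not taken from this repo's inventory):

```yaml
all:
  children:
    pis:
      hosts:
        rpi1:
          is_cluster_leader: true
          is_consul_server: true
          is_nomad_client: true
          is_shared_storage_client: true
```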
### default_variables.yml
Contains the majority of configuration variables. Specifically:
- Version numbers - Bump a version number for a service which doesn't pull from `latest`.
- Storage mount points
- Service configuration variables
- Apt and Homebrew package lists
## Additional Information
### Nomad Job Conventions
Nomad is used as the orchestration engine. The following conventions are used throughout the Nomad job files.
- Nomad jobs are written in HCL and **contain Jinja template variables**. _Important:_ These job files will not function until synced via Ansible
- There are three types of variables within Nomad jobs:
- Jinja variables populated when Ansible syncs the jobs to disk
- Nomad environment variables populated at runtime
- Nomad variables read from the node's Nomad configuration file
- Template stanzas:
- Indented heredocs, written as `value = <<-EOT`, scan the lines in the sequence to find the one with the smallest number of leading spaces and then trim that many spaces from the beginning of every line (see the sketch below)
- Nomad env variables or Consul key/values used in templates reload jobs dynamically when configurations change
- Tags in service stanzas integrate with Traefik via the Consul catalog
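A sketch tying these conventions together (not one of this repo's job files; the service name, router rule, and Consul key are hypothetical):

```hcl
service {
  name = "whoami"
  port = "http"
  # Tags read by Traefik from the Consul catalog
  tags = [
    "traefik.enable=true",
    "traefik.http.routers.whoami.rule=Host(`whoami.example.net`)",
  ]
}

template {
  destination = "local/app.env"
  env         = true
  change_mode = "restart" # reload the task when a watched value changes
  # <<- finds the line with the fewest leading spaces and trims that many from every line
  data        = <<-EOT
    LOG_LEVEL={{ key "whoami/log_level" }}
  EOT
}
```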

12
ansible.cfg Normal file

@@ -0,0 +1,12 @@
[defaults]
nocows = True
roles_path = ./galaxy-roles:./roles
collections_paths = ./
inventory = ./inventory.yml
stdout_callback = yaml
any_errors_fatal = True
display_skipped_hosts = False
vault_password_file = ./.password_file
[ssh_connection]
scp_if_ssh = True

162
default_variables.yml Normal file

@@ -0,0 +1,162 @@
---
# ---------------------------------- SOFTWARE VERSIONS
consul_version: 1.10.4
influxdb_version: 1.8.4
nomad_version: 1.2.5
prometheus_version: 1.1.2
tdarr_installer_version: 2.00.13
telegraf_version: 1.19.1
traefik_version: "v2.5.6"
# ---------------------------------- SERVICE STATIC PORT MAPPINGS
authelia_port: "9091"
influxdb_port: "8086"
tdarr_node_port: "8267"
tdarr_server_port: "8266"
tdarr_webui_port: "8265"
# ---------------------------------- DIRECTORIES FOR SERVICE LOCAL STORAGE
# These folders must be created, even if empty, to allow mounting Nomad local storage endpoints
service_localfs_dirs:
- influxdb
- lidarr
- prowlarr
- radarr
- sonarr
- uptimekuma
# ---------------------------------- SHARED FILE STORAGE
rpi_usb_drive_mount_point: /mnt/usbDrive
rpi_localfs_service_storage: "{{ rpi_usb_drive_mount_point }}/docker"
rpi_nfs_mount_point: /mnt
rpi_nfs_mounts_list:
- { local: "{{ rpi_nfs_mount_point }}/pi-cluster", src: "10.0.30.6:/volume1/pi-cluster" }
- { local: "{{ rpi_nfs_mount_point }}/syncthing", src: "10.0.30.6:/volume1/syncthing" }
- { local: "{{ rpi_nfs_mount_point }}/media", src: "10.0.30.6:/volume1/media" }
- { local: "{{ rpi_nfs_mount_point }}/nate", src: "10.0.30.6:/volume1/nate" }
rpi_nfs_mounts_remove:
- { local: "{{ rpi_nfs_mount_point }}/downloads", src: "10.0.30.6:/volume1/downloads" }
# mac_autofs_type is one of 'smb', 'nfs', or 'afp'
mac_autofs_type: smb
mac_localfs_service_storage: "/Users/{{ ansible_user }}/Library/docker"
mac_storage_mount_point: /System/Volumes/Data/mnt
mac_keep_alive_file: "{{ mac_storage_mount_point }}/pi-cluster/keepalive.txt"
mac_nfs_mounts_list:
- { local: "{{ mac_storage_mount_point }}/pi-cluster", src: "10.0.0.6:/volume1/pi-cluster" }
- { local: "{{ mac_storage_mount_point }}/syncthing", src: "10.0.0.6:/volume1/syncthing" }
- { local: "{{ mac_storage_mount_point }}/media", src: "10.0.0.6:/volume1/media" }
- { local: "{{ mac_storage_mount_point }}/nate", src: "10.0.0.6:/volume1/nate" }
# Add mounts to remove from auto_nfs to the dict below if needed
mac_nfs_mounts_remove:
# - { local: "{{ mac_storage_mount_point }}/pi-cluster", src: "10.0.0.6:/volume1/pi-cluster" }
mac_afp_or_smb_mounts_list:
- { local: "{{ mac_storage_mount_point }}/pi-cluster", src: "10.0.0.6:/pi-cluster" }
- { local: "{{ mac_storage_mount_point }}/syncthing", src: "10.0.0.6:/syncthing" }
- { local: "{{ mac_storage_mount_point }}/media", src: "10.0.0.6:/media" }
- { local: "{{ mac_storage_mount_point }}/nate", src: "10.0.0.6:/nate" }
mac_afp_or_smb_mounts_remove:
# - { local: "{{ mac_storage_mount_point }}/pi-cluster", src: "10.0.0.6:/pi-cluster" }
# ---------------------------------- SERVICE CONFIGURATION VARIABLES
nomad_configuration_dir: /etc/nomad.d
nomad_plist_macos: "/Users/{{ ansible_user }}/Library/LaunchAgents/nomad.plist"
nomad_jobfile_location: "~/repos/nomad_jobs"
docker_compose_file_location: "~/repos/docker_compose_files"
rpi_consul_configuration_dir: /etc/consul.d
mac_consul_configuration_dir: /etc/consul.d
synology_consul_configuration_dir: /volume1/docker/consul/config
consul_plist_macos: "/Users/{{ ansible_user }}/Library/LaunchAgents/consul.plist"
rpi1_tdarr_file_location: "{{ rpi_usb_drive_mount_point }}/tdarr"
mac_tdarr_file_location: "/Users/{{ ansible_user }}/Library/tdarr"
# ---------------------------------- PACKAGES
apt_packages_list:
- bc
- coreutils
- curl
- dnsutils
- exa
- fzf
- git
- git-extras
- htop
- iftop
- iotop
- iperf
- jq
- less
- lnav
- logrotate
- lsof
- nano
- net-tools
- nmap
- openssh-server
- p7zip-full
- python3-pip
- rsync
- shellcheck
- unzip
- wget
- yamllint
- zsh
homebrew_package_list:
- ansible
- ansible-lint
- bash
- bash-completion
- bashdb
- bat
- bats-core
- coreutils
- diff-so-fancy
- exa
- ffmpeg
- findutils
- fping
- fzf
- gawk
- git
- git-extras
- git-flow
- gnu-sed
- gnu-tar
- gnutls
- gpg
- grep
- handbrake
- htop
- httpie
- iperf
- jq
- lesspipe
- nano
- ncurses
- nmap
- openssl
- pandoc
- prettier
- readline
- shellcheck
- shfmt
- source-highlight
- sqlite
- ssh-copy-id
- tealdeer
- tree
- wget
- yamllint
- zsh
homebrew_cask_install_dir: /Applications
homebrew_casks_list:
- lingon-x


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
30303134363364323437636131396232313837343934613131303933346235643163316632666466
6331613537326562306465323561356531396136346231390a653966326662376561373462376536
39353666623933616265303761353237346439656431666263316230633132363166663733633064
3539636539653130340a303532373663353861336131656138633434306336376233316632323833
37363831643563333430346239323961376161306231326461333262333037323133323463623064
63343063383964653165393265613135333037316266623636313135313130306534396262373232
36626562653961306536363937363032383633636230666633383032623333653466663135623666
32623963626235626365383637616265623233306331376530383765666362373434303135613637
33383131386238303866313436316137656632306663303235643431373762383238663031313231
66336261653561653434343139653231656633306630363935383830616434313963323963376661
36323731333664633139333539306137633932323236313137643562393833383533303733366336
64623462336636373562623035613262373634323265363330366336353936613531643037316236
37363266616362373764613530646231613566306432656236623034643139666430623539303936
31333234323033616231643264616139326238303836363035373938653531623563636531303166
65646238376464663763313034306536323935366263383265396236636266353631343538343465
36313837653839303861


@@ -0,0 +1,59 @@
$ANSIBLE_VAULT;1.1;AES256
34376638653033356530616235616233663134623162636262383262393139626339643839663062
6166653238343034373733383439653337346531623735650a343066386362316639386434303439
66623032653439633361303166653062313962663939353334383866313436343937333234613366
3438623336633733350a396634316562656133373065626432386665373662383630613863633737
37616330393363343438623361393334623430643465313936626530333661633237316538663566
31303638383962656238636233623366343166626433353134643436333337323766353732363038
61313438396536613637316236376663623831653630303334323536356139386462343334353733
66613835386664313036623539323938633762353261313231616461663262333961616363396432
34396462393730623764613430633132346365623264643261393565326335643762383339333839
64643466653362643539653434356531353664633631326364366330633830306630373663323838
64306231646661366164663636303636663765336136623833383637636362333830393635356439
63333436336233363436633733663866626337373734653031336133643939653763633734633939
63336635333939336335646163343438323763356532623037653137336364653034376634386132
33626461393136636264666563383032623638346162326630353433636537353839663032616662
35316364626331366261663261656437636635373863636430633834646237626237623138333136
33363064323535353238363336656633353261363331356265333338633734653130383632666266
64363962336138366638613137623035326139666134396536343762343732646136663935326234
34656264393834643639613163663165323163643239393738303864306530343333613735333061
30396565656134366232393137303532633633313865383862303465323465346462323235363632
66633035343139313238623763633339333262353730633934343432623636613762386137623631
39393238316666613965393232393131653639643431393136383838353762323538366137323537
39616162646366333835333063653332336161383766383336333230326332333564356233343331
30386438663565396332656132316465383864626435623439306563363962353838323937383936
65613436633031323032653730363765366665383862643233666466636231636363376164383731
39663663643933396635326431306334303862313663633137313135363334343438633338363838
30356165323837393262303564306133653232343734373737353732663439626263653366636563
61343063383763653866656435633564336535356437306262643064356165373866633066626435
66626332313039336262626230663264306236366333343231633163613065316165653261383135
61356361383062346236613561393237316434333931336134623164366461313963626431666362
39383364633731353462396137356435303335656633356232346339326638656262343333626436
66633734303964336438386161623031636639663531386135613734653734386336323432333833
66343739616633336137356561316164643831336235336364303863323962623064643131363664
63643539316365306563623736633234633936316339336334636236303131383265646638366162
61323561323466326635393531366263333966346239303664303665313863303335373961373238
66393732313036306132326364643563393430613637333530616262336562633638633966353234
33643132383633613966303366376431663035303062663361316561366438363134323934363332
62356266393238633633636563396536313966323263393130313533343432363132323433393565
66343864303961363138343133343832663731396237643237343066383962343733326364666431
61376335343262333430336236613264663766376437306639653666663064613263303936366562
65613266663031306133386165346237616437363764383664326236643138396561346337653136
61386338343564663366303730376362376461303763666165356361666434363536626335623931
31313665353566623766636166663931366235393162343862393132626462663161613436373130
34366562316635366132333736656239396465643835643665366535393261316539373163316137
61616136626163636236353266363565336435393262613062643566323034626366353764313037
30333937616330373037393764333934303135636665316434383861306462663633393738393137
62653263363736396139346265663939616236366637306665353532333762346261323261626339
37636439393630313164396238633439663062353165353332373531663763353261666438333138
35653639373536663263363065346637363539303030643534393332306336306234363431366531
34303362313630363731313736613134343334396463383266396231313637393533353631626263
37346561656161336635666664643632623964643433333261396237393762336636623230666231
62323966306565376632363837323737666231303334326235633664646431663330313866626131
38343136373735653866373063636564333435666136303031333638626639633864313132656234
64646162666638636333656165346432313538663132313832623234303934346665313232656462
39313936626261303630396136663936633835623733396336373166323137313562656238316131
39393966346163393761303838636231326462653634646261353863646538393834313331313636
32633565326235633565396234396363633131333034363265633164346231336139656437626364
64653236356664373566656131383436326662653865326161653738303064353063396263613836
6336396334346366376261393534643362383864383430646131


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
30336236353230313637666437663037386434306132373730386366353039393838313937656438
6264323738383364383766303166343834636661373934610a373032353232643465353234363038
35383063633233313963353063316262326335333661313865623132386236376535646361333536
6361626132393136650a643530306535306564393236626362643038643831653837666539626538
62616439373161646165343465396335646665643033623664613838303066653735613030376662
38383261653366336662623337363737323738636339316464643932346366383832396639363137
31306433343635363663643234383330653833666663336639666434333166663934353263633837
35316536313437363838366361643830353036643033373361396137316265633933323432646538
37643561393438626132316637626334623463646230316663613739386638653831623337643134
39313364386130336330666461666362386232366535386639333930366266666232323862343462
33623165363131653138633635353431343630623834343132646135323039336631383038373536
65376462393636376639643437656363366533613032313330343934356364386234306433623634
39653638646330643237333335653132306666643063323539653237643366396631326462346234
33383639313963323963636134643232396463303963396566643432653664656231386534653266
33376664366464616630356664376264376430356163356463623034316363623936366163373165
31613566313531383734


@@ -0,0 +1,54 @@
$ANSIBLE_VAULT;1.1;AES256
65373963656433613964383062303964333463616463623137633934383036303430343366663839
3730363661623735633262383665643038376162626338300a646565323364313233626161303263
63626538313263363333633831303030363166643335313730636666396138343464646430353935
3530383131393730380a336433653064343634393066353065383532646461646266623638323463
35366339623766383761303966336636393439393330343936313865636434643432613633373638
33343331306130366630343335303235356533303861616635386166376636343637306361353436
63383238636336353137393636653937346262663536626431646466633536393161636466373634
35313664633066356364636462656132323831386333303639363339363634356237663234616438
61366231393962653837663764633332653861343036346562313263303963373062613034316562
32366537366137613437653937353963636164323933653337386538393835316164613933613333
66383431313638356264323531653236396336636632656131326638663533633833343132663931
35656461646139386333633735636134396366633133336231663430613662663439376533643138
36333665383065316233633836353763643734353932646565346361613035656238373662333163
61343336646664386337383563396333343765306261356561313531393231663130643338333863
66303938373361303663306438393636353630326433663036616239663332613461663336396566
31643932666336366430393932313937376165353938643463333936643631363230656631633633
65376661646638303734623334626166313066646334633736653564643735383562316434633664
62363637333635363130346366366432663866323931343535633433373838376138306263613237
37336136646431616666643539316434666464396333383936643134633563346434313639373336
34333838656632333466653531356361633333666131633035613361633237653365333037333862
34363162656561343137653764376139373032653237316432383436633830623864613030363839
61313735363032323465656362346366633830386233643331323966383230646564333562373866
39356435393964353732363633646139396562386535346134663031343938363031323130316535
64346435333466366264646331376233313961346332303339643739636336386464353236316632
64346162326334376566333066383364333539636130363863656661666263666230306333623635
37623333366466336334613132333033623461303739333365373233613863336165623538613834
34353035323438663534343366353565386230623735623463643736656339656138393561363838
37373036333630396634396431613735373830333535383363316133393366376434643636643739
38303436326236333363383332633233623364346231646430646337333563623164323732363362
62323338366438663766336131643133353663363638376564613936303334626636366331373536
36373135666536643236633932656437616134383165323630303730633236343432316235363862
61303964336335626538666235306639343735306365656162333431356664643563653463663239
37323363386638303761393532356430343363326162343436653239636664636164393738653363
31616165616634366261386532393663363966313464306334373466346464653932396464346136
64356230636136376566306265326561393332326466306530323865633163303032383063653431
36323731353939666234336166336239323266313361353964383139396564373330303531666435
66396636323662336333336436376637393938336365666339653436613538326132346337643431
39336539333565346264313961613232353364376539616136363165383966623737363964303961
37633561333235346562356130326663366631366262646163626236623933613130656637643864
36336564373230626337356462323864303730366137613632373536323635636334623134333036
32356662646434366331626465363662323736653939663236393763633465353934613432393133
61323665396539623930383031666433616465636165316133666261663135626430653735346264
33306232343339353165393538653036383639306235656533336434373539356235633264666461
65643464333134653264356239393833393239343731356366393536356363653232633465376338
30343435373262393530333336653663373364633462313530623939626466383365393033343933
38356137306163303431653965346361363935646637646539303161366137633037393436313331
36306534306235333338646534326364626535643839613835356163396265643430633933376232
32613536303338356138643866653938366561613535666566623433336366643361346238393337
61623134616536363961303364303331323230363163393531323035356331343332623733633766
39613061623635656634376665396266613866313361636366663134623936623861383334643361
36613662633837656130306638373763396363393966383163356563386331643736666638653635
35333265393365396434353733316239653461623462303137383533336637303132333965376566
61353363633261616635666366313865373663326664636437316537396464373733


@@ -0,0 +1,13 @@
$ANSIBLE_VAULT;1.1;AES256
38373339313965336461323636373265373637393535636337626562373431383763346138386130
6564613633623264663835373966616439313364646436310a366531623162623130353339323236
31383364646331333261633738353538663361313130623730623036393534306634623065333335
6336626163653033310a383132366162666434653461396130643034343861633462373532373535
35613162333365373739313063393865356561636437623634303864376630373737343930653062
30366634386132646232353132303831363364363131653433363838396565646139306232336566
63333866373364613239353330373263613863306136393932383539306134646639326233313761
63666163656265663633326564643864343639636136373735353731353431313866646335333139
33653764633333656631323865663639613735303430643230663862376631613437346564393631
39303231363839663834616439643632613331373735393834626665323831646165303738386132
31326633613030356338323133643031666666303730346636393134393930383462653637393830
61306131646563626565


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
36316264663735643835613962666266363839626566356439343937616430646431323637303462
3963346362316139323039666664613737343363663938330a346333323164666330366137383931
38356164333732633166383336306636373633333064393665636465316538393735393732306263
3666623932623331310a303735326530356661333735653033326236313637633334346335383437
66343464356436353962613866316462346162643534663732336664366634356661613165616135
37343464616463313835356637623531323939356565333966383062643334626434643532393136
30326335303838306531306534323863383263343661333939393966353934313663623933376666
32653435316362653631653234333261303137333831373037306266383061313135333033373639
35383665373737646431333034653330306637316362623162323464333937626632353162363538
31393238323362643363643631323531363532666366373063343434656630366363616164343564
66306264396135396538663966613966646534303235373738326234373030336132373163386232
62396338636233376163383632623030323934363863383733626333613430313332373138666566
66653464663132333466396233616339366438376166393935363965663738356639663165303561
66313134383162373564303838356137313536633465353763336363326364313961366534383966
38363364313961386262613563656330633133306432383464643530323266333139316636353834
62303232343761383765

22
files/certs/nomad/cli.csr Normal file

@@ -0,0 +1,22 @@
$ANSIBLE_VAULT;1.1;AES256
66373835646438666239653764656335366633633232316231336365393037343661636139633836
3336626134623162313762376237373231356232626334380a633538303638366161353833396331
61313731646136313862633630613035643236633863363463393730646538643666393562313735
3261386231343232340a346332343065393836633637636332363232653964343636383863366334
35313537323131623365363231663731323662323030306162323939366462663662363631363561
37356231353739386135323636613734366333653233653862613133333032383432613834343162
63393162353538333862663062383030653234623732643264613565393831353634626133396434
65613166353666366134653865373765363530363533383639633864373038646661303932626466
33323066333935323465393361396164353430373837323137396332323038656534636436366438
33646365623835383863643966353335323763376265343364666334306435386266313061353964
39326137316366613965306135346432333438393137363962366232306638666633306332623930
34656662383838646439656636623631666566336263363163666231343538383963633134366262
36366665636132623532323661633637346664336332383636626236653738383433316534636434
66313338356632633636356262383633656464383532313264306464393139616533343932353530
64373062363137666166313837366162383233633030393362373836373165643932346665653363
36333138306437343263613965386638393033386535616138363433323230393564396231366634
39346438373763643438323438633136346364313266323563663035363839313961653530353466
63343934333462656635653531653838663032666339633837663539633139613061633264366461
34623561353330636538373865356335393234613865663965636262333532316234646330333530
34306438396564626638623265386565383735303365383735633961663266633766333666633437
6432

39
files/certs/nomad/cli.pem Normal file

@@ -0,0 +1,39 @@
$ANSIBLE_VAULT;1.1;AES256
36356332313236303863343636323336633232646464346636386433363936323464613831343034
3537343935636631326133393138626233646631616338610a366464303537333035366231303236
30356139353363303737323566633538303833326365633265616130393462626438313461306537
6536336437353364310a333134333339396134323937666639376562323334366436636131613435
65646362393437666233303235363838326663376332336132393364636232323939346635363830
31353430656136366565393563376538613031396437333763396138313036366164633932643532
39623965316165653636333465636161363939653839396563346261363232666537313132323764
62653963333261373132343965663539353134333634353264316532323732613361393433386561
65303237303235343939623132616264303266633936653339303066633633346466306637306330
61356138323139363537353836326437393130356165343039323130343331373263653833343466
34663838653361633836306436333263616435326132316461353931663437393466646662383038
37646139333437306534366634636566353337353337376533363630326135303832633361386664
36356536633536623463376637313437633939623434636334616534333839303261323966303761
62376430653538633834633130353762323163633036626333383434306231326665386432333030
31303561303136303532386362363431343137633336626136336362653863333237363233326638
65626163383732323534396162666539343238393938663734306634386238306638663037376433
32343466393638643764383034613130633964366563323333343831353834666263646335636365
37303633666432666637633266346438613832666438386333303535626162663336636637366263
39376338353665376166363337386133363364363165383463373231313264306634313661623638
65373831386466613239666534313236323230363331363331613366633030616636373366303762
39313330303665326462653030373839623130643833343730383135353030393237343832626432
34356136333339323334633939633666366664393433663461646139633339633761646537633238
30636135346135626161333738666331353466373861363666613332323037623139393065346362
62653261336637333837313030616564376234363637373030663262396664613731326130343538
32663730323863353933353264303361356337653965633632623461303035633030643939396230
62363234633434333330346132333533303833303231616631656165643365393833356331346430
34306334363262373333363331323536656166656638653239373130633036633630373134353964
35383731656138313961663039396134313139383835366637373234346165383538313931356264
31616435393730366561366162633434303332333734636234343063326461636264333231393634
65346338633236366237653631656561386239376261623064383535386530356664643666363230
62653864656538616236333131343631343039626335363462396437656366346132303462393530
66633362343661613462653861316337353963373037376361323163613163356532333136646363
36653531623132326666323561666431656430383735633537656133636630393330643334373462
61396334336165303031663836336264343538656134633837373635323238363136336232396361
34376136313935346363633836316366376439333164386265326561396238656339646239333064
63623263326437663739333866363165316638346130393763643936303262313133656230613638
38326531313838313037393536343139326465663064396232643036303031323436633766383763
6237346436363964366331633064383761326464376166323266


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
30666633323939666138653137653533636566333239346165303637366339663633666166363662
3336356139306162653534616466343764393435633861350a333932393537353932663735383839
36343935343962626537346236626433343238346633623139303738633736653366386232303766
3934633635353361350a663165343036356565326162396164313333653733363939316661633436
34303162343964623034333161343439616261643963646436333663646537366639353666353964
66633736343965346630356438323536333232613066353737306639663562373164333530393536
65393136363764343632376561393033346166373761613230643136323534366330613363373232
65343332373538346432383964366331373262373137653632353932653633366564633263333063
30336663313032646237306639373865663462666331633363376538326666323334326563343539
63383031643461366536666330303431666437636432306234623633393666653862323964646638
63323065393330636561393464376234613330343161383835613036396461306438643961396336
39323932346431383063643334383065343934303861363564633438636631623461346661653332
34336533323738343638396431616433663632306166316337356332616466626363666363613838
37656338353163663364356134353635653637653865656466383663303131326230623635366330
65343438653236616332363935653337623762376338313663373163343163656561326536336234
37376333653931616436


@@ -0,0 +1,26 @@
$ANSIBLE_VAULT;1.1;AES256
61626565613135366230633966646533626265353465316636313339643964343730303030323366
6132353835396263323762373136363233356538303434370a623237313934303563346236613635
61313063353339623437303136633962343630633261643865386237343234306232626630383561
3430613332396631610a343566613833383762303238323364326165663234366530393636376233
30616265303939306333633534646231326134653633343364336638313361626462383230323465
32666662356336613431306137373263373532383935616130613933326535343561303731316335
30663961366236356634373736353531613135366538336539383463643764356437616234653166
61333061626232663630656362643136313331373336353164663734393265336164303935363565
38656561636639333935613238373537333663633138303338623434393339636132303062386165
30346135396163353261666332383365323662303437363033333130646563666337653565346338
65306635336537333430366136326631386266316339653836646337363263346239386332666436
61393231666531613738363037346338633832616137363039333761373561636637636535303563
61616433633734656666646237376535613836336262313362393765396436343135626536333332
35336564383533663864303937356536636232653065343431393765343230323465656665373036
32623533303338396630393536663435633430343765356630306432636238323263653366396566
31343065323634623861663062343437316532346337653864616638653362663965303130343134
34323164393438623434376536393635353661353633326530313061353030336333333135376338
61613361626361623834343330316164306138623034393131656566636565373531653764653235
63383035383465393365303434346162646363366232386664306665623661653936363631393634
38653765303932363661396635353162353561316234613239366334323531353736333036646538
65333831333466383433663964623263633835636534666366383032303963343066646434376335
38383033343530353931663036366131313633643563633631663235336165353139363438353666
64616236313032373034626232396637346165343436613836393361613864333033623233393436
39353935376131366363663439646563303364626630643836323163306639633335363439363236
30303761653732353835363464613937376339623230303633363236336636313730


@@ -0,0 +1,43 @@
$ANSIBLE_VAULT;1.1;AES256
64633662633464336163353665653938313265386465306438303432613934616238653839663336
6131666230663261623138343862386433613831643730370a623165636264326363663266393438
36366635313134663865396433643561306336386264663333323638356530633062343832626362
6536643333653439630a623666376561613963653437303535326433313730346134623430643033
65343934386337396339383332383530373963383531336631343332343166316237383632313332
63363432646237616464616139636533313137663330663730313032633239633866393132386663
33643162363665383266326634613132656663623831306631386233366161386438396464383936
62326333363662376533383834633534336339323063643066323265636535366339623761333239
63666262646136323235613161353162306534306534383232663532333636376363396239663232
39333138356366356437393864303232623733343165646132633865396566646431383931386133
36633261656131633538343131336132613435323533353761323438306266343834366165323831
32393532623162383539343731636238616464313561643535343031393431666465636236373864
36383234656639633137386466323364323265663334396532356662323961366438643563313065
31623432303939346465393962663164313039646134646532613461333361393636613334373736
39383861343531373939653964336163643330343032383533666533383762393864613264316139
34623632613336343530353930383564383532383838363265663532666135336538323639623637
32323436316362343536663636636365353962633835343662613264336266336439623833636264
39613238663837313536343866323165313837656362323532363064336136643435316463373736
34343734393830313664303030303565633939396666323463363935663639356264353035363862
35633039396638653931376134373564343339393639393665666566386666633261653638316666
33316266343039643138373634363661613536643866366130663031336166333866376337343835
64383962333839663161343139663130623830626166333737363336373936663432353536336562
30663836363466396239343838633861376638353131643038313762633733383163656363626662
36383533353666616530633339346461333539383732613462666166343461383232303163636163
64343838646137643835386230383230626235623965316230333634656662366231633763366666
39393930616530623662636161663336653036643265363765656130376365613363636461646164
64613364623839353739653762303966353134396639383463326138633337303337613132326134
61393131343232353963363062323134613639326265623338353030643931626664363635353734
39363237646339366330623239323066363465666235663461366465643838626363326133353137
36386339323939333838643930376336333536386635656361623533613565646162633933306266
38626638353033616535623263363765613236636439303234306666346430373462613666643631
36313932393862386337363631393965376436396630653937663264366531636530356437633763
64356262626563383038643063653537646165613734303964643633643961303535303563363933
66343866633137633235363634653665326134356633613735383437653830636336663263303437
63663230653564643137386564346232626264623537383763313936396666643464393163616230
33633333383063376331643462653363373837333830613362383532383962353432313064623233
35623838633739356665626533366430333535666366383262646336353933373230383235336261
32333234303133643630383334633134396434303561353534623134616539626165616132616331
64373133316238396330316435393832326430376238383266326330613037656433636334666637
38346562353630366637666539616362313239363261363933313534666436383765643934633565
64646534353732393230633838303332366338356137383437396439383261343432656334353933
6535303963366434396464363064616432633138636230616666


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
30313163323036306164633430346437623836386164346437353434323565373864383762623932
6330626463336534333665333563313530356664623933320a393866343230626434376230643536
32663637343633303633356531326636383933643866313337633464316330393262343935373830
3032396661623133660a323336386261363036643561613832323961343162663937363230373936
61316666613933333861373631626436626366323332386236333232656262356439316430633265
33663433636139313061373764346537373137613431623262383262663231356431643534336535
32386332643164303561326262323334323961623831333535366362623038623137646465316364
65626230333737356365333232373338373563616264346266663965396266303632613136313435
36333733306565656134356465373165323837393438393465316363623133363765343537633234
35386530373664383864313430653037396133363064303866666331366235643566636264343732
37636235653065643466313438353236373663666163636235373365323533303334383637666130
62643439376639623330653265613163333934326561613333363232303061356133383234653365
32623334646661616232393061626362643238323433353936363833626532333232616261643032
34653735333938396163346464396538333061343631623861383336343465643230323433306532
63353333363861303137656165303364633166626132616236363536633136616361623432613932
64616333303538643933


@@ -0,0 +1,30 @@
$ANSIBLE_VAULT;1.1;AES256
34393333306164663762313533613638653164623637303333383461653732643536303731666638
6131386531343435343935633764656163353131623562610a663337366532343232393237336232
64326430353962393163633032623131653931613431376638393239356461333963313630396164
3438656161323035340a313965663538343663653731336365326533656662346137356330373734
36393239653935373466333730336463613362383361373862633836616361333236633435306436
33326463616665663463303132666265393633393439386432303366393838643235653131383933
38633461366263653366396539646265623433396432656663616162386362623038356263653230
61363232313866323130336266366336656462636634343265316234623137336138663530366339
38316630653964633533366436316631386436633364656231313531326530346438646135396638
61316638383831646263393661663335653266303963313033633463353566646332663532633530
65316434373465623033373465323564663233383735646331633731393532663662393930386131
35316630306661663337383130313636633137346137666364653663643161646233356330373835
38366434666336653033373039373638653564663235353864313763623664356133386363343233
35343833663735363330316563656166333531636430343236626238376637386531303034323165
38303731646430356338666532643862656633643838653132336630643632633630313834306530
31343361393033326539666364363136616233656132353834383066303735366264316662623038
35316632666135326431343033333736636333373737353066383830333861663632303433353433
62636137626433313661346537333434636362343932333834643232363139636666376636373430
32396132313965366433663564643637363263363065396632313838373561346335323335343165
61366534393665373438653837386563613835383338363362353131333738663039353638346334
39346664323733363937646466303763613434323233303866346462623937323338633638643132
64623134653161336337373138363336633736656666323665306131636630613437373434326237
64643063646332363932646466303832613638383062623638656134653362633037633266633939
38376235663036646637303039386335363436663466613965353135626331636331666633326236
34303862393531303032366434613536383538346636316434373962323935656239336633623535
63613936336237643763613736353262333366363031323532353130646630353335656431366632
64343331383536393633373961623939666362386339303761333362386435633839376365356535
33353137623938643536333066343830633365363762323062343137343766636331616136303864
66376563636466396366623261323930323666333031373266306264633630373963


@@ -0,0 +1,40 @@
$ANSIBLE_VAULT;1.1;AES256
35623831653130633938356633623231356465303736666531356231623234656134613135306230
6336636538396464643136303337333935623764643433390a343538306361326137356237383235
64396366623632636162323861653035653266653739663330616135643732663065313966306634
6339393438363561630a616636386663626635366161633638613161613337616638623936663337
37643761633239653436383130376131643035316135663438626631376561386461346466383636
30656262393463343733623937636336626262383130663438326138656565336430346638653638
30373937633033663834663665316563393264306133366165376132396661306466343961643731
66376237643131356633623539313832656433356233376565663763643335326137396463626539
30396438333866613130333661363031626363633838393662303865613533616263373065393762
66666662636237623963353864346539376461626162356165366133356165373438336435366631
63306435656130323061303834333532326539386564623133356230623864363936666664616530
32633330306133363364636339346461653731343361323964623733336562613730336238623537
32663237326430643861316231333762326663646534633431326165366339613465326262616438
61346637346636333832323037636630313965383633313531666333373265653231313835643731
33343865633564356134326463373438623739356330333863666262343532616164663738656266
64373538616665643830613232373034323138623036386135396561363132396432623439333233
39643066383338373266336465323930356466303637323937383532396464323939363737656634
38383039633764306666393564633430343438333636656232656464616561376639666434383065
63626537633832376536333765363439626261393765656638623566616666313838343666303765
66643432316637626539393262346131643265613030633439656362383461643830343430386336
64313435346630376438633764363961636432363435636634393365316563386439633339323064
36323835363865633862396634346334393037373136633062366530316164323533363261393939
62303530663332393134373731393062393163383230653463653933313965633366646566636462
38366634353134663439373837663434343433623531333865653038353431353161626532663264
35383265323565626136323062366636653632643336376161636337623636333035663262613438
38333161616339373763323236363538326166353139626336633766336236663732363965333032
32646564616133376662396438666364653433393739363632663138623238366366346338373565
62353535663765663335373032393332313037383732306264343538306237303033663139623033
39656565356337393339616634366339363138316162303861633033303765393536633763643835
63366262393662313166656461326138356135653763356362326261623839346263386363373166
36353233346133613961303736383836333766386634393263313335306665353762316131353435
64353630373633333366666638613364396135393130333261666230666366646461306133626333
39346336326665303333323464396565313934396361313232313738653538393535633662646135
31636438616430306230326336636433613162643334363232353938353238393037636333636134
37653164633136373735303030343236613437316533383434653036373834623237663566623632
31363239396562613839356232346665373334656266343938613635373632333165303737343164
66303532313435383831373939366264316230653162626536336231646661323731383539323535
61356261626535336661333831613635376430356662633561373765373033363737316138326465
333666633034333730333137363462326134


@@ -0,0 +1,17 @@
$ANSIBLE_VAULT;1.1;AES256
36363530323232636238366533353530383364656162356131303335386263386364396533656264
3461613435353336646437346434646532373733663531650a643361386533393636376533613030
32626336366262396466636161616161393662616633633335336136653830326165346537636338
3839613763336532300a666636343239643438643465623237343236363363386138646662343335
62613965306133393530656139613230373535376665336566316435616134396263383231383936
66336638626663623663626635306237646536303437396530636662373830316334613932633832
38393264363662346234616535353732356635626133386637336234356665653563356337376333
63363930653430326165336231616366636535313161663530653238383663333039383564323064
39306330306133646466633533366562653834313438316566323833653035383430353335646261
66363864333337623631343738653138393036343330306266613735336431363337386530626433
31383962646336393538313961396531653865393566626137616435373839613133343331313935
38383564663031636561343266613863633565356239646363636363313964323139626234383134
62386162393133666633663439623735386235303933343666373666656133393331323435353464
66353136343439333561363234373666643766633438646663376238343663363136613963653162
39313039326133633536653665373165653733353037303264363337663537376162313466353261
38303033323162643939


@@ -0,0 +1,26 @@
$ANSIBLE_VAULT;1.1;AES256
35623164313039613163636133336163626265636364643831313639396131636332313139396335
6664646363336636303230363532613766393334636432630a663131333037363730313366393364
31656531633964393336363866323162333630663434353535373732333465386163353835373836
3031616538393133330a353265613262633336613930646234313062326133343435663366383936
61336265353263613762306638663566346361656463626634343234626362393363616134333138
31373765336364376433333966333038373962623934353335396463633566633961353035343139
61306235636530376537386637343038336137626466356265386338306137666436353434633161
30363933656133356237363661643333333363326431376239633964623338393933343233623265
35366533313663386432666466613938653263323463373864663337333332326539343839633633
61643963643037656236373164633437396537363965313966336663326234343665373238306461
64643435363430633266333033633934343837393235363937653364333965383131653139313530
35316133623338333933326339383061316266663630656132346132633638643335623230636563
62376337303132616261326437316235316538336561623339383462353461363433643833393438
39343761393065333939303664633361633139643765663965346361336565323464653238356464
61303162336265623761323437303338633530636561346339303437346366383537323738373563
64326530313039343133323137393363316465383064303933303537383037376532373066343037
64666333306366353863303839613335663263333838326364386233383731373335333630633036
31336364393861643531336331363939616166356164663161336435616239363066626338653863
37653633316262393766393463613763316436633465356234326565316539633537666538383135
63396633396365316531613366643239633662366633613034373737323661313565386334383666
35353735306533643835353537656331373434643132333530343463303466363933383663663364
64386633643831303737316461663531633437623133313166616462333136393231383239313065
36343635303937393662623633663633613534663937393933373830346630313861373662396265
38663833393133643635343439386461396565373865303532323039303239663836616161623935
34353238653535663935663165326137663762343639343564346663326431373730


@@ -0,0 +1,43 @@
$ANSIBLE_VAULT;1.1;AES256
34306465616539336630613562366536663339636564303738333835356362613532663739643464
6664623436333737653463623961613562393334613231660a316665613530353863396663373231
32306430303065326639343564383262363031373137306664653662326136333933353061373731
3036356237356534620a343434653836373733666135636435383761383333316439363337303439
66346161666633656562643433313930313332306439323535613361303333373762343930356434
30616161323162626364323662393232336661303766396332376234643638616533663534633031
66383165396163626137373731386462333436323037323032366134303664363133343537643165
32313433386332623135336561383638373666363338613061623439393165366435353963613534
39626564626466666432653933653663666330613666393330633735353931313639373537633332
35623836396265306237613365653936623337313962616131316637353162306236626632336533
66396537336439313437326462346166333535353065343037343130653131633832333264343465
31323163656237353337646631326138653362326263326537383137653132323661666631346533
33386331373039613763326366373133663230373331313632303833353061353733363838623239
64613334666266636461633762326631383565373533343166626431316365363935346334646531
33313337623434646534626435333333326533386234663834383661343766313139653262346137
64663034663333663462663863666430396266373964633231633763323139643639313637363731
63343065653461363764306132366535323432303062333263326137346532636234303566326433
62626130323561326534316463343133633362356361373965333665616265316233616538633633
33656133313434316534396432643333363963643137393836313165333965646266623564323531
61633534363465313262393566353733663862353265376563626138643234303565613739386130
33346336643861646635663330373361326265666461656338323962656532613637356535616462
33356562363262336466303563353162343632633639643237313236313831383063653731616135
63643830656432383139363461666362636632613737623436333537613034643961313262396535
33316261623963333837353839353431663361393134656130386137396362613139656563396565
39323362386333646163313565346565653738616162363563613733333038383636386364316664
39343232356434373031396630636136613331323630346437366166343432626131656562323537
66666165653836656437363265393037656266643164303362383337326130383630303362366631
32633636653564653162663033323130623336643231626665353630303031366639353765306239
62666532646635383935346135353963613435656363343063306534323339393233386532303263
35356532316337323264633631653736633731396366663237373035393861663138346537333338
37663264383135626636303163383461313037313330383332636339343661343164633833396238
30663134373431663336343537643635666265303461643435643661343333396533643763636238
32373338666461613939386630303666643461333030663432353938343835373166363332376263
36306133316436633632326362373438643061356638663964393431616165393231346362303164
66333638646136646465663232663866353833303833623765653731643464653065363663616632
65633766333264663634343965313863303337343766306365653464386662333939393835353732
66613133666533663535376337313364643938333939303339646161343162393964613431393431
30383534333165313630613663316639343031346532333933636238313636306238343131663862
36633866313530303634326261396637363031623365663030656231623939376635626265383333
39323133363338643537363265386237623065343162346538346663306334306239343864396261
32393334373439653163343832306365323763653231313631613537323664616264313964323263
3161323630326465333035363461316635326330616337333238

93
handlers/main.yml Normal file
View File

@@ -0,0 +1,93 @@
---
##################################### MOUNT DRIVES
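# The two handlers below shell out to macOS 'automount' to re-read the
# automounter maps after /etc/auto_* changes; the second variant also
# unmounts existing automounts first (hence the extra -u flag).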
- name: Mount shared storage on Mac
become: true
ansible.builtin.command:
cmd: automount -cv
register: automount_output
failed_when: automount_output.rc > 0
when:
- "'macs' in group_names"
- not ansible_check_mode
listen: "mac_run_automount"
- name: Mount and unmount shared storage on Mac
become: true
ansible.builtin.command:
cmd: automount -cvu
register: automount_output
failed_when: automount_output.rc > 0
when:
- "'macs' in group_names"
- not ansible_check_mode
listen: "mac_run_automount_unmount"
##################################### TELEGRAF
- name: (Re)Start telegraf (Debian)
become: true
ansible.builtin.service:
name: telegraf
state: restarted
when:
- ansible_os_family == 'Debian'
listen: restart_telegraf
- name: (Re)Start telegraf (MacOS)
ansible.builtin.shell:
cmd: /usr/local/bin/brew services restart telegraf
executable: /usr/local/bin/bash
ignore_errors: true
when:
- ansible_os_family == 'Darwin'
listen: restart_telegraf
##################################### NOMAD
- name: restart nomad (Debian)
become: true
ansible.builtin.systemd:
name: nomad
enabled: true
state: restarted
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "unload nomad agent (MacOSX)"
ansible.builtin.command:
cmd: "launchctl unload -w {{ nomad_plist_macos }}"
failed_when: false
when:
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "load the nomad agent (MacOSX)"
ansible.builtin.command:
cmd: "launchctl load -w {{ nomad_plist_macos }}"
when:
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
listen: "restart nomad"
- name: "ensure nomad is really running"
ansible.builtin.shell:
cmd: "sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
register: node_status_response
failed_when: node_status_response.rc > 0
changed_when: false
when: "'nostart' not in ansible_run_tags"
listen: "restart nomad"
# - name: "Ensure sure Nomad service is really running"
# ansible.builtin.command:
# cmd: systemctl is-active nomad
# register: is_nomad_really_running
# changed_when: false
# failed_when: is_nomad_really_running.rc != 0
# when:
# - ansible_os_family == 'Debian'
# - "'nostart' not in ansible_run_tags"
# listen: "restart nomad"
##################################### CONSUL

169
inventory.yml Normal file
View File

@@ -0,0 +1,169 @@
---
all:
# Set all inventory-based vars to false. Override on specific hosts.
vars:
# Used to stagger cron jobs
cron_start_minute: "0"
# Run software which needs to run on a single device
is_cluster_leader: false
# Install and configure Consul
is_consul_client: false
# Run this server as a consul server
is_consul_server: false
# Install Docker compose and sync compose files
is_docker_compose_client: false
# Install and configure Nomad
is_nomad_client: false
# Run this server as a Nomad server
is_nomad_server: false
# Install Prometheus on this server
is_prometheus_node: false
# Install Telegraf on this server
is_telegraf_client: false
# Run this node as the Tdarr server
is_tdarr_server: false
# Run Tdarr client on this server
is_tdarr_node: false
# Mount NFS shared storage
is_shared_storage_client: false
# Manage apt-packages
manage_apt_packages_list: false
# Manage Homebrew (MacOS) packages
manage_homebrew_package_list: false
# If true, always delete the Nomad jobs dir before syncing new jobs (run with '--tags clean')
clean_nomad_jobs: false
# Mac computer with an Arm chip
mac_arm: false
# Mac computer with an Intel chip
mac_intel: false
children:
lan:
children:
pis:
hosts:
rpi1:
ansible_host: 10.0.30.91
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi1"
ansible_port: 22
cron_start_minute: "0"
is_consul_server: true
is_consul_client: true
is_nomad_server: true
is_nomad_client: true
is_cluster_leader: true
is_prometheus_node: true
is_telegraf_client: true
manage_apt_packages_list: true
rpi2:
ansible_host: 10.0.30.92
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi2"
ansible_port: 22
cron_start_minute: "10"
is_consul_server: true
is_consul_client: true
is_nomad_server: true
is_nomad_client: true
is_telegraf_client: true
manage_apt_packages_list: true
rpi3:
ansible_host: 10.0.30.93
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi3"
ansible_port: 22
cron_start_minute: "20"
is_consul_server: true
is_consul_client: true
is_nomad_server: true
is_nomad_client: true
is_telegraf_client: true
manage_apt_packages_list: true
rpi4:
ansible_host: 10.0.30.94
ansible_user: "{{ pi_username }}"
ansible_become_pass: "{{ pi_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/rpi4"
ansible_port: 22
cron_start_minute: "30"
is_consul_server: false
is_consul_client: true
is_nomad_server: false
is_nomad_client: true
is_telegraf_client: true
manage_apt_packages_list: true
macs:
hosts:
macmini:
ansible_host: 10.0.0.4
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/macMini"
ansible_python_interpreter: "/usr/local/bin/python3"
ansible_port: 22
mac_intel: true
is_nomad_client: true
is_consul_client: true
is_telegraf_client: true
is_tdarr_server: true
is_tdarr_node: true
manage_homebrew_package_list: true
imac:
ansible_host: 10.0.0.25
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/imac"
ansible_python_interpreter: "/usr/local/bin/python3"
ansible_port: 22
mac_intel: true
manage_homebrew_package_list: true
is_tdarr_node: true
is_shared_storage_client: true
skimmbook:
ansible_host: 10.0.0.21
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
ansible_python_interpreter: "/opt/homebrew/bin/python3"
ansible_port: 22
mac_arm: true
manage_homebrew_package_list: true
is_tdarr_node: true
is_shared_storage_client: true
vpnmac:
ansible_host: 10.0.90.2
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ mac_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/skimmbook"
ansible_python_interpreter: "/opt/homebrew/bin/python3"
ansible_port: 22
mac_arm: true
manage_homebrew_package_list: true
is_tdarr_node: true
nas:
hosts:
synology:
ansible_host: 10.0.0.6
synology_second_ip: 10.0.30.6
ansible_user: "{{ my_username }}"
ansible_become_pass: "{{ synology_become_pass }}"
ansible_ssh_private_key_file: "{{ ssh_key_location }}/synology"
ansible_port: 22
ansible_python_interpreter: /usr/local/bin/python3
is_consul_client: true
is_telegraf_client: true
is_docker_compose_client: true
# linode:
# children:
# linode-cluster:
# hosts:
# testbox:
# ansible_host:
# linode_private_ip:
# ansible_user:
# ansible_become_pass:
# ansible_ssh_private_key_file: "{{ ssh_key_location }}/linode"
# ansible_port:

78
main.yml Normal file
View File

@@ -0,0 +1,78 @@
---
- hosts: all
serial: 1
vars_files:
- default_variables.yml
- vault.yml
pre_tasks:
- name: Run sanity checks
import_tasks: tasks/sanity.yml
tags: ["always", "sanity"]
- name: populate service facts
service_facts:
tags: ["nomad", "consul"]
- name: Run debug tasks
import_tasks: tasks/debug.yml
tags: [never, debug]
- name: populate device specific variables
import_tasks: tasks/interpolated_variables.yml
tags: ["always"]
- name: Ensure we have up-to-date packages
import_tasks: tasks/packages.yml
tags: ["packages", "update"]
- name: Set clean nomad_jobs_dir variable
ansible.builtin.set_fact:
clean_nomad_jobs: true
tags: ["never", "clean"]
tasks:
- name: Configure cluster NFS mounts
import_tasks: tasks/cluster_storage.yml
tags: ["storage"]
when:
- is_nomad_client or is_nomad_server or is_shared_storage_client
- name: Install Docker
import_tasks: tasks/docker.yml
tags: ["docker"]
when: "'nas' not in group_names"
- name: Install and Upgrade Consul
import_tasks: tasks/consul.yml
tags: ["consul"]
when: is_consul_client or is_consul_server
- name: Install and Upgrade Nomad
import_tasks: tasks/nomad.yml
tags: ["nomad"]
when: is_nomad_client or is_nomad_server
- name: Orchestration Jobs
import_tasks: tasks/orchestration_jobs.yml
tags: ["jobs", "update"]
- name: Prometheus Node Exporter
import_tasks: tasks/service_prometheus_nodeExporter.yml
tags: ["prometheus_exporter"]
when:
- is_prometheus_node
- "'pis' in group_names"
- name: Install backup scripts
import_tasks: tasks/backups.yml
tags: ["backup", "backups"]
when: is_nomad_client or is_nomad_server
- name: Install and configure Telegraf
import_tasks: tasks/telegraf.yml
tags: ["telegraf"]
when: is_telegraf_client
- name: Pull repositories
import_tasks: tasks/pull_repositories.yml
tags: ["never", "update", "repos"]
- name: Configure log rotate
import_tasks: tasks/logrotate.yml
tags: ["logrotate"]
when: is_cluster_leader
- name: Install and configure tdarr
import_tasks: tasks/tdarr.yml
tags: ["tdarr"]
when: is_tdarr_server or is_tdarr_node
handlers:
- import_tasks: handlers/main.yml
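# Illustrative invocations (assuming the repo root as the working directory):
#   ansible-playbook -i inventory.yml main.yml                         # full run
#   ansible-playbook -i inventory.yml main.yml --tags consul           # single component
#   ansible-playbook -i inventory.yml main.yml --tags clean            # wipe and resync Nomad jobs
#   ansible-playbook -i inventory.yml main.yml --tags "nomad,nostart"  # update Nomad without restarting it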

3
requirements.yml Normal file
View File

@@ -0,0 +1,3 @@
---
roles:
- name: arillso.logrotate

47
tasks/backups.yml Normal file
View File

@@ -0,0 +1,47 @@
---
# TASK DESCRIPTION:
# Nomad jobs which cannot run with NFS storage use pre-start and post-stop tasks to invoke
# shell scripts that keep the job's filesystem in sync. This task does the following:
#
# 1. Copies a backup and restore shellscript to /usr/local/bin
# 2. Edits the sudoers file to allow the script to be invoked with sudo privileges
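#
# Illustrative only -- the scripts' real interfaces live in the templates and
# the job-name argument here is hypothetical -- a Nomad task could then run:
#   sudo /usr/local/bin/service_restore <job_name>   # in a pre-start hook
#   sudo /usr/local/bin/service_backups <job_name>   # in a post-stop hook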
- name: copy backup shellscript to server
become: true
ansible.builtin.template:
src: scripts/service_backups.sh.j2
dest: /usr/local/bin/service_backups
mode: 0755
when:
- is_nomad_client or is_nomad_server
- name: copy restore shellscript to server
become: true
ansible.builtin.template:
src: scripts/service_restore.sh.j2
dest: /usr/local/bin/service_restore
mode: 0755
when:
- is_nomad_client or is_nomad_server
- name: ensure nomad user can run sudo with the restore script
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nomad ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
when:
- is_nomad_client or is_nomad_server
- "'pis' in group_names"
- name: ensure my user can run sudo with the restore script
become: true
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "{{ ansible_user }} ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
validate: "/usr/sbin/visudo -cf %s"
when:
- is_nomad_client or is_nomad_server
- "'pis' in group_names"

163
tasks/cluster_storage.yml Normal file
View File

@@ -0,0 +1,163 @@
---
# TASK DESCRIPTION:
# Adds mount points to shared NFS storage to servers working in the homelab cluster.
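# Each entry in the mount-list variables is expected to carry 'src' and
# 'local' keys; a hypothetical example shape:
#   rpi_nfs_mounts_list:
#     - { src: "10.0.0.6:/volume1/media", local: "/mnt/media" }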
# --------------------------------- Mount on Raspberry Pis
- name: "Mount storage on Raspberry Pis"
when: "'pis' in group_names"
block:
- name: ensure local mount points exist
become: true
ansible.builtin.file:
path: "{{ item.local }}"
state: directory
mode: 0777
# owner: "{{ ansible_user_uid }}"
# group: "{{ ansible_user_gid }}"
loop: "{{ rpi_nfs_mounts_list }}"
- name: remove old nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: absent
loop: "{{ rpi_nfs_mounts_remove }}"
- name: mount all nfs drives
become: true
ansible.posix.mount:
path: "{{ item.local }}"
src: "{{ item.src }}"
fstype: nfs
opts: defaults,hard,intr,timeo=14
state: mounted
loop: "{{ rpi_nfs_mounts_list }}"
# --------------------------------- Mount on Macs
# https://gist.github.com/l422y/8697518
- name: "Mount storage on Macs"
when: "'macs' in group_names"
block:
- name: create mount_point
become: true
ansible.builtin.file:
path: "{{ mac_storage_mount_point }}"
state: directory
mode: 0755
# I ran into problems getting this to run successfully. If errors occur, add the line manually using:
# $ sudo nano /private/etc/auto_master
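# and appending the exact line this task manages:
# /- auto_nfs -nobrowse,nosuid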
- name: add NFS shared drives to macs
when: mac_autofs_type == 'nfs'
block:
- name: add auto_nfs to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_nfs"
line: "/- auto_nfs -nobrowse,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_nfs_mounts_list if mac_nfs_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: remove old mounts from /etc/auto_nfs
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_nfs
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=nfs,bg,intr,noowners,rw,vers=4 nfs://{{ item.src }}"
state: absent
unsafe_writes: true
mode: 0644
notify: mac_run_automount_unmount
loop: "{{ mac_nfs_mounts_remove if mac_nfs_mounts_remove is iterable else [] }}"
- name: add AFP shared drives to macs
when: mac_autofs_type == 'afp'
block:
- name: add auto_afp to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_afp"
line: "/- auto_afp -nobrowse,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: remove mounts from /etc/auto_afp
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_afp
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: absent
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
notify: mac_run_automount_unmount
- name: add SMB shared drives to macs
when: mac_autofs_type == 'smb'
block:
- name: add auto_smb to "/private/etc/auto_master"
become: true
ansible.builtin.lineinfile:
path: /private/etc/auto_master
regexp: "auto_smb"
line: "/- auto_smb -noowners,nosuid"
unsafe_writes: true
- name: add mounts to /etc/auto_smb
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_smb
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=smbfs,soft,noowners,nosuid,rw ://{{ smb_username }}:{{ smb_password }}@{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_list if mac_afp_or_smb_mounts_list is iterable else [] }}"
notify: mac_run_automount
- name: remove mounts from /etc/auto_smb
become: true
ansible.builtin.lineinfile:
create: true
path: /private/etc/auto_smb
regexp: "{{ item.src }}"
line: "{{ item.local }} -fstype=afp,rw afp://{{ item.src }}"
state: present
unsafe_writes: true
mode: 0644
loop: "{{ mac_afp_or_smb_mounts_remove if mac_afp_or_smb_mounts_remove is iterable else [] }}"
notify: mac_run_automount_unmount

327
tasks/consul.yml Normal file
View File

@@ -0,0 +1,327 @@
---
# TASK DESCRIPTION:
# Downloads, installs, and configures HashiCorp Consul.
- name: Set variables needed to install consul
block:
- name: "set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "set variable: Use USB drive for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "{{ rpi_usb_drive_mount_point }}/opt/consul"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "set variable: Use root disk for consul /opt (Debian)"
ansible.builtin.set_fact:
consul_opt_dir: "/opt/consul"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
consul_opt_dir: "/Users/{{ ansible_user }}/Library/consul"
when:
- mac_intel or mac_arm
- name: "set variable: Use ~/volume1/docker/consul/data for /opt files (synology)"
ansible.builtin.set_fact:
consul_opt_dir: "/volume1/docker/consul/data"
when:
- inventory_hostname == 'synology'
- name: "set variable: Set Consul download Binary (armv7l)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set Consul download Binary (MacOSX)"
ansible.builtin.set_fact:
consul_download_uri: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_darwin_amd64.zip"
when: mac_intel
- name: Assert that we can install Consul
ansible.builtin.assert:
that:
- consul_download_uri is defined
- consul_opt_dir is defined
fail_msg: "Unable to install consul on this host"
when: inventory_hostname != 'synology'
- name: "Stop Consul"
block:
- name: "stop consul systemd service (Debian)"
become: true
ansible.builtin.systemd:
name: consul
state: stopped
when:
- ansible_os_family == 'Debian'
- ansible_facts.services["consul.service"] is defined
- name: "check if plist file exists (MacOSX)"
ansible.builtin.stat:
path: "{{ consul_plist_macos }}"
register: consul_file
when:
- ansible_os_family == 'Darwin'
- name: "unload consul agent (MacOSX)"
become: true
ansible.builtin.command:
cmd: "launchctl unload {{ consul_plist_macos }}"
when:
- ansible_os_family == 'Darwin'
- consul_file.stat.exists
- name: "Create 'consul' user and group"
when:
- ansible_os_family == 'Debian'
block:
- name: "Ensure group 'consul' exists (Debian)"
become: true
ansible.builtin.group:
name: consul
state: present
- name: "Add the user 'consul' with group 'consul' (Debian)"
become: true
ansible.builtin.user:
name: consul
group: consul
- name: "Create Consul /opt storage and copy certificates"
block:
- name: "create {{ consul_opt_dir }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ consul_opt_dir }}"
- "{{ consul_opt_dir }}/logs"
- "{{ consul_opt_dir }}/plugins"
- "{{ consul_opt_dir }}/certs"
- name: Copy certs to servers
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: "certs/consul/consul-agent-ca.pem", dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem" }
- { src: "certs/consul/{{ datacenter_name }}-server-consul-0.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0.pem" }
- { src: "certs/consul/{{ datacenter_name }}-server-consul-0-key.pem", dest: "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem" }
when:
- is_consul_server
- name: Copy certs to clients
become: true
ansible.builtin.copy:
src: certs/consul/consul-agent-ca.pem
dest: "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
mode: 0755
when:
- is_consul_client
- not is_consul_server
- name: "set owner of files to consul:consul (debian)"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
become: true
ansible.builtin.file:
path: "{{ consul_opt_dir }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- ansible_os_family != 'Debian'
- name: "Template out Consul configuration file"
block:
- name: "create {{ interpolated_consul_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
state: directory
mode: 0755
- name: copy consul base config file
become: true
ansible.builtin.template:
src: consul.hcl.j2
dest: "{{ interpolated_consul_configuration_dir }}/consul.hcl"
mode: 0644
- name: "set owner of files to consul:consul (Debian)"
become: true
ansible.builtin.file:
path: "{{ interpolated_consul_configuration_dir }}"
owner: consul
group: consul
recurse: true
when:
- ansible_os_family == 'Debian'
- name: "Install Consul binary"
block:
- name: "set fact: need install consul?"
set_fact:
need_consul_install: false
when:
- consul_download_uri is defined
- name: Check if Consul is installed
ansible.builtin.stat:
path: /usr/local/bin/consul
register: consul_binary_file_location
when:
- consul_download_uri is defined
- name: "set fact: need consul install?"
set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not consul_binary_file_location.stat.exists
- name: Check current version of Consul
ansible.builtin.shell:
cmd: /usr/local/bin/consul --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
changed_when: false
register: installed_consul_version
check_mode: false
when:
- consul_download_uri is defined
- not need_consul_install
- name: "set fact: need consul install?"
set_fact:
need_consul_install: true
when:
- consul_download_uri is defined
- not need_consul_install
- installed_consul_version.stdout != consul_version
- name: install Consul
become: true
ansible.builtin.unarchive:
src: "{{ consul_download_uri }}"
dest: /usr/local/bin
remote_src: true
when:
- consul_download_uri is defined
- need_consul_install
- name: "validate consul config"
ansible.builtin.command:
cmd: "/usr/local/bin/consul validate {{ interpolated_consul_configuration_dir }}"
register: consul_config_valid
changed_when: false
failed_when: consul_config_valid.rc != 0
when:
- inventory_hostname != 'synology'
- name: "Copy system.d or launchctl service files"
block:
- name: ensure /Library/LaunchAgents exists (MacOSX)
ansible.builtin.file:
path: "{{ consul_plist_macos | dirname }}"
state: directory
mode: 0755
when:
- ansible_os_family == 'Darwin'
- name: create Consul launchd service (MacOSX)
ansible.builtin.template:
src: consul.launchd.j2
dest: "{{ consul_plist_macos }}"
mode: 0644
when:
- ansible_os_family == 'Darwin'
- name: create Consul service (Debian)
become: true
ansible.builtin.template:
src: consul.service.j2
dest: /etc/systemd/system/consul.service
mode: 0644
when:
- ansible_os_family == 'Debian'
- name: "Start Consul"
block:
- name: load the Consul agent (MacOSX)
ansible.builtin.command:
cmd: "launchctl load -w {{ consul_plist_macos }}"
when:
- ansible_os_family == 'Darwin'
- "'nostart' not in ansible_run_tags"
- name: start Consul (Debian)
become: true
ansible.builtin.systemd:
name: consul
enabled: true
state: started
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
- name: make sure Consul service is really running
ansible.builtin.command:
cmd: systemctl is-active consul
register: is_consul_really_running
changed_when: false
failed_when: is_consul_really_running.rc != 0
when:
- ansible_os_family == 'Debian'
- "'nostart' not in ansible_run_tags"
- name: "Copy Consul service checks to synology"
when:
- inventory_hostname == 'synology'
block:
- name: copy config file
ansible.builtin.template:
src: consul_services/consul_synology_checks.json.j2
dest: "{{ interpolated_consul_configuration_dir }}/service_checks.json"
mode: 0644
- name: Reload configuration file
ansible.builtin.uri:
url: "http://{{ synology_second_ip }}:8500/v1/agent/reload"
method: PUT
status_code: 200
ignore_errors: true
check_mode: false
register: consul_agent_reload_http_response
failed_when: consul_agent_reload_http_response.status != 200
- name: debug when consul agent reload fails
ansible.builtin.debug:
var: consul_agent_reload_http_response.msg
check_mode: false
when: consul_agent_reload_http_response.status != 200

37
tasks/debug.yml Normal file
View File

@@ -0,0 +1,37 @@
---
# - name: architecture
# debug:
# var: ansible_facts['architecture']
# - name: distribution
# debug:
# var: ansible_facts['distribution']
# - name: distribution_file_variety
# debug:
# var: ansible_facts['distribution_file_variety']
# - name: service_mgr
# debug:
# var: ansible_facts['service_mgr']
# - name: os_family
# debug:
# var: ansible_facts['os_family']
# - debug:
# msg: "{{ ansible_os_family }}"
# - debug:
# msg: "pass: {{ ansible_become_pass }}"
# - debug:
# var: ansible_facts['nodename']
# - debug:
# var: ansible_facts['system_vendor']
# when:
# - ansible_facts['system_vendor'] is search("Synology")
- name: "end play"
ansible.builtin.meta: end_play

88
tasks/docker.yml Normal file
View File

@@ -0,0 +1,88 @@
---
# TASK DESCRIPTION:
# Installs Docker on specified server
- name: Check if Docker is already present
ansible.builtin.command:
cmd: docker --version
register: docker_command_result
changed_when: docker_command_result.rc == 1
failed_when: false
- name: install docker on Debian
when: ansible_os_family == 'Debian'
block:
- name: "Add docker local filesystem storage directory"
ansible.builtin.file:
path: "{{ rpi_localfs_storage }}"
mode: 0755
state: directory
- name: Download Docker install convenience script
ansible.builtin.get_url:
url: "https://get.docker.com/"
dest: /tmp/get-docker.sh
mode: 0775
when: docker_command_result.rc == 1
- name: Run Docker install convenience script
ansible.builtin.command: /tmp/get-docker.sh
environment:
CHANNEL: stable
when: docker_command_result.rc == 1
- name: Make sure Docker CE is the version specified
ansible.builtin.apt:
name: "docker-ce"
state: present
when: docker_command_result.rc == 1
- name: Ensure Docker is started
ansible.builtin.service:
name: docker
state: started
enabled: true
- name: Ensure docker users are added to the docker group
become: true
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: docker
append: true
when: docker_command_result.rc == 1
- name: install docker on macOS
when: "'macs' in group_names"
block:
- name: "Add docker directory to ~/Library"
ansible.builtin.file:
path: "{{ mac_localfs_storage }}"
mode: 0755
state: directory
- name: install base homebrew packages
community.general.homebrew:
name: docker
state: present
update_homebrew: false
upgrade_all: false
when: docker_command_result.rc == 1
- name: open docker application
ansible.builtin.command:
cmd: open /Applications/Docker.app
when: docker_command_result.rc == 1
- name: Must install Docker manually
ansible.builtin.debug:
msg: |
Docker must be installed manually on macOS. Log in to the Mac to install it, then rerun the playbook.
Be certain to configure the following:
- run on login
- add '{{ mac_storage_mount_point }}' to mountable file system directories
when: docker_command_result.rc == 1
- name: end play
ansible.builtin.meta: end_play
when: docker_command_result.rc == 1

View File

@@ -0,0 +1,53 @@
---
# TASK DESCRIPTION:
# Creates variables based on other variables and Ansible facts
#
# Variables created:
# - interpolated_localfs_service_storage: [dir]
# - interpolated_nfs_service_storage: [dir]
# - interpolated_consul_configuration_dir: [dir]
- name: "Set local filesystem location (pis)"
ansible.builtin.set_fact:
interpolated_localfs_service_storage: "{{ rpi_localfs_service_storage }}"
changed_when: false
when:
- "'pis' in group_names"
- name: "Set local filesystem location (macs)"
ansible.builtin.set_fact:
interpolated_localfs_service_storage: "{{ mac_localfs_service_storage }}"
changed_when: false
when:
- "'macs' in group_names"
- name: "Set NFS mount location (pis)"
ansible.builtin.set_fact:
interpolated_nfs_service_storage: "{{ rpi_nfs_mount_point }}"
changed_when: false
when:
- "'pis' in group_names"
- name: "Set NFS mount location location (macs)"
ansible.builtin.set_fact:
interpolated_nfs_service_storage: "{{ mac_storage_mount_point }}"
changed_when: false
when:
- "'macs' in group_names"
- name: "set consul configuration directory (synology)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ synology_consul_configuration_dir }}"
when:
- inventory_hostname == 'synology'
- name: "set consul configuration directory (pis)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ rpi_consul_configuration_dir }}"
when:
- "'pis' in group_names"
- name: "set consul configuration directory (macs)"
ansible.builtin.set_fact:
interpolated_consul_configuration_dir: "{{ mac_consul_configuration_dir }}"
when:
- "'macs' in group_names"

32
tasks/logrotate.yml Normal file
View File

@@ -0,0 +1,32 @@
---
# TASK DESCRIPTION:
# Installs logrotate and associated rotation jobs
#
# NOTE: This task exists because the arillso.logrotate role fails completely on macOS
- name: add service_backups.log to logrotate
become: true
vars:
logrotate_applications:
- name: service_backups
definitions:
- logs:
- "{{ rpi_nfs_mount_point }}/pi-cluster/logs/service_backups.log"
options:
- rotate 1
- size 100k
- missingok
- notifempty
- su root root
- extension .log
- compress
- nodateext
- nocreate
- delaycompress
import_role:
name: arillso.logrotate
failed_when: false
ignore_errors: true
when:
- "'macs' not in group_names"
- is_cluster_leader

242
tasks/nomad.yml Normal file
View File

@@ -0,0 +1,242 @@
---
# TASK DESCRIPTION:
# Downloads, installs, and configures HashiCorp Nomad.
- name: "Set variables needed to install Nomad"
block:
- name: "set variable: check if we have a mounted USB drive (Debian)"
ansible.builtin.stat:
path: "{{ rpi_usb_drive_mount_point }}"
register: have_usb_drive
changed_when: false
when:
- ansible_os_family == 'Debian'
- name: "set variable: Use USB drive for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "{{ rpi_usb_drive_mount_point }}/opt/nomad"
when:
- ansible_os_family == 'Debian'
- have_usb_drive.stat.exists
- name: "set variable: Use root dist for nomad /opt (Debian)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/opt/nomad"
when:
- ansible_os_family == 'Debian'
- not have_usb_drive.stat.exists
- name: "set variable: Use ~/library for /opt files (macOSX)"
ansible.builtin.set_fact:
nomad_opt_dir_location: "/Users/{{ ansible_user }}/Library/nomad"
when:
- ansible_os_family == 'Darwin'
- name: "set variable: Set Nomad download Binary (armv7l)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set Nomad download Binary (MacOSX)"
ansible.builtin.set_fact:
nomad_download_file_uri: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_darwin_amd64.zip"
when:
- mac_intel
- name: Assert that we can install Nomad
ansible.builtin.assert:
that:
- nomad_download_file_uri is defined
- nomad_opt_dir_location is defined
fail_msg: "Unable to install Nomad on this host"
- name: "Create Nomad user and group (Debian)"
when: ansible_os_family == 'Debian'
block:
- name: "Ensure group 'nomad' exists (Debian)"
become: true
ansible.builtin.group:
name: nomad
state: present
- name: "Add the user 'nomad' with group 'nomad' (Debian)"
become: true
ansible.builtin.user:
name: nomad
group: nomad
- name: "Add user 'nomad' to docker and sudo groups (Debian)"
become: true
ansible.builtin.user:
user: nomad
groups: docker,sudo
append: true
- name: "Create Nomad /opt storage"
block:
- name: "create {{ nomad_opt_dir_location }} directories"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
recurse: true
mode: 0755
loop:
- "{{ nomad_opt_dir_location }}/logs"
- "{{ nomad_opt_dir_location }}/plugins"
- "{{ nomad_opt_dir_location }}/certs"
- name: Copy server certs
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
- { src: certs/nomad/server.pem, dest: "{{ nomad_opt_dir_location }}/certs/server.pem" }
- { src: certs/nomad/server-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/server-key.pem" }
notify: "restart nomad"
when: is_nomad_server
- name: Copy client certs
become: true
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: 0755
loop:
- { src: certs/nomad/nomad-ca.pem, dest: "{{ nomad_opt_dir_location }}/certs/nomad-ca.pem" }
- { src: certs/nomad/client.pem, dest: "{{ nomad_opt_dir_location }}/certs/client.pem" }
- { src: certs/nomad/client-key.pem, dest: "{{ nomad_opt_dir_location }}/certs/client-key.pem" }
notify: "restart nomad"
when: is_nomad_client
- name: "set owner of files to nomad:nomad (debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: nomad
group: nomad
recurse: true
when: ansible_os_family == 'Debian'
- name: "set owner of files to {{ ansible_user_uid }}:{{ ansible_user_gid }} (MacOSX)"
become: true
ansible.builtin.file:
path: "{{ nomad_opt_dir_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when: ansible_os_family != 'Debian'
- name: "Template out the configuration file"
block:
- name: "create {{ nomad_configuration_dir }}"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
state: directory
mode: 0755
- name: copy base config file
become: true
ansible.builtin.template:
src: nomad.hcl.j2
dest: "{{ nomad_configuration_dir }}/nomad.hcl"
mode: 0644
notify: "restart nomad"
- name: "set owner of files to nomad:nomad (Debian)"
become: true
ansible.builtin.file:
path: "{{ nomad_configuration_dir }}"
owner: nomad
group: nomad
recurse: true
when:
- ansible_os_family == 'Debian'
- name: Install or Update Nomad
block:
- name: "set fact: do we need a nomad install?"
set_fact:
need_nomad_install: false
- name: Check if nomad is installed
ansible.builtin.stat:
path: /usr/local/bin/nomad
register: nomad_binary_file_location
- name: "set fact: do we need a nomad install"
set_fact:
need_nomad_install: true
when:
- not nomad_binary_file_location.stat.exists
- name: Check current version of Nomad
ansible.builtin.shell: /usr/local/bin/nomad --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_nomad_version
check_mode: false
changed_when: false
when:
- not need_nomad_install
- name: "set fact: do we need a nomad install"
set_fact:
need_nomad_install: true
when:
- not need_nomad_install
- current_nomad_version.stdout != nomad_version
- name: install Nomad
become: true
unarchive:
src: "{{ nomad_download_file_uri }}"
dest: /usr/local/bin
remote_src: true
notify: "restart nomad"
when:
- need_nomad_install
- name: "Copy system.d or launchctrl service files"
block:
- name: ensure /Library/LaunchAgents exists (MacOSX)
ansible.builtin.file:
path: "{{ nomad_plist_macos | dirname }}"
state: directory
mode: 0755
when:
- ansible_os_family == 'Darwin'
- name: create nomad launchd service (MacOSX)
ansible.builtin.template:
src: nomad.launchd.j2
dest: "{{ nomad_plist_macos }}"
mode: 0644
notify: "restart nomad"
when:
- ansible_os_family == 'Darwin'
- name: create nomad service (Debian)
become: true
ansible.builtin.template:
src: nomad.service.j2
dest: /etc/systemd/system/nomad.service
mode: 0644
notify: "restart nomad"
when:
- ansible_os_family == 'Debian'
- name: "start nomad, if stopped"
ansible.builtin.shell:
cmd: "/usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
register: node_status_response
ignore_errors: true
failed_when: false
changed_when: node_status_response.rc > 0
notify: "restart nomad"
when: "'nostart' not in ansible_run_tags"

View File

@@ -0,0 +1,76 @@
---
# TASK DESCRIPTION:
# Keeps Nomad jobs and docker-compose files in sync. All job and docker-compose files are written as Jinja2 templates. Performs the following:
#
# - Syncs Nomad jobs
# - Syncs docker-compose files
# - Ensures we have directories on the local filesystem for jobs which can't function with networked
# storage (I'm looking at you, 'arr' apps). These folders must be created, even if empty, to
# allow mounting Nomad local-storage endpoints
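#
# Template names map to job files by stripping the '.j2' suffix, e.g. a
# hypothetical templates/nomad_jobs/whoami.hcl.j2 is rendered to
# {{ nomad_jobfile_location }}/whoami.hcl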
- name: "Sync Nomad Jobs"
block:
- name: Remove nomad jobs directory
ansible.builtin.file:
path: "{{ nomad_jobfile_location }}"
state: absent
when:
- is_nomad_client or is_nomad_server or ('macs' in group_names)
- clean_nomad_jobs
- name: (Re)Create nomad jobs directory
ansible.builtin.file:
path: "{{ nomad_jobfile_location }}"
state: directory
mode: 0755
when:
- is_nomad_client or is_nomad_server or ('macs' in group_names)
- "'nas' not in group_names"
- name: synchronize nomad job templates (jinja)
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ nomad_jobfile_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
mode: 0644
with_fileglob: "templates/nomad_jobs/*.j2"
when:
- is_nomad_client or is_nomad_server or ('macs' in group_names)
- "'nas' not in group_names"
- name: synchronize nomad job templates (hcl)
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ nomad_jobfile_location }}/{{ item | basename }}"
mode: 0644
with_fileglob: "templates/nomad_jobs/*.hcl"
when:
- is_nomad_client or is_nomad_server or ('macs' in group_names)
- "'nas' not in group_names"
- name: Ensure we have local storage folders
become: true
ansible.builtin.file:
path: "{{ interpolated_localfs_service_storage }}/{{ item }}"
state: directory
mode: 0777
group: "{{ ansible_user_gid }}"
owner: "{{ ansible_user_uid }}"
when:
- is_nomad_client or is_nomad_server
loop: "{{ service_localfs_dirs }}"
- name: "Sync docker compose files"
when: is_docker_compose_client
block:
- name: confirm compose file dir exists
ansible.builtin.file:
path: "{{ docker_compose_file_location }}"
state: directory
mode: 0755
- name: synchronize docker-compose files
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ docker_compose_file_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
mode: 0644
with_fileglob: "../templates/docker_compose_files/*.j2"

67
tasks/packages.yml Normal file
View File

@@ -0,0 +1,67 @@
---
# TASK DESCRIPTION:
# Ensures all packages are installed and updated: apt on Debian, Homebrew on macOS.
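# Run just this step (illustrative): ansible-playbook -i inventory.yml main.yml --tags packages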
- name: "Update and install APT packages"
when:
- ansible_os_family != 'Darwin'
- manage_apt_packages_list
block:
- name: update APT package cache
become: true
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
- name: "upgrade APT to the latest packages (this may take a while)"
become: true
ansible.builtin.apt:
upgrade: safe
- name: "install/upgrade APT packages (this may take a while)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop: "{{ apt_packages_list }}"
register: apt_output
- name: "Update and install Homebrew packages"
when:
- manage_homebrew_package_list
- ansible_os_family == 'Darwin'
block:
- name: upgrade homebrew and all packages
community.general.homebrew:
update_homebrew: true
upgrade_all: true
register: homebrew_output
ignore_errors: true
- name: install base homebrew packages
community.general.homebrew:
name: "{{ homebrew_package_list | join(',') }}"
state: present
update_homebrew: false
upgrade_all: false
register: homebrew_output
- name: homebrew packages updated or installed
ansible.builtin.debug:
msg: "{{ homebrew_output.changed_pkgs }}"
- name: unchanged homebrew packages
ansible.builtin.debug:
msg: "{{ homebrew_output.unchanged_pkgs }}"
- name: install homebrew casks
community.general.homebrew_cask:
name: "{{ item }}"
state: present
install_options: "appdir=/Applications"
accept_external_apps: true
upgrade_all: false
update_homebrew: false
greedy: false
loop: "{{ homebrew_casks_list }}"
ignore_errors: true

View File

@@ -0,0 +1,40 @@
---
# TASK DESCRIPTION:
# Runs a git pull against all repositories in ~/repos by running a shellscript named 'pull_all_repos'.
# NOTE: This shellscript is not part of this repository.
- name: "Check if pull_all_repos exists"
ansible.builtin.stat:
path: "~/bin/pull_all_repos"
check_mode: false
register: pull_script_check
- name: "Check if ~/repos exists"
ansible.builtin.stat:
path: "~/repos"
check_mode: false
register: repos_directory_check
- name: "run pull_all_repos script"
ansible.builtin.command:
cmd: "~/bin/pull_all_repos --directory ~/repos"
register: pull_script_output
when:
- not ansible_check_mode
- pull_script_check.stat.exists
- pull_script_check.stat.executable
- repos_directory_check.stat.isdir is defined
- repos_directory_check.stat.isdir
- repos_directory_check.stat.writeable
failed_when: pull_script_output.rc > 1
- name: "Output from pull_all_repos"
debug:
msg: "{{ pull_script_output.stdout }}"
when:
- not ansible_check_mode
- pull_script_check.stat.exists
- pull_script_check.stat.executable
- repos_directory_check.stat.isdir is defined
- repos_directory_check.stat.isdir
- repos_directory_check.stat.writeable

12
tasks/sanity.yml Normal file
View File

@@ -0,0 +1,12 @@
---
# TASK DESCRIPTION:
# Always runs first. Confirms we can actually use Ansible
- name: sanity - user mode
become: false
ansible.builtin.debug:
msg: "sanity check: user mode"
- name: sanity - become mode
become: true
ansible.builtin.debug:
msg: "sanity check: become mode"

View File

@@ -0,0 +1,93 @@
---
# TASK DESCRIPTION:
# Downloads, installs, and configures Prometheus Node Exporter.
#
# NOTE: This is deprecated; I no longer use Prometheus and have migrated to Telegraf
- name: populate service facts
service_facts:
- name: stop node_exporter
become: true
ansible.builtin.systemd:
name: node_exporter
state: stopped
when: ansible_facts.services["node_exporter.service"] is defined
- name: Ensure group "prometheus" exists
become: true
ansible.builtin.group:
name: prometheus
state: present
- name: Add the user 'prometheus' with group 'prometheus'
become: true
ansible.builtin.user:
name: prometheus
group: prometheus
groups: docker
append: true
# --------------- Install or Update Prometheus
- name: "set fact: need to install Prometheus?"
set_fact:
need_prometheus_install: false
- name: Check if node_exporter is installed
ansible.builtin.stat:
path: /usr/local/bin/node_exporter
register: prometheus_binary_file_location
- name: "set fact: need to install Prometheus?"
set_fact:
need_prometheus_install: true
when:
- not prometheus_binary_file_location.stat.exists
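# The 3>&1 1>&2 2>&3 redirection below swaps stdout and stderr, so a version
# banner that node_exporter prints to stderr can be piped into grep.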
- name: Check current version of Prometheus
ansible.builtin.shell: /usr/local/bin/node_exporter --version 3>&1 1>&2 2>&3 | head -n1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
ignore_errors: true
register: current_prometheus_version
check_mode: false
when:
- need_prometheus_install is false
- name: "set fact: need to install Prometheus?"
set_fact:
need_prometheus_install: true
when:
- need_prometheus_install is false
- current_prometheus_version.stdout != prometheus_verssion
- name: install node_exporter
become: true
ansible.builtin.unarchive:
src: "https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_verssion }}/node_exporter-{{ prometheus_verssion }}.linux-armv7.tar.gz"
dest: /usr/local/bin
group: prometheus
owner: prometheus
# reference for extra_opts: https://github.com/ansible/ansible/issues/27081
extra_opts:
- --strip=1
- --no-anchored
- "node_exporter"
remote_src: true
when:
- need_prometheus_install is true
- name: create node_exporter service
become: true
ansible.builtin.template:
src: node_exporter.service.j2
dest: /etc/systemd/system/node_exporter.service
mode: 0644
- name: start node_exporter
become: true
ansible.builtin.systemd:
name: node_exporter
daemon_reload: true
enabled: true
state: started
when:
- "'nostart' not in ansible_run_tags"

189
tasks/tdarr.yml Normal file
View File

@@ -0,0 +1,189 @@
---
# TASK DESCRIPTION:
# Downloads, installs, and configures Tdarr.
- name: "Set variables"
block:
- name: "Set tdarr local filesystem location (pis)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ rpi1_tdarr_file_location }}"
changed_when: false
when:
- "'pis' in group_names"
- name: "Set tdarr local filesystem location (macs)"
ansible.builtin.set_fact:
interpolated_tdarr_dir: "{{ mac_tdarr_file_location }}"
changed_when: false
when:
- "'macs' in group_names"
- name: "set variable: Set tdarr download Binary (armv7l)"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/linux_arm/Tdarr_Updater.zip"
when:
- ansible_os_family == 'Debian'
- ansible_architecture == 'armv7l'
- name: "set variable: Set tdarr download Binary (MacOSX) - Intel"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_x64/Tdarr_Updater.zip"
when:
- mac_intel
- name: "set variable: Set tdarr download Binary (MacOSX) - ARM"
ansible.builtin.set_fact:
tdarr_download_uri: "https://f000.backblazeb2.com/file/tdarrs/versions/{{ tdarr_installer_version }}/darwin_arm64/Tdarr_Updater.zip"
when:
- mac_arm
- name: "set fact: do we need a tdarr install?"
set_fact:
need_tdarr_install: false
- name: Assert that we can install Tdarr
ansible.builtin.assert:
that:
- tdarr_download_uri is defined
- interpolated_tdarr_dir is defined
fail_msg: "Unable to install Tdarr on this host"
- name: "Install ffmpeg and HandbrakeCLI"
block:
- name: "ensure ffmpeg and handbrake are installed (Debian)"
become: true
ansible.builtin.apt:
pkg: "{{ item }}"
state: present
loop:
- ffmpeg
- handbrake
when: "'pis' in group_names"
- name: "ensure ffmpeg and handbrake are installed (MacOS)"
community.general.homebrew:
name: "{{ item }}"
state: present
update_homebrew: false
upgrade_all: false
loop:
- ffmpeg
- handbrake
when: "'macs' in group_names"
- name: "ensure tdarr directory exists"
become: true
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}"
mode: 0755
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
state: directory
- name: "Install tdarr"
block:
- name: "set_fact: need Tdarr install?"
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/configs"
register: tdarr_exists
changed_when: false
failed_when: false
- name: "set fact: do we need a tdarr install?"
set_fact:
need_tdarr_install: true
when: not tdarr_exists.stat.exists
- name: Download tdarr
ansible.builtin.unarchive:
src: "{{ tdarr_download_uri }}"
dest: "{{ interpolated_tdarr_dir }}"
remote_src: true
when: need_tdarr_install
- name: Did tdarr download?
ansible.builtin.stat:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_installer_exists
failed_when: not tdarr_installer_exists.stat.exists
when: need_tdarr_install
- name: Ensure correct permissions on Tdarr_Updater
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
mode: 0755
when: need_tdarr_install
- name: Install tdarr
ansible.builtin.command:
cmd: "{{ interpolated_tdarr_dir }}/Tdarr_Updater"
register: tdarr_install
failed_when: tdarr_install.rc > 0
when: need_tdarr_install
- name: Ensure correct permissions on server/node executables
ansible.builtin.file:
path: "{{ interpolated_tdarr_dir }}/{{ item }}"
mode: 0755
loop:
- Tdarr_Server/Tdarr_Server
- Tdarr_Node/Tdarr_Node
when: need_tdarr_install
- name: "configure tdarr"
block:
- name: update server configuration file
ansible.builtin.template:
src: Tdarr_Server_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Server_Config.json"
mode: 0644
when: is_tdarr_server
- name: update node configuration file
ansible.builtin.template:
src: Tdarr_Node_Config.json.j2
dest: "{{ interpolated_tdarr_dir }}/configs/Tdarr_Node_Config.json"
mode: 0644
when: is_tdarr_node
- name: check if consul is installed
ansible.builtin.stat:
path: "{{ interpolated_consul_configuration_dir }}"
register: consul_installed
changed_when: false
failed_when: false
when:
- is_tdarr_server
- name: move consul service config into place
become: true
ansible.builtin.template:
src: consul_services/tdarr_service.json.j2
dest: "{{ interpolated_consul_configuration_dir }}/tdarr_service.json"
mode: 0644
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: Reload consul agent
ansible.builtin.uri:
url: "http://{{ ansible_host }}:8500/v1/agent/reload"
method: PUT
status_code: 200
ignore_errors: true
register: consul_agent_reload_http_response
failed_when: consul_agent_reload_http_response.status != 200
when:
- is_tdarr_server
- consul_installed.stat.exists
- name: debug when consul agent reload fails
ansible.builtin.debug:
var: consul_agent_reload_http_response.msg
when:
- is_tdarr_server
- consul_installed.stat.exists
- consul_agent_reload_http_response.status != 200
- name: mount shared storage
import_tasks: cluster_storage.yml

192
tasks/telegraf.yml Normal file
View File

@@ -0,0 +1,192 @@
---
# TASK DESCRIPTION:
# Downloads, installs, and configures Telegraf
# --------------------------------- Set variables depending on system type
- name: "Configure variables"
block:
- name: "set variable: telegraph_binary_location (Debian)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/bin/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "set variable: telegraph_binary_location (MacOS)"
ansible.builtin.set_fact:
telegraph_binary_location: "/usr/local/bin/telegraf"
when:
- ansible_os_family == 'Darwin'
- name: "set fact: telegraph_config_location (Debian)"
ansible.builtin.set_fact:
telegraph_config_location: "/etc/telegraf"
when:
- ansible_os_family == 'Debian'
- name: "set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/usr/local/etc"
when:
- ansible_os_family == 'Darwin'
- name: "set fact: telegraph_config_location (macOS)"
ansible.builtin.set_fact:
telegraph_config_location: "/volume1/docker/telegraf/config"
when:
- inventory_hostname == 'synology'
- name: "Fail if arm Mac (need to update task) or variables not defined"
ansible.builtin.assert:
that:
- telegraph_binary_location is defined
- telegraph_config_location is defined
- not mac_arm
fail_msg: "Unable to install Telegraf on this host"
- name: "Install/upgrade Telegraf"
block:
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: false
when: telegraph_binary_location is defined
- name: Check if telegraf is installed
ansible.builtin.stat:
path: "{{ telegraph_binary_location }}"
check_mode: false
register: telegraf_binary_exists
when: telegraph_binary_location is defined
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
check_mode: false
when:
- telegraph_binary_location is defined
- not telegraf_binary_exists.stat.exists
- name: Check current version of telegraf
ansible.builtin.shell: "{{ telegraph_binary_location }} --version | grep -oE '[0-9]+\\.[0-9]+\\.[0-9]+'"
ignore_errors: true
register: current_telegraf_version
check_mode: false
changed_when: current_telegraf_version.stdout != telegraf_version
when:
- telegraph_binary_location is defined
- not need_telegraf_install
- name: "set fact: Need telegraf install?"
ansible.builtin.set_fact:
need_telegraf_install: true
when:
- telegraph_binary_location is defined
- not need_telegraf_install
- current_telegraf_version.stdout != telegraf_version
- name: install telegraf (MacOS)
community.general.homebrew:
name: telegraf
state: present
notify: restart_telegraf
when:
- ansible_os_family == 'Darwin'
- need_telegraf_install
- name: install base apt-transport (Debian)
become: true
ansible.builtin.apt:
pkg: apt-transport-https
state: present
update_cache: true
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Download telegraf GPG key (Debian)
become: true
ansible.builtin.apt_key:
state: present
url: "https://repos.influxdata.com/influxdb.key"
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: Add telegraf repository to apt (Debian)
become: true
ansible.builtin.apt_repository:
repo: deb https://repos.influxdata.com/debian buster stable
state: present
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
- name: install telegraf (Debian)
become: true
ansible.builtin.apt:
pkg: telegraf
update_cache: true
notify: restart_telegraf
when:
- ansible_os_family == 'Debian'
- need_telegraf_install
# - name: give telegraf access to docker
# become: true
# ansible.builtin.user:
# name: telegraf
# groups: docker
# append: true
# state: present
# create_home: false
# when:
# - ansible_os_family == 'Debian'
# - need_telegraf_install
# -------------------------------------------------- Add Telegraf Configs
- name: "Configure Telegraf"
block:
- name: "Ensure {{ telegraph_config_location }} exists"
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
loop:
- "{{ telegraph_config_location }}"
- "{{ telegraph_config_location }}/telegraf.d"
- name: template config files to server
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/base_config.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.conf" }
- { src: "telegraf/temperature.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/temperature.conf" }
- { src: "telegraf/nomad.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/nomad.conf" }
- { src: "telegraf/docker.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/docker.conf" }
notify: restart_telegraf
- name: template leader configs (ie, configs that should be placed on a single server)
become: true
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "644"
loop:
- { src: "telegraf/leader.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/leader.conf" }
- { src: "telegraf/pingHosts.conf.j2", dest: "{{ telegraph_config_location }}/telegraf.d/pingHosts.conf" }
when:
- is_cluster_leader
notify: restart_telegraf
- name: Reset file ownership (macOS)
become: true
ansible.builtin.file:
path: "{{ telegraph_config_location }}"
owner: "{{ ansible_user_uid }}"
group: "{{ ansible_user_gid }}"
recurse: true
when:
- (ansible_os_family == 'Darwin') or (inventory_hostname == 'synology')

View File

@@ -0,0 +1,22 @@
{
"nodeID": "{{ inventory_hostname }}",
"nodeIP": "{{ ansible_host }}",
"nodePort": "{{ tdarr_node_port }}",
"serverIP": "{% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}",
"serverPort": "{{ tdarr_server_port }}",
{% if ansible_os_family == 'Darwin' and ansible_architecture == 'arm64' -%}
"handbrakePath": "/opt/homebrew/bin/HandBrakeCLI",
"ffmpegPath": "/opt/homebrew/bin/ffmpeg",
{% else %}
"handbrakePath": "/usr/local/bin/HandBrakeCLI",
"ffmpegPath": "/usr/local/bin/ffmpeg",
{%- endif %}
"mkvpropeditPath": "",
"pathTranslators": [
{
"server": "",
"node": ""
}
]
}

View File

@@ -0,0 +1,13 @@
{
"serverPort": "{{ tdarr_server_port }}",
"webUIPort": "{{ tdarr_webui_port }}",
"serverIP": "{% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}",
{% if ansible_os_family == 'Darwin' and ansible_architecture == 'arm64' -%}
"handbrakePath": "/opt/homebrew/bin/HandBrakeCLI",
"ffmpegPath": "/opt/homebrew/bin/ffmpeg",
{% else %}
"handbrakePath": "/usr/local/bin/HandBrakeCLI",
"ffmpegPath": "/usr/local/bin/ffmpeg"
{%- endif %}
}

128
templates/consul.hcl.j2 Normal file
View File

@@ -0,0 +1,128 @@
# ----------------------------------------- General Info
"datacenter" = "{{ datacenter_name }}" # NOTE: changing the datacenter requires generating new certificates
"node_name" = "{{ inventory_hostname }}"
"domain" = "consul"
{% if is_consul_server %}
"server" = true
"ui_config" = {
"enabled" = true
}
{% else %}
"ui_config" = {
"enabled" = false
}
{% endif %}
# ----------------------------------------- Files and Logs
{% if 'synology' in inventory_hostname %}
"data_dir" = "/consul/data"
"log_file" = "/consul/data/logs/consul.log"
{% else %}
"data_dir" = "{{ consul_opt_dir }}"
"log_file" = "{{ consul_opt_dir }}/logs/consul.log"
{% endif %}
"log_level" = "warn"
"log_rotate_max_files" = 5
"enable_syslog" = false
# ----------------------------------------- Networking
"addresses" = {
"dns" = "0.0.0.0"
"grpc" = "0.0.0.0"
"http" = "0.0.0.0"
"https" = "0.0.0.0"
}
"ports" = {
"dns" = 8600
"http" = 8500
"server" = 8300
}
{% if 'linode' in group_names %}
"advertise_addr" = "{{ linode_private_ip }}"
"bind_addr" = "{{ linode_private_ip }}"
"client_addr" = "{{ linode_private_ip }} {{ '{{' }} GetInterfaceIP \"docker0\" {{ '}}' }}"
{% elif 'synology' in inventory_hostname %}
"advertise_addr" = "{{ synology_second_ip }}"
"bind_addr" = "{{ synology_second_ip }}"
"client_addr" = "{{ synology_second_ip }} {{ '{{' }} GetInterfaceIP \"docker0\" {{ '}}' }}"
{% else %}
"advertise_addr" = "{{ ansible_default_ipv4.address }}"
"bind_addr" = "{{ ansible_default_ipv4.address }}"
"client_addr" = "{{ ansible_default_ipv4.address }} {{ '{{' }} GetInterfaceIP \"docker0\" {{ '}}' }}"
{% endif %}
"retry_interval" = "30s"
"retry_interval_wan" = "30s"
{% if 'linode' in group_names %}
"retry_join" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].linode_private_ip }}"{% if not loop.last %}, {% endif %}{% endfor %}]
{% else %}
"retry_join" = [{% for h in groups['lan'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
{% if is_consul_server %}
"join_wan" = [{% for h in groups['linode-cluster'] if hostvars[h].is_consul_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
{% endif %}
{% endif %}
# ----------------------------------------- Security
"encrypt" = "{{ consul_encryprion_key }}"
{% if is_consul_server %} {# Consul Servers #}
"verify_incoming" = true
"verify_outgoing" = true
"verify_server_hostname" = true
{% if 'synology' in inventory_hostname %} {# necessary, since running in docker container #}
"ca_file" = "/consul/data/certs/consul-agent-ca.pem"
"cert_file" = "/consul/data/certs/{{ datacenter_name }}-server-consul-0.pem"
"key_file" = "/consul/data/certs/{{ datacenter_name }}-server-consul-0-key.pem"
{% else %}
"ca_file" = "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
"cert_file" = "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0.pem"
"key_file" = "{{ consul_opt_dir }}/certs/{{ datacenter_name }}-server-consul-0-key.pem"
{% endif %}
"auto_encrypt" = {
"allow_tls" = true
}
{% else %} {# Consul Clients #}
"verify_incoming" = false
"verify_outgoing" = true
"verify_server_hostname" = true
{% if 'synology' in inventory_hostname %} {# necessary, since running in docker container #}
"ca_file" = "/consul/data/certs/consul-agent-ca.pem"
{% else %}
"ca_file" = "{{ consul_opt_dir }}/certs/consul-agent-ca.pem"
{% endif %}
"auto_encrypt" = {
"tls" = true
}
{% endif %}
"acl" = {
enabled = false
default_policy = "allow"
enable_token_persistence = true
}
# ----------------------------------------- Cluster Operations
{% if is_cluster_leader is defined %}
{% if is_cluster_leader %}
"bootstrap" = true
{% endif %}
{% endif %}
"disable_update_check" = false
"enable_local_script_checks" = false
"enable_script_checks" = false
"skip_leave_on_interrupt" = true
"leave_on_terminate" = false
"primary_datacenter" = "{{ datacenter_name }}"
"performance" = {
"leave_drain_time" = "5s"
"raft_multiplier" = 1
"rpc_hold_timeout" = "7s"
}
{# telemetry = {
"dogstatsd_addr" = "localhost:8125"
"disable_hostname" = true
"disable_compat_1.9" = true
} #}
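The gossip key behind encrypt and the certificate files referenced above are generated out of band with the Consul CLI. A hedged sketch of producing matching artifacts from a playbook, where "home" stands in for the datacenter name and /tmp/consul-certs is an assumed pre-existing working directory (the output file names are Consul's defaults):

- name: Generate a gossip encryption key
  ansible.builtin.command: consul keygen
  register: consul_keygen
  changed_when: false

- name: Create a CA and a server certificate for the datacenter
  ansible.builtin.command: "{{ item }}"
  args:
    chdir: /tmp/consul-certs
  loop:
    - consul tls ca create # writes consul-agent-ca.pem
    - consul tls cert create -server -dc home # writes home-server-consul-0.pem and its key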


@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/sbin</string>
</dict>
<key>KeepAlive</key>
<dict>
<key>PathState</key>
<dict>
<key>{{ mac_keep_alive_file }}</key>
<true/>
</dict>
<key>SuccessfulExit</key>
<true/>
</dict>
<key>Label</key>
<string>com.{{ my_username }}.consul</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/consul</string>
<string>agent</string>
<string>-config-dir</string>
<string>{{ interpolated_consul_configuration_dir }}</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>


@@ -0,0 +1,21 @@
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
After=docker.service
Requires=docker.service
ConditionFileNotEmpty={{ interpolated_consul_configuration_dir }}/consul.hcl
[Service]
Type=notify
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent -config-dir={{ interpolated_consul_configuration_dir }}
ExecReload=/usr/local/bin/consul reload
KillMode=process
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
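A unit file like this is typically installed and enabled from the same playbook. A minimal sketch, assuming the rendered unit lands at /etc/systemd/system/consul.service (the source and destination paths are assumptions):

- name: Install the Consul systemd unit
  become: true
  ansible.builtin.template:
    src: consul.service.j2
    dest: /etc/systemd/system/consul.service
    mode: "0644"

- name: Enable and start Consul
  become: true
  ansible.builtin.systemd:
    name: consul
    state: started
    enabled: true
    daemon_reload: true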


@@ -0,0 +1,67 @@
{
"services": [{
"name": "sabnzbd",
"id": "sabnzbd",
"tags": [
"traefik.enable=true",
"traefik.http.services.sabnzbd.loadbalancer.server.port=8080",
"traefik.http.routers.sabnzbd.rule=Host(`sab.{{ homelab_domain_name }}`)",
"traefik.http.routers.sabnzbd.entryPoints=web,websecure",
"traefik.http.routers.sabnzbd.service=sabnzbd",
"traefik.http.routers.sabnzbd.tls=true",
"traefik.http.routers.sabnzbd.tls.certresolver=cloudflare",
"traefik.http.routers.sabnzbd.middlewares=authelia@file"
],
"checks": [{
"id": "sabnzbd-http-check",
"http": "http://{{ synology_second_ip }}:8080",
"interval": "30s",
"timeout": "5s",
"success_before_passing": 3,
"failures_before_critical": 3
}]
},
{
"name": "synology",
"id": "synology",
"tags": [
"traefik.enable=true",
"traefik.http.services.synology.loadbalancer.server.port=5000",
"traefik.http.routers.synology.rule=Host(`nas.{{ homelab_domain_name }}`)",
"traefik.http.routers.synology.entryPoints=web,websecure",
"traefik.http.routers.synology.service=synology",
"traefik.http.routers.synology.tls=true",
"traefik.http.routers.synology.tls.certresolver=cloudflare"
],
"checks": [{
"id": "synology-http-check",
"http": "http://{{ synology_second_ip }}:5000",
"interval": "30s",
"timeout": "5s",
"success_before_passing": 3,
"failures_before_critical": 3
}]
},
{
"name": "asntoip",
"id": "asntoip",
"tags": [
"traefik.enable=true",
"traefik.http.services.asntoip.loadbalancer.server.port=5151",
"traefik.http.routers.asntoip.rule=Host(`asntoip.{{ homelab_domain_name }}`)",
"traefik.http.routers.asntoip.entryPoints=web,websecure",
"traefik.http.routers.asntoip.service=asntoip",
"traefik.http.routers.asntoip.tls=true",
"traefik.http.routers.asntoip.tls.certresolver=cloudflare"
],
"checks": [{
"id": "asntoip-http-check",
"http": "http://{{ synology_second_ip }}:5151",
"interval": "30s",
"timeout": "5s",
"success_before_passing": 3,
"failures_before_critical": 3
}]
}
]
}
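Consul only reads service definition files like this at startup or on a reload. A minimal sketch of the reload step, assuming the agent runs directly on the host (on the Synology, where Consul runs in Docker, this would need a docker exec instead):

- name: Reload Consul to pick up new service definitions
  ansible.builtin.command: consul reload
  changed_when: true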


@@ -0,0 +1,25 @@
{
"services": [{
"name": "tdarr",
"id": "tdarr",
"tags": [
"traefik.enable=true",
"traefik.http.services.tdarr.loadbalancer.server.port={{ tdarr_webui_port }}",
"traefik.http.routers.tdarr.rule=Host(`tdarr.{{ homelab_domain_name }}`)",
"traefik.http.routers.tdarr.entryPoints=web,websecure",
"traefik.http.routers.tdarr.service=tdarr",
"traefik.http.routers.tdarr.tls=true",
"traefik.http.routers.tdarr.tls.certresolver=cloudflare",
"traefik.http.routers.tdarr.middlewares=authelia@file"
],
"checks": [{
"id": "tdarr-http-check",
"http": "http://{{ ansible_host }}:{{ tdarr_webui_port }}",
"interval": "30s",
"timeout": "30s",
"success_before_passing": 3,
"failures_before_critical": 3
}]
}
]
}


@@ -0,0 +1,11 @@
version: '3.9'
services:
asn-to-ip:
image: ddimick/asn-to-ip:latest
hostname: asn-to-ip
container_name: asn-to-ip
network_mode: "bridge"
ports:
- 5151:5000
restart: unless-stopped


@@ -0,0 +1,13 @@
version: '3.9'
services:
consul:
image: consul:latest
hostname: consul
container_name: consul
network_mode: "host"
volumes:
- /volume1/docker/consul/data:/consul/data
- /volume1/docker/consul/config:/consul/config
command: consul agent -config-dir=/consul/config
restart: unless-stopped
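On the Synology, Consul runs from this compose file rather than under systemd. One hedged way to bring it up from Ansible, assuming the community.docker collection is installed and the file has already been copied to /volume1/docker/consul (both assumptions):

- name: Start the Consul container on the Synology
  community.docker.docker_compose:
    project_src: /volume1/docker/consul
    state: present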


@@ -0,0 +1,18 @@
version: "3.9"
services:
diun:
image: ghcr.io/crazy-max/diun
hostname: diun
container_name: diun
network_mode: "bridge"
environment:
- "TZ=America/New_York"
- "DIUN_WATCH_SCHEDULE=26 */48 * * *"
- "DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true"
- "DIUN_NOTIF_PUSHOVER_TOKEN={{ pushover_token }}"
- "DIUN_NOTIF_PUSHOVER_RECIPIENT={{ pushover_recipient }}"
- "DIUN_WATCH_FIRSTCHECKNOTIF=false"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
restart: unless-stopped


@@ -0,0 +1,17 @@
version: '3.9'
services:
plex:
image: ghcr.io/linuxserver/plex:latest
hostname: plex
container_name: plex
network_mode: "host"
environment:
- "TZ=America/New_York"
- "PGID=101"
- "PUID={{ ansible_user_uid }}"
- "VERSION=docker"
volumes:
- /volume1/media/media:/data/media
- /volume1/docker/plex:/config
restart: unless-stopped


@@ -0,0 +1,14 @@
version: '3.9'
services:
promtail:
image: grafana/promtail
hostname: promtail
container_name: promtail
ports:
- 9080:9080
network_mode: "bridge"
volumes:
- /volume1/docker/promtail/config.yml:/etc/promtail/config.yml
- /var/log:/var/log:ro
restart: unless-stopped


@@ -0,0 +1,23 @@
version: '3.9'
services:
sabnzbd:
image: ghcr.io/linuxserver/sabnzbd
hostname: sabnzbd
container_name: sabnzbd
network_mode: "bridge"
environment:
- "TZ=America/New_York"
- "PGID=101"
- "PUID={{ ansible_user_uid }}"
volumes:
- /var/services/homes/{{ my_username }}:/{{ my_username }}
- /volume1/nate:/nate
- /volume1/media/downloads/nzb:/nzbd
- /volume1/media/downloads/temp:/incomplete-downloads
- /volume1/media/downloads/complete:/downloads
- /volume1/docker/sabnzbd:/config
ports:
- 8080:8080
- 9090:9090
restart: unless-stopped


@@ -0,0 +1,29 @@
---
version: '3.9'
services:
tdarr_node:
image: haveagitgat/tdarr_node:latest
hostname: tdarr_node
container_name: tdarr_node
network_mode: "bridge"
environment:
- "nodeID={{ inventory_hostname }}"
- "nodeIP={{ ansible_host }}"
- "nodePort={{ tdarr_node_port }}"
- "serverIP={% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}"
- "serverPort={{ tdarr_server_port }}"
- "TZ=America/New_York"
- "PGID=101"
- "PUID={{ ansible_user_uid }}"
volumes:
- /volume1/docker/tdarr_node:/app/configs
- /volume1/media/media/movies:/movies
- /volume1/media/tdarr_tmp:/tdarr_tmp
- /volume1/media/tdarr_complete:/tdarr_complete
ports:
- {{ tdarr_node_port }}:{{ tdarr_node_port }}
devices:
- /dev/dri:/dev/dri
privileged: true
restart: unless-stopped


@@ -0,0 +1,14 @@
version: '3.9'
services:
telegraf:
image: nuntz/telegraf-snmp:latest
hostname: telegraf
container_name: nuntz-telegraf-snmp
network_mode: "host"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /volume1/docker/telegraf/mibs:/usr/share/snmp/mibs
- /volume1/docker/telegraf/logs:/var/logs/telegraf
- /volume1/docker/telegraf/config:/etc/telegraf
restart: unless-stopped

217
templates/nomad.hcl.j2 Normal file

@@ -0,0 +1,217 @@
# ----------------------------------------- General Info
name = "{{ inventory_hostname }}"
region = "global"
datacenter = "{{ datacenter_name }}"
# ----------------------------------------- Files and Logs
data_dir = "{{ nomad_opt_dir_location }}"
plugin_dir = "{{ nomad_opt_dir_location }}/plugins"
log_level = "warn"
log_file = "{{ nomad_opt_dir_location }}/logs/nomad.log"
log_rotate_max_files = 5
enable_syslog = false
# ----------------------------------------- Networking
bind_addr = "0.0.0.0" # the default
advertise {
{% if 'linode' in group_names %}
http = "{{ linode_private_ip }}:4646"
rpc = "{{ linode_private_ip }}:4647"
serf = "{{ linode_private_ip }}:4648" # non-default ports may be specified
{% elif 'synology' in group_names %}
http = "{{ synology_second_ip }}:4646"
rpc = "{{ synology_second_ip }}:4647"
serf = "{{ synology_second_ip }}:4648" # non-default ports may be specified
{% else %}
http = "{{ ansible_host }}:4646"
rpc = "{{ ansible_host }}:4647"
serf = "{{ ansible_host }}:4648" # non-default ports may be specified
{% endif %}
}
# ----------------------------------------- Consul Integration
consul {
{% if 'linode' in group_names %}
address = "{{ linode_private_ip }}:8500"
{% elif 'synology' in group_names %}
address = "{{ synology_second_ip }}:8500"
{% else %}
address = "{{ ansible_host }}:8500"
{% endif %}
server_service_name = "nomad-servers"
client_service_name = "nomad-clients"
auto_advertise = true
server_auto_join = true
client_auto_join = true
{% if is_nomad_server %}
tags = [
"traefik.enable=true",
"traefik.http.routers.nomad-server.entryPoints=web,websecure",
"traefik.http.routers.nomad-server.service=nomad-server",
"traefik.http.routers.nomad-server.rule=Host(`nomad.{{ homelab_domain_name }}`)",
"traefik.http.routers.nomad-server.tls=true",
"traefik.http.routers.nomad-server.middlewares=authelia@file,redirectScheme@file",
"traefik.http.services.nomad-server.loadbalancer.server.port=4646"
]
{% endif %}
}
# ----------------------------------------- Client Config
client {
enabled = true
{% if 'pis' in group_names %}
node_class = "rpi"
{% elif 'macs' in group_names %}
node_class = "mac"
{% elif 'synology' in group_names %}
node_class = "synology"
{% endif %}
reserved {
cpu = 250
memory = 100
reserved_ports = "22"
}
{% if not is_nomad_server %}
{% if 'linode' in group_names %}
server_join {
retry_join = [{% for h in groups['linode'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
{% else %}
server_join {
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
{% endif %}
{% endif %}
meta {
# These are variables that can be used in Nomad job files
PUID = "{{ ansible_user_uid }}"
PGID = "{{ ansible_user_gid }}"
nfsStorageRoot = "{{ interpolated_nfs_service_storage }}"
localStorageRoot = "{{ interpolated_localfs_service_storage }}"
{% if 'macs' in group_names %}
restoreCommand = "/usr/local/bin/service_restore"
restoreCommand1 = "--verbose"
restoreCommand2 = "--job"
restoreCommand3 = ""
backupCommand = "/usr/local/bin/service_backups"
backupCommandArg1 = "--verbose"
backupCommandArg2 = "--loglevel=INFO"
backupCommandArg3 = ""
backupAllocArg1 = "--verbose"
backupAllocArg2 = "--loglevel=INFO"
backupAllocArg3 = "--allocation"
backupAllocArg4 = "--delete"
backupAllocArg5 = "--job"
backupAllocArg6 = ""
{% else %}
restoreCommand = "sudo"
restoreCommand1 = "/usr/local/bin/service_restore"
restoreCommand2 = "--job"
restoreCommand3 = "--verbose"
backupCommand = "sudo"
backupCommandArg1 = "/usr/local/bin/service_backups"
backupCommandArg2 = "--verbose"
backupCommandArg3 = "--loglevel=INFO"
backupAllocArg1 = "/usr/local/bin/service_backups"
backupAllocArg2 = "--verbose"
backupAllocArg3 = "--loglevel=INFO"
backupAllocArg4 = "--allocation"
backupAllocArg5 = "--job"
backupAllocArg6 = "--delete"
{% endif %}
}
} # /client
{% if is_nomad_server %}
# ----------------------------------------- Server Config
server {
enabled = true
encrypt = "{{ nomad_encryption_key }}"
{% if 'linode' in group_names %}
bootstrap_expect = 1
{% else %}
bootstrap_expect = 3
{% endif %}
node_gc_threshold = "15m"
job_gc_interval = "15m"
job_gc_threshold = "6h"
heartbeat_grace = "60s"
min_heartbeat_ttl = "20s"
raft_protocol = "3"
server_join {
retry_join = [{% for h in groups['lan'] if hostvars[h].is_nomad_server == true %}"{{ hostvars[h].ansible_host }}"{% if not loop.last %}, {% endif %}{% endfor %}]
retry_max = 3
retry_interval = "15s"
}
}
autopilot {
cleanup_dead_servers = true
last_contact_threshold = "200ms"
max_trailing_logs = 250
server_stabilization_time = "10s"
enable_redundancy_zones = false
disable_upgrade_migration = false
enable_custom_upgrades = false
}
{% endif %}
{% if is_nomad_server and is_nomad_client %}
client {
enabled = true
}
{% endif %}
# ----------------------------------------- Telemetry
telemetry = {
publish_allocation_metrics = true
publish_node_metrics = true
collection_interval = "10s"
filter_default = false
datadog_address = "localhost:8125"
prefix_filter = [
"+nomad.client.allocations.running",
"+nomad.client.allocations.terminal",
"+nomad.client.allocs.cpu.allocated",
"+nomad.client.allocs.cpu.total_percent",
"+nomad.client.allocs.memory.allocated",
"+nomad.client.allocs.memory.swap",
"+nomad.client.allocs.memory.usage",
"+nomad.nomad.job_status.dead",
"+nomad.nomad.job_status.running",
"+nomad.nomad.job_status.pending",
"+nomad.nomad.job_summary.running",
"+nomad.nomad.job_summary.complete",
"+nomad.nomad.job_summary.lost",
"+nomad.nomad.job_summary.failed"]
}
# ----------------------------------------- Plugins
plugin "raw_exec" {
config {
enabled = true
}
}
plugin "docker" {
config {
allow_caps = [ "ALL" ]
allow_privileged = true
volumes {
enabled = true
}
}
}
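Like the Telegraf configuration earlier in this commit, this file is rendered onto each host by a template task. A minimal sketch of that step, assuming a restart_nomad handler exists (the handler name mirrors restart_telegraf above and is an assumption):

- name: Template nomad.hcl to the server
  become: true
  ansible.builtin.template:
    src: nomad.hcl.j2
    dest: "{{ nomad_configuration_dir }}/nomad.hcl"
    mode: "0644"
  notify: restart_nomad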


@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/sbin</string>
</dict>
<key>KeepAlive</key>
<dict>
<key>PathState</key>
<dict>
<key>{{ mac_keep_alive_file }}</key>
<true/>
</dict>
<key>SuccessfulExit</key>
<true/>
</dict>
<key>Label</key>
<string>com.{{ my_username }}.nomad</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/nomad</string>
<string>agent</string>
<string>-config</string>
<string>{{ nomad_configuration_dir }}</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StandardErrorPath</key>
<string>/usr/local/var/log/nomad.log</string>
<key>StandardOutPath</key>
<string>/usr/local/var/log/nomad.log</string>
</dict>
</plist>


@@ -0,0 +1,25 @@
[Unit]
Description=Nomad
Documentation=https://nomadproject.io/docs/
Wants=network-online.target
After=network-online.target
ConditionFileNotEmpty={{ nomad_configuration_dir }}/nomad.hcl
[Service]
{# {% if 'linode' in group_names %} #}
User=nomad
Group=nomad
{# {% endif %} #}
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nomad agent -config {{ nomad_configuration_dir }}
KillMode=process
KillSignal=SIGINT
LimitNOFILE=infinity
LimitNPROC=infinity
Restart=on-failure
RestartSec=2
StartLimitBurst=3
TasksMax=infinity
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,21 @@
job "backup_local_filesystems" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "sysbatch"
periodic {
cron = "0 */8 * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
task "do_backups" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
}
} // /task do_backups
} //job
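The job files in this commit are Jinja templates; once rendered they are submitted with the Nomad CLI. A minimal sketch of rendering and running this one, assuming the rendered file is written to /tmp and the nomad binary is on the target host's PATH (both assumptions):

- name: Render the backup job file
  ansible.builtin.template:
    src: backup_local_filesystems.hcl.j2
    dest: /tmp/backup_local_filesystems.hcl
    mode: "0644"

- name: Submit the job to Nomad
  ansible.builtin.command: nomad job run /tmp/backup_local_filesystems.hcl
  changed_when: true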


@@ -0,0 +1,88 @@
job "changedetection" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "changedetection" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "webUI" {
to = "5000"
}
}
task "changedetection" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
BASE_URL = "https://changes.{{ homelab_domain_name }}"
}
driver = "docker"
config {
image = "dgtlmoon/changedetection.io:latest"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/changedetection:/datastore"
]
ports = ["webUI"]
} // docker config
service {
port = "webUI"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`changes.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 100 # MHz
memory = 150 # MB
} // resources
} // task changedetection
} // group
} // job


@@ -0,0 +1,109 @@
job "chronograf" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "chronograf" {
restart {
attempts = 0
delay = "30s"
}
network {
port "chronografPort" {
to = "8888"
}
}
task "await-influxdb" {
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = [
"-c",
"echo -n 'Waiting for influxdb.service.consul to come alive'; until nslookup influxdb.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"
]
network_mode = "host"
}
resources {
cpu = 200
memory = 128
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "chronograf" {
// env {
// KEY = "VALUE"
// }
driver = "docker"
config {
image = "chronograf:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["chronografPort"]
} // docker config
service {
port = "chronografPort"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "chronografPort"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 40 # MHz
// memory = 10 # MB
// } // resources
} // task
} // group
} // job


@@ -0,0 +1,100 @@
job "code" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "code" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
// static = "80"
to = "3000"
}
}
task "code" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
SUDO_PASSWORD = "{{ simple_web_password }}"
PROXY_DOMAIN = "code.{{ homelab_domain_name }}"
CONNECTION_TOKEN = "1234"
DOCKER_MODS = "linuxserver/mods:code-server-python3|linuxserver/mods:code-server-shellcheck|linuxserver/mods:universal-git|linuxserver/mods:code-server-zsh"
// CONNECTION_TOKEN = supersecrettoken
// CONNECTION_SECRET = supersecrettoken
}
driver = "docker"
config {
image = "lscr.io/linuxserver/openvscode-server"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,redirectScheme@file"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1500 # MHz
memory = 300 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,64 @@
job "diagnostics" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
group "diagnostics" {
count = 1
restart {
attempts = 0
delay = "30s"
}
task "diagnostics" {
// env {
// KEY = "VALUE"
// }
driver = "docker"
config {
image = "alpine:latest"
hostname = "${NOMAD_JOB_NAME}"
args = [
"/bin/sh",
"-c",
"chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/backups/config_backups:/backups",
"${meta.localStorageRoot}:/docker"
]
} // docker config
template {
destination = "local/bootstrap.sh"
data = <<EOH
#!/bin/sh
apk update
apk add --no-cache bash
apk add --no-cache bind-tools
apk add --no-cache curl
apk add --no-cache git
apk add --no-cache jq
apk add --no-cache openssl
apk add --no-cache iperf3
apk add --no-cache nano
apk add --no-cache wget
tail -f /dev/null # Keep container running
EOH
}
} // tasks
} // group
} // job


@@ -0,0 +1,41 @@
job "diun" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "system"
group "diun" {
restart {
attempts = 0
delay = "30s"
}
task "diun" {
env {
// DIUN_PROVIDERS_DOCKER_ENDPOINT = "unix:///var/run/docker.sock"
DIUN_NOTIF_PUSHOVER_RECIPIENT = "{{ pushover_recipient }}"
DIUN_NOTIF_PUSHOVER_TOKEN = "{{ pushover_token }}"
DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT = "true"
DIUN_WATCH_FIRSTCHECKNOTIF = "false"
DIUN_WATCH_SCHEDULE = "26 */48 * * *"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "crazymax/diun:latest"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"/var/run/docker.sock:/var/run/docker.sock"
]
} // docker config
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task diun
} // group
} // job


@@ -0,0 +1,120 @@
job "grafana" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "grafana" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "http" {}
}
task "grafana" {
env {
GF_PATHS_CONFIG = "/local/grafana.ini"
}
driver = "docker"
config {
image = "grafana/grafana:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["http"]
volumes = ["${meta.nfsStorageRoot}/pi-cluster/grafana:/var/lib/grafana"]
} // docker config
template {
destination = "local/grafana.ini"
data = <<EOH
[server]
domain = grafana.{{ homelab_domain_name }}
{% raw %}http_port = {{ env "NOMAD_PORT_http" }}{% endraw +%}
[analytics]
reporting_enabled = false
[security]
admin_user = {{ my_username }}
admin_password = {{ grafana_admin_password }}
cookie_secure = true
[users]
allow_sign_up = false
allow_org_create = false
[smtp]
enabled = true
host = {{ email_smtp_host }}:{{ email_smtp_port }}
user = {{ email_smtp_account }}
password = {{ grafana_smtp_password }}
skip_verify = true
from_address = {{ my_email_address }}
from_name = Grafana
[log.file]
level = info
[date_formats]
default_timezone = America/New_York
[auth.proxy]
enabled = true
header_name = Remote-User
header_property = username
auto_sign_up = false
sync_ttl = 60
EOH
}
service {
port = "http"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.${NOMAD_JOB_NAME}_logout_redirect.redirectregex.regex=${NOMAD_JOB_NAME}\\.{{ homelab_domain_name }}/logout$",
"traefik.http.middlewares.${NOMAD_JOB_NAME}_logout_redirect.redirectregex.replacement=authelia.{{ homelab_domain_name }}/logout",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,${NOMAD_JOB_NAME}_logout_redirect"
]
check {
type = "http"
port = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 200 # MHz
memory = 60 # MB
} // resources
} // task grafana
} // group
} // job


@@ -0,0 +1,88 @@
job "headless-chrome" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "headless-chrome" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
static = "9222"
to = "9222"
}
}
task "headless-chrome" {
// env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// }
driver = "docker"
config {
image = "alpeware/chrome-headless-trunk:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`chrome.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // job


@@ -0,0 +1,113 @@
job "influxdb" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "influxdbGroup" {
count = 1
network {
port "httpAPI" {
static = "{{ influxdb_port }}"
to = "8086"
}
}
restart {
attempts = 0
delay = "30s"
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. This service will not
// work with its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "influxdb" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "influxdb:{{ influxdb_version }}"
hostname = "${NOMAD_JOB_NAME}"
ports = ["httpAPI"]
volumes = [
"${meta.localStorageRoot}/influxdb:/var/lib/influxdb"
]
} // docker config
service {
port = "httpAPI"
name = "${NOMAD_JOB_NAME}"
check {
type = "tcp"
port = "httpAPI"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1000 # MHz
memory = 400 # MB
} // resources
} // /task influxdb
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job
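The Traefik job later in this commit pushes metrics to a homelab database with a 2day retention policy, neither of which is created by this job. A hedged sketch of creating both, assuming InfluxDB 1.x and that the influx CLI can reach the service address (both assumptions):

- name: Create the homelab database and its retention policy
  ansible.builtin.command: >-
    influx -host influxdb.service.consul -port {{ influxdb_port }}
    -execute '{{ item }}'
  loop:
    - CREATE DATABASE homelab
    - CREATE RETENTION POLICY "2day" ON homelab DURATION 2d REPLICATION 1
  changed_when: true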


@@ -0,0 +1,126 @@
job "lidarr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "10m"
progress_deadline = "15m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "lidarrGroup" {
count = 1
restart {
attempts = 0
delay = "10m"
}
network {
port "lidarr" {
to = "8686"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. This service will not
// work with its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "lidarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "linuxserver/lidarr:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["lidarr"]
volumes = [
"${meta.localStorageRoot}/lidarr:/config",
"${meta.nfsStorageRoot}/media:/media"
]
} // docker config
service {
port = "lidarr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "lidarr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "10m"
ignore_warnings = true
}
} // service
resources {
cpu = 2000 # MHz
memory = 400 # MB
} // resources
} // /task lidarr main task
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,157 @@
job "loki" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "loki" {
count = 1
restart {
attempts = 0
delay = "1m"
}
network {
port "loki_port" {
static = "3100"
to = "3100"
}
}
task "loki" {
driver = "docker"
config {
image = "grafana/loki:latest"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"local/loki/local-config.yaml:/etc/loki/local-config.yaml",
"${meta.nfsStorageRoot}/pi-cluster/loki:/loki"
]
ports = ["loki_port"]
} // docker config
service {
port = "loki_port"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/metrics"
interval = "30s"
timeout = "10s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
template {
destination = "local/loki/local-config.yaml"
env = false
change_mode = "noop"
data = <<-EOH
---
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
ingester:
wal:
enabled: true
dir: /tmp/wal
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed
max_chunk_age: 1h # All chunks will be flushed when they hit this age. Def: 1h
chunk_target_size: 1048576 # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /loki/boltdb-shipper-active
cache_location: /loki/boltdb-shipper-cache
cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
shared_store: filesystem
filesystem:
directory: /loki/chunks
compactor:
working_directory: /loki/boltdb-shipper-compactor
shared_store: filesystem
limits_config:
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
ruler:
storage:
type: local
local:
directory: /loki/rules
rule_path: /loki/rules-temp
alertmanager_url: http://localhost:9093
ring:
kvstore:
store: inmemory
enable_api: true
EOH
} // template
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task loki
} // group
} // job


@@ -0,0 +1,93 @@
job "nginx" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "nginx" {
restart {
attempts = 0
delay = "30s"
}
network {
port "web" {
to = "80"
}
// port "websecure" {
// to = "443"
// }
}
task "nginx" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/nginx"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"/mnt/usbDrive/nginx:/config"
]
ports = ["web"]
} // docker config
service {
port = "web"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "web"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 100 # MHz
memory = 300 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,91 @@
job "nzbhydra" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "nzbhydra" {
restart {
attempts = 0
delay = "30s"
}
network {
port "hydra_port" {
to = "5076"
}
}
task "nzbhydra" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/nzbhydra2:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["hydra_port"]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/nzbhydra:/config"
]
} // docker config
service {
port = "hydra_port"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`hydra.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/"
interval = "30s"
timeout = "10s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 600 # MHz
memory = 400 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,94 @@
job "overseerr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "overseerr" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "overseerr" {
to = "5055"
}
}
task "overseerr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/overseerr"
hostname = "${NOMAD_JOB_NAME}"
ports = ["overseerr"]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/overseerr:/config"
]
} // docker config
service {
port = "overseerr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=overseerr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "overseerr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1600 # MHz
memory = 300 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,155 @@
job "pihole" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi(2|3)"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "pihole-group" {
network {
port "web" {
static = "80"
to = "80"
}
port "dns" {
static = "53"
to = "53"
}
// port "dhcp" {
// static = "67"
// to = "67"
// }
}
task "await_filesytem" {
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
network_mode = "host"
args = [
"-c",
"echo -n 'Waiting for /mnt/pi-cluster/pihole5 to be mounted'; until [ -f /etc/pihole/gravity.db ]; do echo '.'; sleep 2; done",
]
volumes = [
"/mnt/pi-cluster/pihole5:/etc/pihole/"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /await-filesystem
task "pihole" {
env {
// REV_SERVER_DOMAIN = ""
ADMIN_EMAIL = "{{ my_email_address }}"
DHCP_ACTIVE = "false"
DNS_BOGUS_PRIV = "false"
DNS_FQDN_REQUIRED = "false"
DNSSEC = "false"
FTLCONF_REPLY_ADDR4 = "${attr.unique.network.ip-address}"
IPv6 = "false"
PIHOLE_DNS_ = "10.0.30.1#53"
QUERY_LOGGING = "true"
REV_SERVER = "true"
REV_SERVER_CIDR = "10.0.0.0/16"
REV_SERVER_TARGET = "10.0.30.1"
TEMPERATUREUNIT = "f"
TZ = "America/New_York"
WEBTHEME = "default-light"
WEBUIBOXEDLAYOUT = "traditional"
}
driver = "docker"
config {
image = "pihole/pihole:latest"
hostname = "${NOMAD_JOB_NAME}"
dns_servers = [
"127.0.0.1",
"1.1.1.1"
]
extra_hosts = [
"laptopVPN:10.0.90.2",
"FiddleStixPhoneVPN:10.0.90.3"
]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/pihole5:/etc/pihole/",
"${meta.nfsStorageRoot}/pi-cluster/pihole5/dnsmasq.d:/etc/dnsmasq.d/"
// "${meta.nfsStorageRoot}/pi-cluster/pihole5/logs/pihole.log:/var/log/pihole.log",
// "${meta.nfsStorageRoot}/pi-cluster/pihole5/logs/pihole-FTL.log:/var/log/pihole-FTL.log"
]
ports = ["web", "dns"]
}
resources {
cpu = 400 # MHz
memory = 80 # MB
}
service {
name = "${NOMAD_JOB_NAME}"
port = "web"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`p.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.piholeRedirect.redirectregex.regex=^(https?://p\\.{{ homelab_domain_name }})/?$",
"traefik.http.middlewares.piholeRedirect.redirectregex.replacement=$${1}/admin/",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,piholeRedirect"
]
check {
type = "http"
path = "/admin/"
port = "web"
interval = "30s"
timeout = "2s"
}
check_restart {
limit = 3
grace = "10m"
ignore_warnings = false
}
}
service {
name = "piholeDNStcp"
port = "dns"
check {
type = "tcp"
port = "dns"
interval = "30s"
timeout = "2s"
}
check_restart {
limit = 3
grace = "60s"
ignore_warnings = false
}
}
}
} // group
}


@@ -0,0 +1,88 @@
job "promtail-syslogs" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "system"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "promtail-syslogs" {
restart {
attempts = 0
delay = "30s"
}
task "promtail-syslogs" {
driver = "docker"
config {
image = "grafana/promtail"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"/var/log:/var/log"
]
args = [
"-config.file",
"/local/promtail-config.yaml",
"-print-config-stderr"
]
} // docker config
template {
destination = "local/promtail-config.yaml"
env = false
data = <<EOH
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
{% raw -%}
clients:
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
{% endraw %}
scrape_configs:
- job_name: system
static_configs:
- targets:
- localhost
labels:
job: syslog
{% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
__path__: /var/log/syslog
- targets:
- localhost
labels:
job: authlog
{% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
__path__: /var/log/auth.log
EOH
} // template
resources {
cpu = 30 # MHz
memory = 30 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,129 @@
job "prowlarr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi4"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "prowlarrGroup" {
count = 1
restart {
attempts = 0
delay = "10m"
}
network {
port "prowlarr" {
to = "9696"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. This service will not
// work with its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "prowlarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/prowlarr:develop"
force_pull = true
hostname = "${NOMAD_JOB_NAME}"
ports = ["prowlarr"]
volumes = [
"${meta.localStorageRoot}/prowlarr:/config"
]
} // docker config
service {
port = "prowlarr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "prowlarr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1000 # MHz
memory = 400 # MB
} // resources
} // /task prowlarr
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,123 @@
job "radarr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "radarrGroup" {
restart {
attempts = 0
delay = "10m"
}
network {
port "radarr" {
to = "7878"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. This service will not
// work with its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "radarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/radarr:develop"
hostname = "${NOMAD_JOB_NAME}"
force_pull = true
ports = ["radarr"]
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
"${meta.nfsStorageRoot}/media:/media"
]
} // docker config
service {
port = "radarr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "radarr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 2000 # MHz
memory = 400 # MB
} // resources
} // /task radarr
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,468 @@
job "reverse-proxy" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
value = "rpi1"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "reverse-proxy-group" {
restart {
attempts = 0
delay = "30s"
}
network {
port "authelia-port" {
static = {{ authelia_port }}
to = 9091
}
port "whoami" {
to = 80
}
port "dashboard" {
static = 8080
to = 8080
}
port "web" {
static = 80
to = 80
}
port "websecure" {
static = 443
to = 443
}
port "externalwebsecure" {
static = 4430
to = 4430
}
}
task "authelia" {
env {
TZ = "America/New_York"
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
}
driver = "docker"
config {
image = "authelia/authelia"
hostname = "authelia"
ports = ["authelia-port"]
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/authelia:/config"
]
args = [
"--config",
"/local/authelia/config.yaml"
]
} // docker config
template {
destination = "local/authelia/config.yaml"
env = false
change_mode = "noop"
perms = "644"
data = <<-EOH
---
## The theme to display: light, dark, grey, auto.
theme: auto
jwt_secret: {{ authelia_jwt_secret }}
default_redirection_url: https://authelia.{{ homelab_domain_name }}
server:
host: 0.0.0.0
port: 9091
path: ""
read_buffer_size: 4096
write_buffer_size: 4096
enable_pprof: false
enable_expvars: false
disable_healthcheck: false
log:
level: info
format: text
# file_path: "/config/log.txt"
keep_stdout: false
totp:
issuer: authelia.com
authentication_backend:
disable_reset_password: false
file:
path: /config/users.yml
password:
algorithm: argon2id
iterations: 1
salt_length: 16
parallelism: 8
memory: 64
access_control:
default_policy: deny
networks:
- name: internal
networks:
- 10.0.0.0/16
#- 172.16.0.0/12
#- 192.168.0.0/18
rules:
# Rules applied to everyone
- domain: "*.{{ homelab_domain_name }}"
policy: two_factor
networks:
- internal
session:
name: authelia_session
domain: {{ homelab_domain_name }}
same_site: lax
secret: {{ authelia_session_secret }}
expiration: 1h
inactivity: 15m
remember_me_duration: 1w
regulation:
max_retries: 5
find_time: 10m
ban_time: 15m
storage:
encryption_key: {{ authelia_sqlite_encryption_key }}
local:
path: /config/db.sqlite3
notifier:
smtp:
username: {{ email_smtp_account }}
password: {{ authelia_smtp_password }}
host: {{ email_smtp_host }}
port: {{ email_smtp_port }}
sender: "Authelia <{{ my_email_address }}>"
subject: "[Authelia] {title}"
startup_check_address: {{ my_email_address }}
EOH
}
service {
port = "authelia-port"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`authelia.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Cache-Control=no-store",
"traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Pragma=no-cache",
"traefik.http.routers.authelia.middlewares=authelia-headers"
]
check {
type = "tcp"
port = "authelia-port"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 200 # MHz
memory = 110 # MB
}
} // task authelia
task "whoami" {
driver = "docker"
config {
image = "containous/whoami:latest"
hostname = "${NOMAD_TASK_NAME}"
ports = ["whoami"]
} // /docker config
service {
port = "whoami"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
resources {
cpu = 25 # MHz
memory = 10 # MB
}
} // /task whoami
task "traefik" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
CF_API_EMAIL = "{{ my_email_address }}"
CF_DNS_API_TOKEN = "{{ traefik_cf_api_token }}"
}
driver = "docker"
config {
image = "traefik:{{ traefik_version }}"
hostname = "traefik"
ports = ["dashboard", "web", "websecure", "externalwebsecure"]
volumes = ["${meta.nfsStorageRoot}/pi-cluster/traefik/acme:/acme"]
args = [
"--global.sendAnonymousUsage=false",
"--global.checkNewVersion=false",
"--entryPoints.web.address=:80",
"--entryPoints.websecure.address=:443",
"--entryPoints.externalwebsecure.address=:4430",
"--entrypoints.web.http.redirections.entryPoint.to=websecure",
"--entrypoints.web.http.redirections.entryPoint.scheme=https",
"--entrypoints.web.http.redirections.entryPoint.permanent=true",
"--providers.file.filename=/local/traefik/siteconfigs.toml",
"--providers.file.watch=true",
"--providers.consulcatalog=true",
"--providers.consulcatalog.endpoint.address=http://consul.service.consul:8500",
"--providers.consulcatalog.prefix=traefik",
"--providers.consulcatalog.exposedbydefault=false",
"--metrics=true",
"--metrics.influxdb=true",
"--metrics.influxdb.address=influxdb.service.consul:{{ influxdb_port }}",
"--metrics.influxdb.protocol=http",
"--metrics.influxdb.pushinterval=10s",
"--metrics.influxdb.database=homelab",
"--metrics.influxdb.retentionpolicy=2day",
"--metrics.influxdb.addentrypointslabels=true",
"--metrics.influxdb.addserviceslabels=true",
"--accesslog=true",
"--log=true",
"--log.level=ERROR",
"--api=true",
"--api.dashboard=true",
"--api.insecure=true",
"--certificatesresolvers.cloudflare.acme.email={{ my_email_address }}",
"--certificatesresolvers.cloudflare.acme.storage=/acme/acme-${node.unique.name}.json",
"--certificatesresolvers.cloudflare.acme.dnschallenge=true",
"--certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare",
"--certificatesresolvers.cloudflare.acme.dnschallenge.delaybeforecheck=10",
"--certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53"
]
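// Note: the ACME storage path embeds ${node.unique.name} so each node keeps its
// own certificate store on the shared NFS mount, presumably to avoid concurrent
// writes to a single acme.json.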
} // docker config
template {
destination = "local/traefik/httpasswd"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
template {
destination = "local/traefik/httpasswdFamily"
env = false
change_mode = "noop"
data = <<-EOH
{{ my_username }}:{{ traefik_http_pass_me }}
family:{{ traefik_http_pass_family }}
EOH
}
template {
destination = "local/traefik/siteconfigs.toml"
env = false
change_mode = "noop"
data = <<-EOH
[http]
[http.middlewares]
[http.middlewares.compress.compress]
[http.middlewares.localIPOnly.ipWhiteList]
sourceRange = ["10.0.0.0/16"]
[http.middlewares.redirectScheme.redirectScheme]
scheme = "https"
permanent = true
[http.middlewares.authelia.forwardAuth]
address = "http://authelia.service.consul:{{ authelia_port }}/api/verify?rd=https://authelia.{{ homelab_domain_name }}"
trustForwardHeader = true
authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]
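# Traefik forwards each request to Authelia's /api/verify endpoint;
# unauthenticated browser requests are redirected to the portal via the rd=
# parameter, and on success the Remote-* headers above are passed to the backend.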
[http.middlewares.basicauth.basicauth]
usersfile = "/local/traefik/httpasswd"
removeHeader = true
[http.middlewares.basicauth-family.basicauth]
usersfile = "/local/traefik/httpasswdFamily"
removeHeader = true
[http.middlewares.allowFrame.headers]
customFrameOptionsValue = "allow-from https://home.{{ homelab_domain_name }}"
[http.routers]
[http.routers.consul]
rule = "Host(`consul.{{ homelab_domain_name }}`)"
service = "consul"
entrypoints = ["web","websecure"]
[http.routers.consul.tls]
certResolver = "cloudflare" # From static configuration
[http.services]
[http.services.consul]
[http.services.consul.loadBalancer]
passHostHeader = true
[[http.services.consul.loadBalancer.servers]]
url = "http://consul.service.consul:8500"
EOH
}
service {
port = "dashboard"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file,redirectScheme@file"
]
check {
type = "tcp"
port = "dashboard"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
//cpu = 40 # MHz
memory = 64 # MB
} // resources
} // task traefik
// task "promtail-traefik" {
// driver = "docker"
// config {
// image = "grafana/promtail"
// hostname = "promtail-traefik"
// volumes = [
// "/mnt/pi-cluster/logs:/traefik"
// ]
// args = [
// "-config.file",
// "/local/promtail-config.yaml",
// "-print-config-stderr",
// ]
// } // docker config
// template {
// destination = "local/promtail-config.yaml"
// env = false
// data = <<-EOH
// server:
// http_listen_port: 9080
// grpc_listen_port: 0
// positions:
// filename: /alloc/positions.yaml
// {% raw -%}
// clients:
// - url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
// {% endraw %}
// scrape_configs:
// - job_name: traefik
// static_configs:
// - targets:
// - localhost
// labels:
// job: traefik_access
// {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
// __path__: "/alloc/logs/traefik.std*.0"
// pipeline_stages:
// - regex:
// expression: '^(?P<remote_addr>[\w\.]+) - (?P<remote_user>[^ ]*) \[(?P<time_local>.*)\] "(?P<method>[^ ]*) (?P<request>[^ ]*) (?P<protocol>[^ ]*)" (?P<status>[\d]+) (?P<body_bytes_sent>[\d]+) "(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)" (?P<request_number>[^ ]+) "(?P<router>[^ ]+)" "(?P<server_URL>[^ ]+)" (?P<response_time_ms>[^ ]+)ms$'
// - labels:
// method:
// status:
// router:
// response_time_ms:
// EOH
// } // template
// lifecycle {
// hook = "poststart"
// sidecar = true
// }
// resources {
// cpu = 30 # MHz
// memory = 30 # MB
// } // resources
// } // promtail sidecar task
} // reverse-proxy-group
}


@@ -0,0 +1,139 @@
job "sonarr" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "macmini"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "sonarrGroup" {
count = 1
restart {
attempts = 0
delay = "10m"
}
network {
port "sonarr" {
to = "8989"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. sonarr will not work with
// its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = [
"${meta.restoreCommand1}",
"${meta.restoreCommand2}",
"${NOMAD_JOB_NAME}",
"${meta.restoreCommand3}"
]
}
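// The ${meta.restoreCommand*} values are node meta attributes defined in each
// Nomad client's configuration, letting the restore binary and its arguments
// vary per host without editing this job.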
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "sonarr" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
//DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
//UMASK_SET = 022 #optional
}
driver = "docker"
config {
image = "linuxserver/sonarr:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["sonarr"]
volumes = [
"${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
"${meta.nfsStorageRoot}/media:/media"
]
} // docker config
service {
port = "sonarr"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=sonarr",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "sonarr"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1000 # MHz
memory = 400 # MB
} // resources
} // /task sonarr
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = [
"${meta.backupAllocArg1}",
"${meta.backupAllocArg2}",
"${meta.backupAllocArg3}",
"${meta.backupAllocArg4}",
"${meta.backupAllocArg5}",
"${NOMAD_JOB_NAME}",
"${meta.backupAllocArg6}"
]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,103 @@
job "stash" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "macmini"
}
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "stashGroup" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
to = "9999"
}
}
task "stash" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
STASH_STASH = "/data/"
STASH_GENERATED = "/generated/"
STASH_METADATA = "/metadata/"
STASH_CACHE = "/cache/"
STASH_PORT = "9999"
STASH_EXTERNAL_HOST = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
}
driver = "docker"
config {
image = "stashapp/stash:latest"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"${meta.nfsStorageRoot}/nate/.stash/cache:/cache",
"${meta.nfsStorageRoot}/nate/.stash/config:/root/.stash",
"${meta.nfsStorageRoot}/nate/.stash/generated:/generated",
"${meta.nfsStorageRoot}/nate/.stash/media:/data",
"${meta.nfsStorageRoot}/nate/.stash/metadata:/metadata",
"/etc/timezone:/etc/timezone:ro"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 4500 # MHz
memory = 400 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,100 @@
job "syncthing" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "syncthing" {
restart {
attempts = 0
delay = "30s"
}
network {
port "webGUI" {
to = "8384"
}
port "listen_tcp_udp" {
static = "22000"
to = "22000"
}
port "udp_proto_discovery" {
static = "21027"
to = "21027"
}
}
task "syncthing" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "ghcr.io/linuxserver/syncthing"
hostname = "${NOMAD_JOB_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config",
"${meta.nfsStorageRoot}/${NOMAD_JOB_NAME}:/Sync"
]
ports = ["webGUI","listen_tcp_udp","udp_proto_discovery"]
} // docker config
service {
port = "webGUI"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=syncthing",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "webGUI"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1200 # MHz
memory = 300 # MB
} // resources
} // task
} // group
} // job


@@ -0,0 +1,191 @@
job "TEMPLATE" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "TEMPLATE-db-group" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
static = "80"
to = "80"
}
}
task "TEMPLATE-db" {
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// TZ = "America/New_York"
}
driver = "docker"
config {
image = ""
hostname = "${NOMAD_JOB_NAME}1"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}1:/data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_JOB_NAME}1"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}1.rule=Host(`${NOMAD_JOB_NAME}1.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}1.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}1.service=${NOMAD_JOB_NAME}1",
"traefik.http.routers.${NOMAD_JOB_NAME}1.tls=true",,
"traefik.http.routers.${NOMAD_JOB_NAME}1.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_JOB_NAME}1.middlewares=authelia@file"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 40 # MHz
// memory = 10 # MB
// }
} // task
} // group
group "TEMPLATE-app-group" {
restart {
attempts = 1
delay = "30s"
}
network {
port "port2" {
static = "443"
to = "443"
}
}
task "await-TEMPLATEdb" {
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = ["-c", "echo -n 'Waiting for service'; until nslookup ${NOMAD_JOB_NAME}1.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]
network_mode = "host"
}
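// A minimal init-container pattern: this prestart task blocks until Consul DNS
// resolves the database service, then exits so the dependent task can start.
// network_mode = "host" is what lets nslookup reach the host's resolver (assumed
// to forward .consul queries).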
resources {
cpu = 200
memory = 128
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "TEMPLATE" {
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
// env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// TZ = "America/New_York"
// }
driver = "docker"
config {
image = ""
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/data",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["port2"]
}
service {
name = "${NOMAD_TASK_NAME}"
port = "port2"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",,
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
"traefik.http.routers.${NOMAD_TASK_NAME}.priority=1"
]
check {
type = "http"
port = "port2"
path = "/"
interval = "5m"
timeout = "1m"
}
check_restart {
limit = 3
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// }
} // TASK
} // close group
} // job


@@ -0,0 +1,95 @@
job "TEMPLATE" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "TEMPLATE" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "port1" {
static = "80"
to = "80"
}
}
task "TEMPLATE" {
// env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// }
driver = "docker"
config {
image = ""
hostname = "${NOMAD_TASK_NAME}"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/",
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro"
]
ports = ["port1"]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // job


@@ -0,0 +1,128 @@
job "TEMPLATE" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi4"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "TEMPLATE-group" {
count = 1
restart {
attempts = 0
delay = "10m"
}
network {
port "port1" {
static = ""
to = ""
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. Some services
// (e.g. sonarr) will not work with their database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "TEMPLATE" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = ""
hostname = "${NOMAD_TASK_NAME}"
ports = ["port1"]
volumes = [
"${meta.localStorageRoot}/${NOMAD_TASK_NAME}:/config"
]
} // docker config
service {
port = "port1"
name = "${NOMAD_TASK_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
"traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
]
check {
type = "tcp"
port = "port1"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 1000 # MHz
memory = 400 # MB
} // resources
} // /task ${NOMAD_JOB_NAME}
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,27 @@
job "execTest" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "batch"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi3"
}
group "testing" {
task "execTest" {
driver = "raw_exec"
config {
command = "/usr/local/bin/backup_configs"
args = ["--verbose","--job","sonarr"]
}
resources {
cpu = 500
memory = 256
}
}
}
}


@@ -0,0 +1,110 @@
job "uptimekuma" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "uptimekumaGroup" {
count = 1
restart {
attempts = 0
delay = "30s"
}
network {
port "web" {
to = "3001"
}
}
task "create_filesystem" {
// Copy the most recent backup into place on the local computer. Like sonarr,
// this service will not work with its database in an NFS share
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.restoreCommand}"
args = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task create_filesystem
task "uptimekuma" {
// env {
// PUID = "${meta.PUID}"
// PGID = "${meta.PGID}"
// }
driver = "docker"
config {
image = "louislam/uptime-kuma:latest"
hostname = "${NOMAD_JOB_NAME}"
volumes = [ "${meta.localStorageRoot}/uptimekuma:/app/data" ]
ports = ["web"]
} // docker config
service {
port = "web"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`uptime.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "tcp"
port = "web"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
resources {
cpu = 400 # MHz
memory = 100 # MB
} // resources
} // task
task "save_configuration" {
driver = "raw_exec"
config {
# When running a binary that exists on the host, the path must be absolute
command = "${meta.backupCommand}"
args = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
}
lifecycle {
hook = "poststop"
sidecar = false
}
} // /task save_configuration
} // group
} // job


@@ -0,0 +1,95 @@
job "whoogle" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
// constraint {
// attribute = "${node.unique.name}"
// operator = "regexp"
// value = "rpi(1|2|3)"
// }
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "whoogle" {
restart {
attempts = 0
delay = "30s"
}
network {
port "whoogle" {
to = "5000"
}
}
task "whoogle" {
env {
WHOOGLE_CONFIG_BLOCK = "pinterest.com"
WHOOGLE_CONFIG_DISABLE = "1"
WHOOGLE_CONFIG_GET_ONLY = "1"
WHOOGLE_CONFIG_LANGUAGE = "lang_en"
WHOOGLE_CONFIG_NEW_TAB = "0"
WHOOGLE_CONFIG_SEARCH_LANGUAGE = "lang_en"
WHOOGLE_CONFIG_THEME = "light"
WHOOGLE_CONFIG_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
WHOOGLE_CONFIG_VIEW_IMAGE = "1"
WHOOGLE_RESULTS_PER_PAGE = "20"
}
driver = "docker"
config {
image = "benbusby/whoogle-search:latest"
hostname = "${NOMAD_JOB_NAME}"
ports = ["whoogle"]
} // docker config
service {
port = "whoogle"
name = "${NOMAD_JOB_NAME}"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
]
check {
type = "http"
path = "/"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 0
grace = "1m"
ignore_warnings = true
}
} // service
// resources {
// cpu = 100 # MHz
// memory = 300 # MB
// } // resources
} // task
} // group
} // job


@@ -0,0 +1,257 @@
job "wikijs" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "service"
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
canary = 0
stagger = "30s"
}
group "wikijs_db_group" {
restart {
attempts = 1
delay = "30s"
}
network {
port "db" {
static = "5434"
to = "5432"
}
}
task "await_db_filesytem" {
constraint {
attribute = "${node.unique.name}"
value = "macmini"
}
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = [
"-c",
"echo -n 'Waiting for /etc/postgresql/postgresql.conf to be available'; until [ -f /etc/postgresql/my-postgres.conf ]; do echo '.'; sleep 2; done",
]
network_mode = "host"
volumes = [
"/Users/{{ my_username }}/cluster/wikidb:/etc/postgresql"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "await_backup_filesytem" {
constraint {
attribute = "${node.unique.name}"
value = "macmini"
}
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = [
"-c",
"echo -n 'Waiting for /backups to be available'; until [ -f /backups/dbBackup.log ]; do echo '.'; sleep 2; done",
]
network_mode = "host"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/backups/wikijsdb:/backups"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "wikijs_db" {
constraint {
attribute = "${node.unique.name}"
value = "macmini"
}
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
POSTGRES_USER = "wikijs"
POSTGRES_PASSWORD = "wikijs"
POSTGRES_DB = "wikijs"
PGDATA = "/var/lib/postgresql/data/pgdata"
}
driver = "docker"
config {
image = "postgres:9.6.17"
hostname = "wikijs_db"
volumes = [
"/Users/{{ my_username }}/cluster/wikidb/pgdata:/var/lib/postgresql/data",
"/Users/{{ my_username }}/cluster/wikidb/my-postgres.conf:/etc/postgresql/postgresql.conf",
"/Users/{{ my_username }}/cluster/wikidb/entrypoint:/docker-entrypoint-initdb.d",
"${meta.nfsStorageRoot}/pi-cluster/backups/wikijsdb:/backups"
]
ports = ["db"]
}
artifact {
source = "git::https://github.com/{{ my_username }}/db_scripts.git"
destination = "local/scripts"
}
service {
port = "db"
name = "wikijsdb"
check {
type = "tcp"
port = "db"
interval = "30s"
timeout = "4s"
}
check_restart {
limit = 2
grace = "1m"
ignore_warnings = true
}
}
resources {
cpu = 55 # MHz
memory = 60 # MB
}
} // /task
} // /group
group "wikijs_app_group" {
restart {
attempts = 1
delay = "30s"
}
network {
port "http" {
to = "3000"
}
}
task "await_database" {
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = [
"-c",
"echo -n 'Waiting for wikijsdb.service.consul to come alive'; until nslookup wikijsdb.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"
]
network_mode = "host"
}
resources {
cpu = 200
memory = 128
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "await_filesytem" {
driver = "docker"
config {
image = "busybox:latest"
command = "sh"
args = [
"-c",
"echo -n 'Waiting for ${meta.nfsStorageRoot}/pi-cluster/wikijs/ to be mounted'; until less -E /wiki/config.yml | grep 'wikijsdb.service.consul' 2>&1 >/dev/null; do echo '.'; sleep 2; done",
]
network_mode = "host"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/wikijs/config/config.yml:/wiki/config.yml"
]
}
lifecycle {
hook = "prestart"
sidecar = false
}
} // /task
task "wikijs_app" {
env {
PUID = "${meta.PUID}"
PGID = "${meta.PGID}"
TZ = "America/New_York"
}
driver = "docker"
config {
image = "linuxserver/wikijs:version-2.5.170"
hostname = "wikijs-app"
volumes = [
"${meta.nfsStorageRoot}/pi-cluster/wikijs/config/config.yml:/wiki/config.yml",
"${meta.nfsStorageRoot}/pi-cluster/wikijs/config:/config",
"${meta.nfsStorageRoot}/pi-cluster/wikijs/data/:/data"
]
ports = ["http"]
} // /config
service {
port = "http"
name = "wikijs"
tags = [
"traefik.enable=true",
"traefik.http.routers.wikijs.rule=Host(`wiki.{{ homelab_domain_name }}`)",
"traefik.http.routers.wikijs.entryPoints=web,websecure",
"traefik.http.routers.wikijs.service=wikijs",
"traefik.http.routers.wikijs.tls=true"
]
check {
type = "http"
path = "/"
interval = "90s"
timeout = "15s"
}
check_restart {
limit = 3
grace = "30s"
ignore_warnings = true
}
} // /service
resources {
// cpu = 100 # MHz
// memory = 60 # MB
}
} // /task
} // /group
} // job

File diff suppressed because it is too large


@@ -0,0 +1,948 @@
#!/usr/bin/env bash
_mainScript_() {
_setPATH_ "/bin" "/usr/bin" "/usr/local/bin"
debug "whoami: $(whoami)"
if ! _rootAvailable_; then fatal "This script must be run as root"; fi
if [ -z "${JOB:-}" ]; then
error "Service name is not set"
_safeExit_ 1
fi
JOB_DIR="{{ interpolated_localfs_service_storage }}/${JOB}"
debug "JOB_DIR: ${JOB_DIR}" ${LINENO}
if [ -z "${JOB:-}" ] || [ ! -d "${JOB_DIR}" ]; then
error "Can not find job directory: ${JOB_DIR}" "${LINENO}"
_safeExit_ 1
fi
if [ ! -d "${BACKUP_DIR}" ]; then
error "Can not find backup directory: ${BACKUP_DIR}" "${LINENO}"
_safeExit_ 1
fi
# Identify the latest backup
# shellcheck disable=SC2010
MOST_RECENT_BACKUP="$(ls "${BACKUP_DIR}" | grep --color=never "${JOB}" | sort -n -t _ -k 2 | tail -1)"
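# Note: this assumes backup filenames embed a sortable timestamp after an
# underscore (e.g. a hypothetical sonarr_1644072153.tgz), so 'sort -n -t _ -k 2'
# orders them chronologically.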
if [ -f "${BACKUP_DIR}/${MOST_RECENT_BACKUP}" ]; then
debug "Most recent backup: ${MOST_RECENT_BACKUP}"
else
error "Most recent backup does not exist" "${LINENO}"
fi
# Don't run as root on macOS
if [[ $(_detectOS_) == mac ]]; then
# Ensure destination directory is clean
_execute_ "command rm -rf \"${JOB_DIR:?}\""
_execute_ "mkdir \"${JOB_DIR:?}\""
# Extract the backup
if [ ${DRYRUN} == true ]; then
dryrun "tar zxvf \"${BACKUP_DIR}/${MOST_RECENT_BACKUP}\" -C \"${JOB_DIR}\""
elif tar zxvf "${BACKUP_DIR}/${MOST_RECENT_BACKUP}" -C "${JOB_DIR}"; then
info "Restore successful"
else
error "Restore failed"
_safeExit_ 1
fi
# Ensure permissions are correct
_execute_ "chown -R {{ ansible_user_uid }}:{{ ansible_user_gid }} \"${JOB_DIR}\""
else
# Ensure destination directory is clean
_execute_ "_runAsRoot_ command rm -rf \"${JOB_DIR:?}\""
_execute_ "_runAsRoot_ mkdir \"${JOB_DIR:?}\""
# Extract the backup
if [ ${DRYRUN} == true ]; then
dryrun "_runAsRoot_ tar zxvf \"${BACKUP_DIR}/${MOST_RECENT_BACKUP}\" -C \"${JOB_DIR}\""
elif _runAsRoot_ tar zxvf "${BACKUP_DIR}/${MOST_RECENT_BACKUP}" -C "${JOB_DIR}"; then
info "Restore successful"
else
error "Restore failed"
_safeExit_ 1
fi
# Ensure permissions are correct
_execute_ "_runAsRoot_ chown -R {{ ansible_user_uid }}:{{ ansible_user_gid }} \"${JOB_DIR}\""
fi
notice "Restored: ${MOST_RECENT_BACKUP}"
}
# end _mainScript_
# ################################## Flags and defaults
# Required variables
QUIET=false
LOGLEVEL=NOTICE
VERBOSE=false
FORCE=false
DRYRUN=false
declare -a ARGS=()
# Script specific
MULTIHOST=false
BACKUP_DIR="{{ interpolated_nfs_service_storage }}/pi-cluster/backups/config_backups"
LOGFILE="{{ interpolated_nfs_service_storage }}/pi-cluster/logs/$(basename "$0").log"
# ################################## Custom utility functions (Pasted from repository)
_execute_() {
# DESC:
# Executes commands while respecting global DRYRUN, VERBOSE, LOGGING, and QUIET flags
# ARGS:
# $1 (Required) - The command to be executed. Quotation marks MUST be escaped.
# $2 (Optional) - String to display after command is executed
# OPTS:
# -v Always print output from the execute function to STDOUT
# -n Use NOTICE level alerting (default is INFO)
# -p Pass a failed command with 'return 0'. This effectively bypasses set -e.
# -e Bypass _alert_ functions and use 'echo RESULT'
# -s Use '_alert_ success' for successful output. (default is 'info')
# -q Do not print output (QUIET mode)
# OUTS:
# stdout: Configurable output
# USE :
# _execute_ "cp -R \"~/dir/somefile.txt\" \"someNewFile.txt\"" "Optional message"
# _execute_ -sv "mkdir \"some/dir\""
# NOTE:
# If $DRYRUN=true, no commands are executed and the command that would have been executed
# is printed to STDOUT using dryrun level alerting
# If $VERBOSE=true, the command's native output is printed to stdout. This can be forced
# with '_execute_ -v'
local _localVerbose=false
local _passFailures=false
local _echoResult=false
local _echoSuccessResult=false
local _quietMode=false
local _echoNoticeResult=false
local opt
local OPTIND=1
while getopts ":vVpPeEsSqQnN" opt; do
case $opt in
v | V) _localVerbose=true ;;
p | P) _passFailures=true ;;
e | E) _echoResult=true ;;
s | S) _echoSuccessResult=true ;;
q | Q) _quietMode=true ;;
n | N) _echoNoticeResult=true ;;
*)
{
error "Unrecognized option '$1' passed to _execute_. Exiting."
_safeExit_
}
;;
esac
done
shift $((OPTIND - 1))
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local _command="${1}"
local _executeMessage="${2:-$1}"
local _saveVerbose=${VERBOSE}
if "${_localVerbose}"; then
VERBOSE=true
fi
if "${DRYRUN}"; then
if "${_quietMode}"; then
VERBOSE=${_saveVerbose}
return 0
fi
if [ -n "${2:-}" ]; then
dryrun "${1} (${2})" "$(caller)"
else
dryrun "${1}" "$(caller)"
fi
elif ${VERBOSE}; then
if eval "${_command}"; then
if "${_quietMode}"; then
VERBOSE=${_saveVerbose}
elif "${_echoResult}"; then
printf "%s\n" "${_executeMessage}"
elif "${_echoSuccessResult}"; then
success "${_executeMessage}"
elif "${_echoNoticeResult}"; then
notice "${_executeMessage}"
else
info "${_executeMessage}"
fi
else
if "${_quietMode}"; then
VERBOSE=${_saveVerbose}
elif "${_echoResult}"; then
printf "%s\n" "warning: ${_executeMessage}"
else
warning "${_executeMessage}"
fi
VERBOSE=${_saveVerbose}
"${_passFailures}" && return 0 || return 1
fi
else
if eval "${_command}" >/dev/null 2>&1; then
if "${_quietMode}"; then
VERBOSE=${_saveVerbose}
elif "${_echoResult}"; then
printf "%s\n" "${_executeMessage}"
elif "${_echoSuccessResult}"; then
success "${_executeMessage}"
elif "${_echoNoticeResult}"; then
notice "${_executeMessage}"
else
info "${_executeMessage}"
fi
else
if "${_quietMode}"; then
VERBOSE=$_saveVerbose
elif "${_echoResult}"; then
printf "%s\n" "error: ${_executeMessage}"
else
warning "${_executeMessage}"
fi
VERBOSE=${_saveVerbose}
"${_passFailures}" && return 0 || return 1
fi
fi
VERBOSE=${_saveVerbose}
return 0
}
_runAsRoot_() {
# DESC:
# Run the requested command as root (via sudo if requested)
# ARGS:
# $1 (optional): Set to zero to not attempt execution via sudo
# $@ (required): Passed through for execution as root user
# OUTS:
# Runs the requested command as root
# CREDIT:
# https://github.com/ralish/bash-script-template
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local _skip_sudo=false
if [[ ${1} =~ ^0$ ]]; then
_skip_sudo=true
shift
fi
if [[ ${EUID} -eq 0 ]]; then
"$@"
elif [[ ${_skip_sudo} == false ]]; then
sudo -H -- "$@"
else
fatal "Unable to run requested command as root: $*"
fi
}
_rootAvailable_() {
# DESC:
# Validate we have superuser access as root (via sudo if requested)
# ARGS:
# $1 (optional): Set to any value to not attempt root access via sudo
# OUTS:
# 0 if true
# 1 if false
# CREDIT:
# https://github.com/ralish/bash-script-template
local _superuser
local _testEUID
if [[ ${EUID} -eq 0 ]]; then
_superuser=true
elif [[ -z ${1:-} ]]; then
debug 'Sudo: Updating cached credentials ...'
if sudo -v; then
if [[ $(sudo -H -- "$BASH" -c 'printf "%s" "$EUID"') -eq 0 ]]; then
_superuser=true
else
_superuser=false
fi
else
_superuser=false
fi
fi
if [[ ${_superuser} == true ]]; then
debug 'Successfully acquired superuser credentials.'
return 0
else
debug 'Unable to acquire superuser credentials.'
return 1
fi
}
_detectOS_() {
# DESC:
# Identify the OS the script is run on
# ARGS:
# None
# OUTS:
# 0 - Success
# 1 - Failed to detect OS
# stdout: One of 'mac', 'linux', 'windows'
# USAGE:
# _detectOS_
# CREDIT:
# https://github.com/labbots/bash-utility
local _uname
local _os
if _uname=$(command -v uname); then
case $("${_uname}" | tr '[:upper:]' '[:lower:]') in
linux*)
_os="linux"
;;
darwin*)
_os="mac"
;;
msys* | cygwin* | mingw* | nt | win*)
# or possible 'bash on windows'
_os="windows"
;;
*)
return 1
;;
esac
else
return 1
fi
printf "%s" "${_os}"
}
# ################################## Functions required for this template to work
# Functions for providing alerts to the user and printing them to the log
_setColors_() {
# DESC:
# Sets colors use for alerts.
# ARGS:
# None
# OUTS:
# None
# USAGE:
# echo "${blue}Some text${reset}"
if tput setaf 1 >/dev/null 2>&1; then
bold=$(tput bold)
underline=$(tput smul)
reverse=$(tput rev)
reset=$(tput sgr0)
if [[ $(tput colors) -ge 256 ]] >/dev/null 2>&1; then
white=$(tput setaf 231)
blue=$(tput setaf 38)
yellow=$(tput setaf 11)
tan=$(tput setaf 3)
green=$(tput setaf 82)
red=$(tput setaf 1)
purple=$(tput setaf 171)
gray=$(tput setaf 250)
else
white=$(tput setaf 7)
blue=$(tput setaf 38)
yellow=$(tput setaf 3)
tan=$(tput setaf 3)
green=$(tput setaf 2)
red=$(tput setaf 1)
purple=$(tput setaf 13)
gray=$(tput setaf 7)
fi
else
bold="\033[4;37m"
reset="\033[0m"
underline="\033[4;37m"
reverse=""
white="\033[0;37m"
blue="\033[0;34m"
yellow="\033[0;33m"
tan="\033[0;33m"
green="\033[1;32m"
red="\033[0;31m"
purple="\033[0;35m"
gray="\033[0;37m"
fi
}
_alert_() {
# DESC:
# Controls all printing of messages to log files and stdout.
# ARGS:
# $1 (required) - The type of alert to print
# (success, header, notice, dryrun, debug, warning, error,
# fatal, info, input)
# $2 (required) - The message to be printed to stdout and/or a log file
# $3 (optional) - Pass '${LINENO}' to print the line number where the _alert_ was triggered
# OUTS:
# stdout: The message is printed to stdout
# log file: The message is printed to a log file
# USAGE:
# [_alertType] "[MESSAGE]" "${LINENO}"
# NOTES:
# - The colors of each alert type are set in this function
# - For specified alert types, the funcstac will be printed
local _color
local _alertType="${1}"
local _message="${2}"
local _line="${3:-}" # Optional line number
[[ $# -lt 2 ]] && fatal 'Missing required argument to _alert_'
if [[ -n ${_line} && ${_alertType} =~ ^(fatal|error) && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}(line: ${_line}) $(_printFuncStack_)"
elif [[ -n ${_line} && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}(line: ${_line})"
elif [[ -z ${_line} && ${_alertType} =~ ^(fatal|error) && ${FUNCNAME[2]} != "_trapCleanup_" ]]; then
_message="${_message} ${gray}$(_printFuncStack_)"
fi
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_color="${bold}${red}"
elif [ "${_alertType}" == "info" ]; then
_color="${gray}"
elif [ "${_alertType}" == "warning" ]; then
_color="${red}"
elif [ "${_alertType}" == "success" ]; then
_color="${green}"
elif [ "${_alertType}" == "debug" ]; then
_color="${purple}"
elif [ "${_alertType}" == "header" ]; then
_color="${bold}${white}${underline}"
elif [ ${_alertType} == "notice" ]; then
_color="${bold}"
elif [ ${_alertType} == "input" ]; then
_color="${bold}${underline}"
elif [ "${_alertType}" = "dryrun" ]; then
_color="${blue}"
else
_color=""
fi
_writeToScreen_() {
("${QUIET}") && return 0 # Print to console when script is not 'quiet'
[[ ${VERBOSE} == false && ${_alertType} =~ ^(debug|verbose) ]] && return 0
if ! [[ -t 1 || -z ${TERM:-} ]]; then # Don't use colors on non-recognized terminals
_color=""
reset=""
fi
if [[ ${_alertType} == header ]]; then
printf "${_color}%s${reset}\n" "${_message}"
else
printf "${_color}[%7s] %s${reset}\n" "${_alertType}" "${_message}"
fi
}
_writeToScreen_
_writeToLog_() {
[[ ${_alertType} == "input" ]] && return 0
[[ ${LOGLEVEL} =~ (off|OFF|Off) ]] && return 0
if [ -z "${LOGFILE:-}" ]; then
LOGFILE="$(pwd)/$(basename "$0").log"
fi
[ ! -d "$(dirname "${LOGFILE}")" ] && mkdir -p "$(dirname "${LOGFILE}")"
[[ ! -f ${LOGFILE} ]] && touch "${LOGFILE}"
# Don't use colors in logs
local cleanmessage="$(echo "${_message}" | sed -E 's/(\x1b)?\[(([0-9]{1,2})(;[0-9]{1,3}){0,2})?[mGK]//g')"
# Print message to log file
printf "%s [%7s] %s %s\n" "$(date +"%b %d %R:%S")" "${_alertType}" "[$(/bin/hostname)]" "${cleanmessage}" >>"${LOGFILE}"
}
# Write specified log level data to logfile
case "${LOGLEVEL:-ERROR}" in
ALL | all | All)
_writeToLog_
;;
DEBUG | debug | Debug)
_writeToLog_
;;
INFO | info | Info)
if [[ ${_alertType} =~ ^(error|fatal|warning|info|notice|success) ]]; then
_writeToLog_
fi
;;
NOTICE | notice | Notice)
if [[ ${_alertType} =~ ^(error|fatal|warning|notice|success) ]]; then
_writeToLog_
fi
;;
WARN | warn | Warn)
if [[ ${_alertType} =~ ^(error|fatal|warning) ]]; then
_writeToLog_
fi
;;
ERROR | error | Error)
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_writeToLog_
fi
;;
FATAL | fatal | Fatal)
if [[ ${_alertType} =~ ^fatal ]]; then
_writeToLog_
fi
;;
OFF | off)
return 0
;;
*)
if [[ ${_alertType} =~ ^(error|fatal) ]]; then
_writeToLog_
fi
;;
esac
} # /_alert_
error() { _alert_ error "${1}" "${2:-}"; }
warning() { _alert_ warning "${1}" "${2:-}"; }
notice() { _alert_ notice "${1}" "${2:-}"; }
info() { _alert_ info "${1}" "${2:-}"; }
success() { _alert_ success "${1}" "${2:-}"; }
dryrun() { _alert_ dryrun "${1}" "${2:-}"; }
input() { _alert_ input "${1}" "${2:-}"; }
header() { _alert_ header "${1}" "${2:-}"; }
debug() { _alert_ debug "${1}" "${2:-}"; }
fatal() {
_alert_ fatal "${1}" "${2:-}"
_safeExit_ "1"
}
# shellcheck disable=SC1009,SC1054,SC1056,SC1072,SC1073,SC1083
{% raw %}
_printFuncStack_() {
# DESC:
# Prints the function stack in use. Used for debugging, and error reporting.
# ARGS:
# None
# OUTS:
# stdout: Prints [function]:[file]:[line]
# NOTE:
# Does not print functions from the alert class
local _i
_funcStackResponse=()
for ((_i = 1; _i < ${#BASH_SOURCE[@]}; _i++)); do
case "${FUNCNAME[$_i]}" in "_alert_" | "_trapCleanup_" | fatal | error | warning | notice | info | debug | dryrun | header | success) continue ;; esac
_funcStackResponse+=("${FUNCNAME[$_i]}:$(basename ${BASH_SOURCE[$_i]}):${BASH_LINENO[_i - 1]}")
done
printf "( "
printf %s "${_funcStackResponse[0]}"
printf ' < %s' "${_funcStackResponse[@]:1}"
printf ' )\n'
}
{% endraw %}
_safeExit_() {
# DESC:
# Cleanup and exit from a script
# ARGS:
# $1 (optional) - Exit code (defaults to 0)
# OUTS:
# None
if [[ -d ${SCRIPT_LOCK:-} ]]; then
if command rm -rf "${SCRIPT_LOCK}"; then
debug "Removing script lock"
else
warning "Script lock could not be removed. Try manually deleting ${tan}'${LOCK_DIR}'"
fi
fi
if [[ -n ${TMP_DIR:-} && -d ${TMP_DIR:-} ]]; then
if [[ ${1:-} == 1 && -n "$(ls "${TMP_DIR}")" ]]; then
command rm -r "${TMP_DIR}"
else
command rm -r "${TMP_DIR}"
debug "Removing temp directory"
fi
fi
trap - INT TERM EXIT
exit ${1:-0}
}
_trapCleanup_() {
# DESC:
# Log errors and cleanup from script when an error is trapped. Called by 'trap'
# ARGS:
# $1: Line number where error was trapped
# $2: Line number in function
# $3: Command executing at the time of the trap
# $4: Names of all shell functions currently in the execution call stack
# $5: Scriptname
# $6: $BASH_SOURCE
# USAGE:
# trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM
# OUTS:
# Exits script with error code 1
local _line=${1:-} # LINENO
local _linecallfunc=${2:-}
local _command="${3:-}"
local _funcstack="${4:-}"
local _script="${5:-}"
local _sourced="${6:-}"
if [[ "$(declare -f "fatal")" && "$(declare -f "_printFuncStack_")" ]]; then
_funcstack="'$(echo "${_funcstack}" | sed -E 's/ / < /g')'"
if [[ ${_script##*/} == "${_sourced##*/}" ]]; then
fatal "${7:-} command: '${_command}' (line: ${_line}) [func: $(_printFuncStack_)]"
else
fatal "${7:-} command: '${_command}' (func: ${_funcstack} called at line ${_linecallfunc} of '${_script##*/}') (line: ${_line} of '${_sourced##*/}') "
fi
else
printf "%s\n" "Fatal error trapped. Exiting..."
fi
if [ "$(declare -f "_safeExit_")" ]; then
_safeExit_ 1
else
exit 1
fi
}
_makeTempDir_() {
# DESC:
# Creates a temp directory to house temporary files
# ARGS:
# $1 (Optional) - First characters/word of directory name
# OUTS:
# Sets $TMP_DIR variable to the path of the temp directory
# USAGE:
# _makeTempDir_ "$(basename "$0")"
[ -d "${TMP_DIR:-}" ] && return 0
if [ -n "${1:-}" ]; then
TMP_DIR="${TMPDIR:-/tmp/}${1}.${RANDOM}.${RANDOM}.$$"
else
TMP_DIR="${TMPDIR:-/tmp/}$(basename "$0").${RANDOM}.${RANDOM}.${RANDOM}.$$"
fi
(umask 077 && mkdir "${TMP_DIR}") || {
fatal "Could not create temporary directory! Exiting."
}
debug "\$TMP_DIR=${TMP_DIR}"
}
_acquireScriptLock_() {
# DESC:
# Acquire script lock to prevent running the same script a second time before the
# first instance exits
# ARGS:
# $1 (optional) - Scope of script execution lock (system or user)
# OUTS:
# exports $SCRIPT_LOCK - Path to the directory indicating we have the script lock
# Exits script if lock cannot be acquired
# NOTE:
# If the lock was acquired it's automatically released in _safeExit_()
local _lockDir
if [[ ${1:-} == 'system' ]]; then
_lockDir="${TMPDIR:-/tmp/}$(basename "$0").lock"
else
_lockDir="${TMPDIR:-/tmp/}$(basename "$0").$UID.lock"
fi
if command mkdir "${LOCK_DIR}" 2>/dev/null; then
readonly SCRIPT_LOCK="${_lockDir}"
debug "Acquired script lock: ${yellow}${SCRIPT_LOCK}${purple}"
else
if [ "$(declare -f "_safeExit_")" ]; then
error "Unable to acquire script lock: ${tan}${LOCK_DIR}${red}"
fatal "If you trust the script isn't running, delete the lock dir"
else
printf "%s\n" "ERROR: Could not acquire script lock. If you trust the script isn't running, delete: ${LOCK_DIR}"
exit 1
fi
fi
}
_setPATH_() {
# DESC:
# Add directories to $PATH so script can find executables
# ARGS:
# $@ - One or more paths
# OUTS: Adds items to $PATH
# USAGE:
# _setPATH_ "/usr/local/bin" "${HOME}/bin" "$(npm bin)"
[[ $# == 0 ]] && fatal "Missing required argument to ${FUNCNAME[0]}"
local _newPath
for _newPath in "$@"; do
if [ -d "${_newPath}" ]; then
if ! echo "${PATH}" | grep -Eq "(^|:)${_newPath}($|:)"; then
if PATH="${_newPath}:${PATH}"; then
debug "Added '${_newPath}' to PATH"
else
return 1
fi
else
debug "_setPATH_: '${_newPath}' already exists in PATH"
fi
else
debug "_setPATH_: can not find: ${_newPath}"
return 0
fi
done
return 0
}
_useGNUutils_() {
# DESC:
# Add GNU utilities to PATH to allow consistent use of sed/grep/tar/etc. on MacOS
# ARGS:
# None
# OUTS:
# 0 if successful
# 1 if unsuccessful
# PATH: Adds GNU utilities to the path
# USAGE:
# # if ! _useGNUUtils_; then exit 1; fi
# NOTES:
# GNU utilities can be added to MacOS using Homebrew
! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"
if _setPATH_ \
"/usr/local/opt/gnu-tar/libexec/gnubin" \
"/usr/local/opt/coreutils/libexec/gnubin" \
"/usr/local/opt/gnu-sed/libexec/gnubin" \
"/usr/local/opt/grep/libexec/gnubin" \
"/usr/local/opt/findutils/libexec/gnubin" \
"/opt/homebrew/opt/findutils/libexec/gnubin" \
"/opt/homebrew/opt/gnu-sed/libexec/gnubin" \
"/opt/homebrew/opt/grep/libexec/gnubin" \
"/opt/homebrew/opt/coreutils/libexec/gnubin" \
"/opt/homebrew/opt/gnu-tar/libexec/gnubin"; then
return 0
else
return 1
fi
}
_homebrewPath_() {
# DESC:
# Add homebrew bin dir to PATH
# ARGS:
# None
# OUTS:
# 0 if successful
# 1 if unsuccessful
# PATH: Adds homebrew bin directory to PATH
# USAGE:
# # if ! _homebrewPath_; then exit 1; fi
! declare -f "_setPATH_" &>/dev/null && fatal "${FUNCNAME[0]} needs function _setPATH_"
if _uname=$(command -v uname); then
if "${_uname}" | tr '[:upper:]' '[:lower:]' | grep -q 'darwin'; then
if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
return 0
else
return 1
fi
fi
else
if _setPATH_ "/usr/local/bin" "/opt/homebrew/bin"; then
return 0
else
return 1
fi
fi
}
{% raw %}
_parseOptions_() {
# DESC:
# Iterates through options passed to script and sets variables. Will break -ab into -a -b
# when needed and --foo=bar into --foo bar
# ARGS:
# $@ from command line
# OUTS:
# Sets array 'ARGS' containing all arguments passed to script that were not parsed as options
# USAGE:
# _parseOptions_ "$@"
# Iterate over options
local _optstring=h
declare -a _options
local _c
local i
while (($#)); do
case $1 in
# If option is of type -ab
-[!-]?*)
# Loop over each character starting with the second
for ((i = 1; i < ${#1}; i++)); do
_c=${1:i:1}
_options+=("-${_c}") # Add current char to options
# If option takes a required argument, and it's not the last char make
# the rest of the string its argument
if [[ ${_optstring} == *"${_c}:"* && ${1:i+1} ]]; then
_options+=("${1:i+1}")
break
fi
done
;;
# If option is of type --foo=bar
--?*=*) _options+=("${1%%=*}" "${1#*=}") ;;
# add --endopts for --
--) _options+=(--endopts) ;;
# Otherwise, nothing special
*) _options+=("$1") ;;
esac
shift
done
set -- "${_options[@]:-}"
unset _options
# Read the options and set stuff
while [[ ${1:-} == -?* ]]; do
case $1 in
# Custom options
--job)
shift
JOB="$1"
;;
# Common options
-h | --help)
_usage_
_safeExit_
;;
--loglevel)
shift
LOGLEVEL=${1}
;;
--logfile)
shift
LOGFILE="${1}"
;;
-n | --dryrun) DRYRUN=true ;;
-v | --verbose) VERBOSE=true ;;
-q | --quiet) QUIET=true ;;
--force) FORCE=true ;;
--endopts)
shift
break
;;
*)
if [ "$(declare -f "_safeExit_")" ]; then
fatal "invalid option: $1"
else
printf "%s\n" "Invalid option: $1"
exit 1
fi
;;
esac
shift
done
if [[ -z ${*} || ${*} == null ]]; then
ARGS=()
else
ARGS+=("$@") # Store the remaining user input as arguments.
fi
}
{% endraw %}
_usage_() {
cat <<USAGE_TEXT
Restores the most recent backup of a service to a target directory on the host. It is assumed
that the destination directory already exists on the host at: {{ interpolated_localfs_service_storage }}/[service name]
${bold}Options:${reset}
--job [service name] Name of the service to restore
-h, --help Display this help and exit
--loglevel [LEVEL] One of: FATAL, ERROR, WARN, INFO, NOTICE, DEBUG, ALL, OFF
(Default is 'NOTICE')
--logfile [FILE] Full PATH to logfile. (Default is '{{ interpolated_nfs_service_storage }}/pi-cluster/logs/$(basename "$0").log')
-n, --dryrun Non-destructive. Makes no permanent changes.
-q, --quiet Quiet (no output)
-v, --verbose Output more information. (Items echoed to 'verbose')
--force Skip all user interaction. Implied 'Yes' to all actions.
${bold}Example Usage:${reset}
${gray}# Run the script and specify log level and log file.${reset}
$(basename "$0") -vn --logfile "/path/to/file.log" --loglevel 'WARN'
USAGE_TEXT
}
# ################################## INITIALIZE AND RUN THE SCRIPT
# (Comment or uncomment the lines below to customize script behavior)
trap '_trapCleanup_ ${LINENO} ${BASH_LINENO} "${BASH_COMMAND}" "${FUNCNAME[*]}" "${0}" "${BASH_SOURCE[0]}"' EXIT INT TERM SIGINT SIGQUIT SIGTERM
# Trap errors in subshells and functions
set -o errtrace
# Exit on error. Append '||true' if you expect an error
set -o errexit
# Use last non-zero exit code in a pipeline
set -o pipefail
# Confirm we have BASH greater than v4
[ "${BASH_VERSINFO:-0}" -ge 4 ] || {
printf "%s\n" "ERROR: BASH_VERSINFO is '${BASH_VERSINFO:-0}'. This script requires BASH v4 or greater."
exit 1
}
# Make `for f in *.txt` work when `*.txt` matches zero files
shopt -s nullglob globstar
# Set IFS to preferred implementation
IFS=$' \n\t'
# Run in debug mode
# set -o xtrace
# Initialize color constants
_setColors_
# Disallow expansion of unset variables
set -o nounset
# Force arguments when invoking the script
# [[ $# -eq 0 ]] && _parseOptions_ "-h"
# Parse arguments passed to script
_parseOptions_ "$@"
# Create a temp directory '$TMP_DIR'
# _makeTempDir_ "$(basename "$0")"
# Acquire script lock
# _acquireScriptLock_
# Add Homebrew bin directory to PATH (MacOS)
_homebrewPath_
# Source GNU utilities from Homebrew (MacOS)
_useGNUutils_
# Run the main logic script
_mainScript_
# Exit cleanly
_safeExit_


@@ -0,0 +1,96 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
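# Example (hypothetical): urls = ["http://${INFLUX_HOST}:8086"] and
# metric_batch_size = ${BATCH_SIZE} would be substituted at startup from the
# telegraf process environment.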
# Global tags can be specified here in key="value" format.
[global_tags]
dc = "{{ datacenter_name }}"
ip = "{{ ansible_host }}"
# Configuration for telegraf agent
[agent]
interval = "10s" ## Default data collection interval for all inputs
round_interval = true ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
metric_batch_size = 1000 ## Controls the size of writes that Telegraf sends to output plugins
metric_buffer_limit = 10000 ## Maximum number of unwritten metrics per output.
collection_jitter = "5s" ## Jitter the collection by a random amount.
flush_interval = "10s" ## Default flushing interval for all outputs.
flush_jitter = "5s" ## Jitter the flush interval by a random amount
precision = ""
debug = false ## Log at debug level.
# quiet = false ## Log only error level messages.
{% if 'pis' in group_names %}
logtarget = "file" ## destination logs can be one of "file" or "stderr"
logfile = "/var/log/telegraf/telegraf.log"
logfile_rotation_interval = "1d"
# logfile_rotation_max_size = "0MB"
logfile_rotation_max_archives = 2
{% elif 'macs' in group_names %}
logtarget = "stderr" ## destination logs can be one of "file" or "stderr"
{% endif %}
hostname = "{{ inventory_hostname }}" ## Override default hostname, if empty use os.Hostname()
omit_hostname = false ## If set to true, do no set the "host" tag in the telegraf agent.
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
[[outputs.influxdb]]
urls = ["http://influxdb.service.consul:{{ influxdb_port }}"]
database = "homelab"
retention_policy = "2day"
timeout = "5s"
###############################################################################
# INPUT PLUGINS #
###############################################################################
[[inputs.cpu]] # Read metrics about cpu usage
percpu = true ## Whether to report per-cpu stats or not
totalcpu = true ## Whether to report total system cpu stats or not
collect_cpu_time = false ## If true, collect raw CPU time metrics.
report_active = false ## If true, compute and report the sum of all non-idle CPU states.
[[inputs.disk]] # Read metrics about disk usage by mount point
#mount_points = ["/mnt/usbDrive","/boot"] # Restrict the stats to only the specified mount points.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nfsd", "nfs4", "smbfs"]
[[inputs.diskio]] # Read metrics about disk IO by device
[[inputs.internal]] # Collect telegraf memory stats.
collect_memstats = true
[[inputs.mem]] # Read metrics about memory usage
[[inputs.processes]] # Get the number of processes and group them by status
[[inputs.swap]] # Read metrics about swap memory usage
[[inputs.system]] # Read metrics about system load & uptime
[[inputs.net]] # Gather metrics about network interfaces
#[[inputs.netstat]] # Collect TCP connections state and UDP socket counts
{% if 'macs' not in group_names %}
[[inputs.nstat]] # Collects network metrics
{% endif %}
{% if 'pis' in group_names %}
[[inputs.ntpq]]
dns_lookup = false ## If false, add -n for ntpq command. Can reduce metric gather times.
{% endif %}
{% if 'opnsense' in group_names %}
[[inputs.ntpq]]
dns_lookup = false ## If false, add -n for ntpq command. Can reduce metric gather times.
[[inputs.wireguard]]
devices = ["wg0","wg1"]
[[inputs.pf]]
{% endif %}


@@ -0,0 +1,156 @@
##############################################################################
# PROCESSOR PLUGINS #
##############################################################################
[[processors.regex]]
# One processor covers all six docker_container_* measurements; the tag
# conversion is defined in a separate sub-table
namepass = ["docker_container_mem", "docker_container_net", "docker_container_cpu", "docker_container_blkio", "docker_container_health", "docker_container_status"]
[[processors.regex.tags]]
## Tag to change
key = "container_name"
## Regular expression to match on a tag value
pattern = "^([a-zA-Z0-9_]+)-\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$"
## Matches of the pattern will be replaced with this string. Use ${1}
## notation to use the text of the first submatch.
replacement = "${1}"
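# A minimal sketch of the effect of the pattern above (the container name and
# UUID suffix are hypothetical, presumably a Nomad-style allocation ID):
#   container_name = "grafana-1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d"
# is rewritten to
#   container_name = "grafana"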
###############################################################################
# INPUT PLUGINS #
###############################################################################
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (e.g., docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
## Note: configure this on only one manager node in a Swarm cluster;
## configuring it on multiple Swarm managers results in duplicated metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Set the source tag for the metrics to the container ID hostname (e.g. the first 12 chars)
source_tag = false
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
## is honored.
perdevice = true
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
# perdevice_include = ["cpu"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
## is honored.
total = false
## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
# total_include = ["cpu", "blkio", "network"]
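## A hedged sketch of the non-deprecated equivalent of 'perdevice'/'total'
## above (assumes a Telegraf release where the *_include settings are
## honored; the class lists are illustrative):
# perdevice = false
# perdevice_include = ["cpu"]
# total = true
# total_include = ["cpu", "blkio", "network"]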
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = ["traefik.*"] # Do not report on Traefik tags
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false

View File

@@ -0,0 +1,22 @@
[[processors.regex]]
namepass = ["consul_health_checks"]
# Tag and field conversions are defined in separate sub-tables
[[processors.regex.tags]]
## Tag to change
key = "check_name"
## Regular expression to match on a tag value
pattern = "^service: \\W(\\w+)\\W check$"
## Matches of the pattern will be replaced with this string. Use ${1}
## notation to use the text of the first submatch.
replacement = "${1}"
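# A sketch of the rewrite (the service name is hypothetical): a Consul check
# named
#   check_name = 'service: "traefik" check'
# is reduced by the pattern above to
#   check_name = "traefik"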
[[inputs.consul]]
address = "consul.service.consul:8500"
scheme = "http"
insecure_skip_verify = true
metric_version = 2
namedrop = ["traefik.http*","traefik.enable*","traefik.tcp*"]
tagexclude = ["traefik.http*","traefik.enable*", "traefik.tcp*"]
[inputs.consul.tagdrop]
check_name = [ "Nomad Client*", "Nomad Server*", "Serf Health Status" ]

View File

@@ -0,0 +1,15 @@
[[inputs.statsd]]
protocol = "udp" # Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
service_address = "127.0.0.1:8125" # Address and port to host UDP listener on
delete_gauges = true # Reset gauges every interval (default=true)
delete_counters = true # Reset counters every interval (default=true)
delete_sets = true # Reset sets every interval (default=true)
delete_timings = true # Reset timings & histograms every interval (default=true)
percentiles = [90.0] # Percentiles to calculate for timing & histogram stats
metric_separator = "_"
datadog_extensions = true # Parses tags in the datadog statsd format
allowed_pending_messages = 10000
percentile_limit = 1000
[inputs.statsd.tagdrop]
task = [ "await-*","run-*","await_*","run_*","create_*","create-*" ]
task_group = [ "await-*","run-*","await_*","run_*","create_*","create-*" ]
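# For reference, a hypothetical DogStatsD-format line this listener would
# accept; 'datadog_extensions' parses the trailing '#'-delimited tag list:
#   nomad.client.allocs.cpu.total_percent:42.5|g|#task:web,task_group:web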

View File

@@ -0,0 +1,88 @@
{# Ping internal servers and Hurricane Electric backbone routers #}
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map
#field = "url"
## Name of the tag to map
tag = "url"
## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "host"
## Default value to be used for all values not contained in the mapping
## table. When unset and no match is found, the original field will remain
## unmodified and the destination tag or field will not be created.
# default = 0
## Table of mappings
[processors.enum.mapping.value_mappings]
"10.0.30.6" = "synology"
{% for i in groups['pis'] %}
"{{ hostvars[i].ansible_host }}" = "{{ hostvars[i].inventory_hostname }}"
{% endfor %}
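{# The loop above renders one mapping per Pi host, e.g. (values hypothetical): #}
{#   "10.0.30.11" = "pi1"                                                      #}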
[[inputs.ping]]
## Hosts to send ping packets to.
# https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/ping/README.md
urls = [{% for i in groups['pis'] %}'{{ hostvars[i].ansible_host }}'{% if not loop.last %}, {% endif %}{% endfor %},
'10.0.30.6',
'core1.bos1.he.net',
'core1.lax1.he.net',
'core1.nyc4.he.net',
'core1.oma1.he.net',
'core1.chi1.he.net',
'core1.dal1.he.net',
'core1.den1.he.net',
'core1.mia1.he.net',
'core1.bna1.he.net',
'core1.phx1.he.net',
'core1.sea1.he.net',
'core1.blp1.he.net',
'core1.ams1.he.net',
'core1.dxb1.he.net',
'core1.jnb1.he.net',
'core1.man1.he.net',
'core1.rom1.he.net',
'core1.tyo1.he.net',
'core1.zrh3.he.net',
'core2.sao1.he.net',
'core1.sin1.he.net',
'core1.kpb1.he.net',
'core1.nbo1.he.net',
'core1.tpe1.he.net',
'core1.sto1.he.net',
'core1.ymq1.he.net',
'core2.syd1.he.net'
]
## Method used for sending pings, can be either "exec" or "native". When set
## to "exec" the systems ping command will be executed. When set to "native"
## the plugin will send pings directly.
##
## While the default is "exec" for backwards compatibility, new deployments
## are encouraged to use the "native" method for improved compatibility and
## performance.
method = "exec"
## Number of ping packets to send per interval. Corresponds to the "-c"
## option of the ping command.
count = 1
## Time to wait between sending ping packets in seconds. Operates like the
## "-i" option of the ping command.
ping_interval = 1.0
fielddrop = ["packets_received", "packets_transmitted", "ttl", "standard_deviation_ms"]
interval = "1m" ## Interval to send pings
## Specify the ping executable binary.
{% if 'pis' in group_names %}
binary = "/usr/bin/ping"
{% elif 'macs' in group_names %}
binary = "/sbin/ping"
{% else %}
binary = "/bin/ping"
{% endif %}
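{# On a host in the 'pis' group the conditional above renders simply as: #}
{#   binary = "/usr/bin/ping"                                            #}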
