mirror of
https://github.com/natelandau/ansible-homelab-config.git
synced 2025-11-18 09:53:41 -05:00
Initial commit
This commit is contained in:
21
templates/nomad_jobs/backup_fs.hcl
Normal file
21
templates/nomad_jobs/backup_fs.hcl
Normal file
@@ -0,0 +1,21 @@
|
||||
# Periodic sysbatch job: runs the host-provided backup command on every node.
job "backup_local_filesystems" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "sysbatch"

    periodic {
        # NOTE(review): HashiCorp cronexpr treats a 6-field spec as seconds-first,
        # so "0 */8 * * * *" fires every 8 minutes, not every 8 hours — confirm intent.
        cron             = "0 */8 * * * *"
        prohibit_overlap = true
        time_zone        = "America/New_York"
    }

    task "do_backups" {
        driver = "raw_exec"
        config {
            # When running a binary that exists on the host, the path must be absolute
            command = "${meta.backupCommand}"
            args    = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
        }
    } // /task do_backups

} //job
|
||||
88
templates/nomad_jobs/changedetection.hcl
Normal file
88
templates/nomad_jobs/changedetection.hcl
Normal file
@@ -0,0 +1,88 @@
|
||||
# changedetection.io web-page change monitor, exposed through Traefik at
# changes.{{ homelab_domain_name }}.
job "changedetection" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "changedetection" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            # Container listens on 5000; Nomad assigns the host port dynamically.
            port "webUI" {
                to = "5000"
            }
        }

        task "changedetection" {

            env {
                PUID     = "${meta.PUID}"
                PGID     = "${meta.PGID}"
                BASE_URL = "https://changes.{{ homelab_domain_name }}"
            }

            driver = "docker"
            config {
                image    = "dgtlmoon/changedetection.io:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/changedetection:/datastore"
                ]
                ports = ["webUI"]
            } // docker config

            service {
                port = "webUI"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`changes.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "http"
                    path     = "/"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 100 # MHz
                memory = 150 # MB
            } // resources

        } // task changedetection
    } // group
} // job
|
||||
109
templates/nomad_jobs/chronograf.hcl
Normal file
109
templates/nomad_jobs/chronograf.hcl
Normal file
@@ -0,0 +1,109 @@
|
||||
# Chronograf UI for InfluxDB. A prestart busybox task blocks until
# influxdb.service.consul resolves so Chronograf starts with its backend up.
job "chronograf" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "chronograf" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "chronografPort" {
                to = "8888"
            }
        }

        # Prestart gate: poll Consul DNS until influxdb is registered.
        task "await-influxdb" {
            driver = "docker"

            config {
                image   = "busybox:latest"
                command = "sh"
                args = [
                    "-c",
                    "echo -n 'Waiting for influxdb.service.consul to come alive'; until nslookup influxdb.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"
                ]
                # Host networking so nslookup uses the node's Consul DNS resolver.
                network_mode = "host"
            }

            resources {
                cpu    = 200
                memory = 128
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        task "chronograf" {

            // env {
            //     KEY = "VALUE"
            // }

            driver = "docker"
            config {
                image    = "chronograf:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["chronografPort"]
            } // docker config

            service {
                port = "chronografPort"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "chronografPort"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 40 # MHz
            //     memory = 10 # MB
            // } // resources

        } // task

    } // group

} // job
|
||||
100
templates/nomad_jobs/code.hcl
Normal file
100
templates/nomad_jobs/code.hcl
Normal file
@@ -0,0 +1,100 @@
|
||||
# openvscode-server (browser VS Code) behind Authelia-protected Traefik routing.
job "code" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "code" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                // static = "80"
                to = "3000"
            }
        }

        task "code" {

            env {
                PUID          = "${meta.PUID}"
                PGID          = "${meta.PGID}"
                TZ            = "America/New_York"
                SUDO_PASSWORD = "{{ simple_web_password }}"
                PROXY_DOMAIN  = "code.{{ homelab_domain_name }}"
                # NOTE(review): hard-coded placeholder token; access is gated by the
                # Authelia middleware below, but confirm this is intentional.
                CONNECTION_TOKEN = "1234"
                DOCKER_MODS      = "linuxserver/mods:code-server-python3|linuxserver/mods:code-server-shellcheck|linuxserver/mods:universal-git|linuxserver/mods:code-server-zsh"
                // CONNECTION_TOKEN  = supersecrettoken
                // CONNECTION_SECRET = supersecrettoken
            }

            driver = "docker"
            config {
                image    = "lscr.io/linuxserver/openvscode-server"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,redirectScheme@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1500 # MHz
                memory = 300  # MB
            } // resources

        } // task

    } // group

} // job
|
||||
64
templates/nomad_jobs/diagnostics.hcl
Normal file
64
templates/nomad_jobs/diagnostics.hcl
Normal file
@@ -0,0 +1,64 @@
|
||||
# Long-running Alpine container with common network/debug tools installed at
# startup; kept alive with `tail -f /dev/null` for interactive `nomad exec`.
job "diagnostics" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    group "diagnostics" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "diagnostics" {

            // env {
            //     KEY = "VALUE"
            // }

            driver = "docker"
            config {
                image    = "alpine:latest"
                hostname = "${NOMAD_JOB_NAME}"
                args = [
                    "/bin/sh",
                    "-c",
                    "chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
                ]
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/backups/config_backups:/backups",
                    "${meta.localStorageRoot}:/docker"
                ]
            } // docker config

            # Rendered to /local/bootstrap.sh and executed as the container entry.
            template {
                destination = "local/bootstrap.sh"
                data        = <<EOH
#!/bin/sh

apk update
# Install the diagnostic toolkit in a single transaction.
apk add --no-cache bash bind-tools curl git jq openssl iperf3 nano wget

tail -f /dev/null # Keep container running
EOH
            }

        } // tasks
    } // group
} // job
|
||||
41
templates/nomad_jobs/diun.hcl
Normal file
41
templates/nomad_jobs/diun.hcl
Normal file
@@ -0,0 +1,41 @@
|
||||
# Diun (Docker Image Update Notifier) as a system job — one instance per node,
# watching that node's Docker socket and notifying via Pushover.
job "diun" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "system"

    group "diun" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "diun" {

            env {
                // DIUN_PROVIDERS_DOCKER_ENDPOINT = "unix:///var/run/docker.sock"
                DIUN_NOTIF_PUSHOVER_RECIPIENT        = "{{ pushover_recipient }}"
                DIUN_NOTIF_PUSHOVER_TOKEN            = "{{ pushover_token }}"
                DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT = "true"
                DIUN_WATCH_FIRSTCHECKNOTIF           = "false"
                # NOTE(review): "*/48" in the hours field of a 5-field cron only ever
                # matches hour 0 (hours are 0-23) — confirm the intended interval.
                DIUN_WATCH_SCHEDULE = "26 */48 * * *"
                TZ                  = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "crazymax/diun:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "/var/run/docker.sock:/var/run/docker.sock"
                ]
            } // docker config

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task diun
    } // group
} // job
|
||||
120
templates/nomad_jobs/grafana.hcl
Normal file
120
templates/nomad_jobs/grafana.hcl
Normal file
@@ -0,0 +1,120 @@
|
||||
# Grafana with configuration templated into /local/grafana.ini. Auth is
# delegated to Authelia via Traefik's Remote-User header (auth.proxy).
job "grafana" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "grafana" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            # Dynamic host port; passed into grafana.ini via NOMAD_PORT_http.
            port "http" {}
        }

        task "grafana" {

            env {
                GF_PATHS_CONFIG = "/local/grafana.ini"
            }

            driver = "docker"
            config {
                image    = "grafana/grafana:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["http"]
                volumes  = ["${meta.nfsStorageRoot}/pi-cluster/grafana:/var/lib/grafana"]
            } // docker config

            # grafana.ini — {% raw %} guards the Nomad runtime template from Jinja.
            template {
                destination = "local/grafana.ini"
                data        = <<EOH
[server]
domain = grafana.{{ homelab_domain_name }}
{% raw %}http_port = {{ env "NOMAD_PORT_http" }}{% endraw +%}
[analytics]
reporting_enabled = false
[security]
admin_user = {{ my_username }}
admin_password = {{ grafana_admin_password }}
cookie_secure = true
[users]
allow_sign_up = false
allow_org_create = false
[smtp]
enabled = true
host = {{ email_smtp_host }}:{{ email_smtp_port}}
user = {{ email_smtp_account }}
password = {{ grafana_smtp_password }}
skip_verify = true
from_address = {{ my_email_address }}
from_name = Grafana
[log.file]
level = info
[date_formats]
default_timezone = America/New_York
[auth.proxy]
enabled = true
header_name = Remote-User
header_property = username
auto_sign_up = false
sync_ttl = 60
EOH
            }

            service {
                port = "http"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.middlewares.${NOMAD_JOB_NAME}_logout_redirect.redirectregex.regex=${NOMAD_JOB_NAME}\\.{{ homelab_domain_name }}/logout$",
                    "traefik.http.middlewares.${NOMAD_JOB_NAME}_logout_redirect.redirectregex.replacement=authelia.{{ homelab_domain_name }}/logout",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,${NOMAD_JOB_NAME}_logout_redirect"
                ]

                check {
                    type     = "http"
                    port     = "http"
                    path     = "/"
                    interval = "90s"
                    timeout  = "15s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 200 # MHz
                memory = 60  # MB
            } // resources

        } // task grafana

    } // group

} // job
|
||||
88
templates/nomad_jobs/headless_chrome.hcl
Normal file
88
templates/nomad_jobs/headless_chrome.hcl
Normal file
@@ -0,0 +1,88 @@
|
||||
# Headless Chrome (remote debugging on 9222), pinned to amd64 nodes because the
# image is not published for ARM.
job "headless-chrome" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "headless-chrome" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                static = "9222"
                to     = "9222"
            }
        }

        task "headless-chrome" {

            // env {
            //     PUID = "${meta.PUID}"
            //     PGID = "${meta.PGID}"
            // }

            driver = "docker"
            config {
                image    = "alpeware/chrome-headless-trunk:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["port1"]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`chrome.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task

    } // group

} // job
|
||||
113
templates/nomad_jobs/influxdb.hcl
Normal file
113
templates/nomad_jobs/influxdb.hcl
Normal file
@@ -0,0 +1,113 @@
|
||||
# InfluxDB with local (non-NFS) storage. A prestart raw_exec task restores the
# most recent backup onto the node; a poststop task backs configuration up.
job "influxdb" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "influxdbGroup" {
        count = 1
        network {
            port "httpAPI" {
                static = "{{ influxdb_port }}"
                to     = "8086"
            }
        }

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. InfluxDB
            // keeps its database on local storage rather than an NFS share.

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "influxdb" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "influxdb:{{ influxdb_version }}"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["httpAPI"]
                volumes = [
                    "${meta.localStorageRoot}/influxdb:/var/lib/influxdb"
                ]
            } // docker config

            service {
                port = "httpAPI"
                name = "${NOMAD_JOB_NAME}"

                check {
                    type     = "tcp"
                    port     = "httpAPI"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }

            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400  # MB
            } // resources

        } // /task influxdb

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration
    } // group
} // job
|
||||
126
templates/nomad_jobs/lidarr.hcl
Normal file
126
templates/nomad_jobs/lidarr.hcl
Normal file
@@ -0,0 +1,126 @@
|
||||
# Lidarr music manager. Database lives on local storage (restored on prestart,
# backed up on poststop) because its SQLite DB does not tolerate NFS.
job "lidarr" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "10m"
        progress_deadline = "15m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "lidarrGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "lidarr" {
                to = "8686"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. Lidarr
            // will not work with its database in an NFS share.

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "lidarr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
                //UMASK_SET = 022 #optional
            }

            driver = "docker"
            config {
                image    = "linuxserver/lidarr:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["lidarr"]
                volumes = [
                    "${meta.localStorageRoot}/lidarr:/config",
                    "${meta.nfsStorageRoot}/media:/media"
                ]
            } // docker config

            service {
                port = "lidarr"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "lidarr"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "10m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 2000 # MHz
                memory = 400  # MB
            } // resources

        } // /task lidarr main task

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group

} // job
|
||||
157
templates/nomad_jobs/loki.hcl
Normal file
157
templates/nomad_jobs/loki.hcl
Normal file
@@ -0,0 +1,157 @@
|
||||
# Grafana Loki log aggregator, single-instance (inmemory ring, replication 1),
# with boltdb-shipper indexes and chunks on the NFS-backed /loki volume.
job "loki" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "loki" {

        count = 1

        restart {
            attempts = 0
            delay    = "1m"
        }

        network {
            port "loki_port" {
                static = "3100"
                to     = "3100"
            }
        }

        task "loki" {

            driver = "docker"
            config {
                image    = "grafana/loki:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "local/loki/local-config.yaml:/etc/loki/local-config.yaml",
                    "${meta.nfsStorageRoot}/pi-cluster/loki:/loki"
                ]
                ports = ["loki_port"]
            } // docker config

            service {
                port = "loki_port"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "http"
                    path     = "/metrics"
                    interval = "30s"
                    timeout  = "10s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            # NOTE(review): YAML indentation below was reconstructed to match Loki's
            # documented local-config layout — verify against the deployed config.
            template {
                destination = "local/loki/local-config.yaml"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                ---
                auth_enabled: false

                server:
                  http_listen_port: 3100
                  grpc_listen_port: 9096

                ingester:
                  wal:
                    enabled: true
                    dir: /tmp/wal
                  lifecycler:
                    address: 127.0.0.1
                    ring:
                      kvstore:
                        store: inmemory
                      replication_factor: 1
                    final_sleep: 0s
                  chunk_idle_period: 1h       # Any chunk not receiving new logs in this time will be flushed
                  max_chunk_age: 1h           # All chunks will be flushed when they hit this age. Def: 1h
                  chunk_target_size: 1048576  # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
                  chunk_retain_period: 30s    # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
                  max_transfer_retries: 0     # Chunk transfers disabled

                schema_config:
                  configs:
                    - from: 2020-10-24
                      store: boltdb-shipper
                      object_store: filesystem
                      schema: v11
                      index:
                        prefix: index_
                        period: 24h

                storage_config:
                  boltdb_shipper:
                    active_index_directory: /loki/boltdb-shipper-active
                    cache_location: /loki/boltdb-shipper-cache
                    cache_ttl: 24h  # Can be increased for faster performance over longer query periods, uses more disk space
                    shared_store: filesystem
                  filesystem:
                    directory: /loki/chunks

                compactor:
                  working_directory: /loki/boltdb-shipper-compactor
                  shared_store: filesystem

                limits_config:
                  reject_old_samples: true
                  reject_old_samples_max_age: 168h

                chunk_store_config:
                  max_look_back_period: 0s

                table_manager:
                  retention_deletes_enabled: false
                  retention_period: 0s

                ruler:
                  storage:
                    type: local
                    local:
                      directory: /loki/rules
                  rule_path: /loki/rules-temp
                  alertmanager_url: http://localhost:9093
                  ring:
                    kvstore:
                      store: inmemory
                  enable_api: true
                EOH
            } // template

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task loki
    } // group
} // job
|
||||
93
templates/nomad_jobs/nginx.hcl
Normal file
93
templates/nomad_jobs/nginx.hcl
Normal file
@@ -0,0 +1,93 @@
|
||||
# nginx static web server, constrained to rpi nodes (config lives on a USB
# drive mounted on those hosts).
job "nginx" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "rpi"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "nginx" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "web" {
                to = "80"
            }
            // port "websecure" {
            //     to = "443"
            // }
        }

        task "nginx" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/nginx"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "/mnt/usbDrive/nginx:/config"
                ]
                ports = ["web"]
            } // docker config

            service {
                port = "web"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "web"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 100 # MHz
                memory = 300 # MB
            } // resources

        } // task

    } // group

} // job
|
||||
91
templates/nomad_jobs/nzbhydra.hcl
Normal file
91
templates/nomad_jobs/nzbhydra.hcl
Normal file
@@ -0,0 +1,91 @@
|
||||
# NZBHydra2 meta-search, served through Traefik at hydra.{{ homelab_domain_name }}.
job "nzbhydra" {

    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "nzbhydra" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "hydra_port" {
                to = "5076"
            }
        }

        task "nzbhydra" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/nzbhydra2:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["hydra_port"]
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/nzbhydra:/config"
                ]
            } // docker config

            service {
                port = "hydra_port"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`hydra.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "http"
                    path     = "/"
                    interval = "30s"
                    timeout  = "10s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 600 # MHz
                memory = 400 # MB
            } // resources

        } // task

    } // group

} // job
|
||||
94
templates/nomad_jobs/overseerr.hcl
Normal file
94
templates/nomad_jobs/overseerr.hcl
Normal file
@@ -0,0 +1,94 @@
|
||||
# Overseerr: media request management UI, published through Traefik with
# Authelia protection. Config lives on NFS; UI listens on container port 5055.
job "overseerr" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "overseerr" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "overseerr" {
                to = "5055"
            }
        }

        task "overseerr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/overseerr"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["overseerr"]
                volumes  = [
                    "${meta.nfsStorageRoot}/pi-cluster/overseerr:/config"
                ]
            } // docker config

            service {
                port = "overseerr"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    # Use the interpolated job name for consistency with the other
                    # tags (renders to the same value as the old hardcoded "overseerr").
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "overseerr"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1600 # MHz
                memory = 300  # MB
            } // resources

        } // task

    } // group

} // job
|
||||
155
templates/nomad_jobs/pihole.hcl
Normal file
155
templates/nomad_jobs/pihole.hcl
Normal file
@@ -0,0 +1,155 @@
|
||||
# Pi-hole DNS ad-blocker. Pinned to rpi2/rpi3; serves DNS on a static port 53
# and its admin UI on port 80 (published through Traefik at p.<domain>).
job "pihole" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "rpi(2|3)"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "pihole-group" {

        network {
            port "web" {
                static = "80"
                to     = "80"
            }
            port "dns" {
                static = "53"
                to     = "53"
            }
            // port "dhcp" {
            //     static = "67"
            //     to     = "67"
            // }
        }

        // NOTE(review): task name has a typo ("filesytem"); kept as-is so alloc
        // names and any external tooling that references it are unaffected.
        task "await_filesytem" {
            driver = "docker"

            // Block startup until the NFS-backed pihole config dir is mounted
            // (presence of gravity.db is used as the readiness marker).
            config {
                image        = "busybox:latest"
                command      = "sh"
                network_mode = "host"
                args = [
                    "-c",
                    "echo -n 'Waiting for /mnt/pi-cluster/pihole5 to be mounted'; until [ -f /etc/pihole/gravity.db ]; do echo '.'; sleep 2; done",
                ]
                volumes = [
                    "/mnt/pi-cluster/pihole5:/etc/pihole/"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /await-filesystem

        task "pihole" {
            env {
                // REV_SERVER_DOMAIN = ""
                ADMIN_EMAIL         = "{{ my_email_address }}"
                DHCP_ACTIVE         = "false"
                DNS_BOGUS_PRIV      = "false"
                DNS_FQDN_REQUIRED   = "false"
                DNSSEC              = "false"
                FTLCONF_REPLY_ADDR4 = "${attr.unique.network.ip-address}"
                IPv6                = "false"
                PIHOLE_DNS_         = "10.0.30.1#53"
                QUERY_LOGGING       = "true"
                REV_SERVER          = "true"
                REV_SERVER_CIDR     = "10.0.0.0/16"
                REV_SERVER_TARGET   = "10.0.30.1"
                TEMPERATUREUNIT     = "f"
                TZ                  = "America/New_York"
                WEBTHEME            = "default-light"
                WEBUIBOXEDLAYOUT    = "traditional"
            }

            driver = "docker"
            config {
                image    = "pihole/pihole:latest"
                hostname = "${NOMAD_JOB_NAME}"
                dns_servers = [
                    "127.0.0.1",
                    "1.1.1.1"
                ]
                extra_hosts = [
                    "laptopVPN:10.0.90.2",
                    "FiddleStixPhoneVPN:10.0.90.3"
                ]
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/pihole5:/etc/pihole/",
                    "${meta.nfsStorageRoot}/pi-cluster/pihole5/dnsmasq.d:/etc/dnsmasq.d/"
                    // "${meta.nfsStorageRoot}/pi-cluster/pihole5/logs/pihole.log:/var/log/pihole.log",
                    // "${meta.nfsStorageRoot}/pi-cluster/pihole5/logs/pihole-FTL.log:/var/log/pihole-FTL.log"
                ]
                ports = ["web", "dns"]
            }

            resources {
                cpu    = 400 # MHz
                memory = 80  # MB
            }

            service {
                name = "${NOMAD_JOB_NAME}"
                port = "web"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`p.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    # Redirect the bare hostname to the /admin/ UI.
                    "traefik.http.middlewares.piholeRedirect.redirectregex.regex=^(https?://p\\.{{ homelab_domain_name }})/?$",
                    "traefik.http.middlewares.piholeRedirect.redirectregex.replacement=$${1}/admin/",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,piholeRedirect"
                ]
                check {
                    type     = "http"
                    path     = "/admin/"
                    port     = "web"
                    interval = "30s"
                    timeout  = "2s"
                }
                check_restart {
                    limit           = 3
                    grace           = "10m"
                    ignore_warnings = false
                }
            }

            service {
                name = "piholeDNStcp"
                port = "dns"
                check {
                    type     = "tcp"
                    port     = "dns"
                    interval = "30s"
                    timeout  = "2s"
                }
                check_restart {
                    limit           = 3
                    grace           = "60s"
                    ignore_warnings = false
                }
            }

        }
    } // group
}
|
||||
88
templates/nomad_jobs/promtail-syslogs.hcl
Normal file
88
templates/nomad_jobs/promtail-syslogs.hcl
Normal file
@@ -0,0 +1,88 @@
|
||||
# Promtail system job: runs on every node, tails /var/log/syslog and
# /var/log/auth.log and ships them to Loki (discovered via Consul at render time).
job "promtail-syslogs" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "system"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "promtail-syslogs" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "promtail-syslogs" {

            driver = "docker"
            config {
                image    = "grafana/promtail"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "/var/log:/var/log"
                ]
                args = [
                    "-config.file",
                    "/local/promtail-config.yaml",
                    "-print-config-stderr"
                ]
            } // docker config

            # Promtail config. The {% raw %} blocks protect Nomad/consul-template
            # {{ ... }} syntax from the Jinja2 pass that renders this file.
            template {
                destination = "local/promtail-config.yaml"
                env         = false
                data        = <<EOH
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

{% raw -%}
clients:
  - url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
{% endraw %}

scrape_configs:
  - job_name: system
    static_configs:
      - targets:
          - localhost
        labels:
          job: syslog
          {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
          __path__: /var/log/syslog
      - targets:
          - localhost
        labels:
          job: authlog
          {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
          __path__: /var/log/auth.log

EOH
            } // template

            resources {
                cpu    = 30 # MHz
                memory = 30 # MB
            } // resources

        } // task

    } // group

} // job
|
||||
129
templates/nomad_jobs/prowlarr.hcl
Normal file
129
templates/nomad_jobs/prowlarr.hcl
Normal file
@@ -0,0 +1,129 @@
|
||||
|
||||
# Prowlarr indexer manager. Config is restored from backup to local storage at
# prestart and backed up again at poststop (the app's database misbehaves on NFS).
job "prowlarr" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi4"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "prowlarrGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "prowlarr" {
                to = "9696"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. prowlarr
            // will not work with its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "prowlarr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
                //UMASK_SET = 022 #optional
            }

            driver = "docker"
            config {
                image      = "ghcr.io/linuxserver/prowlarr:develop"
                force_pull = true
                hostname   = "${NOMAD_JOB_NAME}"
                ports      = ["prowlarr"]
                volumes = [
                    "${meta.localStorageRoot}/prowlarr:/config"
                ]

            } // docker config

            service {
                port = "prowlarr"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "prowlarr"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400  # MB
            } // resources

        } // /task prowlarr

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group

} // job
|
||||
123
templates/nomad_jobs/radarr.hcl
Normal file
123
templates/nomad_jobs/radarr.hcl
Normal file
@@ -0,0 +1,123 @@
|
||||
# Radarr movie manager. Config is restored from backup to local storage at
# prestart and backed up again at poststop (the app's database misbehaves on NFS).
job "radarr" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "radarrGroup" {

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "radarr" {
                to = "7878"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. radarr
            // will not work with its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "radarr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
                //UMASK_SET = 022 #optional
            }

            driver = "docker"
            config {
                image      = "ghcr.io/linuxserver/radarr:develop"
                hostname   = "${NOMAD_JOB_NAME}"
                force_pull = true
                ports      = ["radarr"]
                volumes = [
                    "${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
                    "${meta.nfsStorageRoot}/media:/media"
                ]
            } // docker config

            service {
                port = "radarr"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "radarr"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 2000 # MHz
                memory = 400  # MB
            } // resources

        } // /task radarr

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group
} // job
|
||||
468
templates/nomad_jobs/reverse-proxy.hcl
Normal file
468
templates/nomad_jobs/reverse-proxy.hcl
Normal file
@@ -0,0 +1,468 @@
|
||||
# Reverse proxy stack, pinned to rpi1: Traefik (edge router with Cloudflare
# ACME), Authelia (forward-auth SSO), and a whoami debug endpoint.
job "reverse-proxy" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${node.unique.name}"
        value     = "rpi1"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "reverse-proxy-group" {
        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "authelia-port" {
                # FIX: was `{ { authelia_port } }` — broken Jinja2 delimiters that
                # would never be rendered by the templating pass.
                static = {{ authelia_port }}
                to     = 9091
            }
            port "whoami" {
                to = 80
            }
            port "dashboard" {
                static = 8080
                to     = 8080
            }
            port "web" {
                static = 80
                to     = 80
            }
            port "websecure" {
                static = 443
                to     = 443
            }
            port "externalwebsecure" {
                static = 4430
                to     = 4430
            }
        }

        task "authelia" {

            env {
                TZ   = "America/New_York"
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
            }

            driver = "docker"
            config {
                image    = "authelia/authelia"
                hostname = "authelia"
                ports    = ["authelia-port"]
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/authelia:/config"
                ]
                args = [
                    "--config",
                    "/local/authelia/config.yaml"
                ]
            } // docker config

            template {
                destination = "local/authelia/config.yaml"
                env         = false
                change_mode = "noop"
                perms       = "644"
                data        = <<-EOH
                ---
                ## The theme to display: light, dark, grey, auto.
                theme: auto

                jwt_secret: {{ authelia_jwt_secret}}
                default_redirection_url: https://authelia.{{ homelab_domain_name}}

                server:
                  host: 0.0.0.0
                  port: 9091
                  path: ""
                  read_buffer_size: 4096
                  write_buffer_size: 4096
                  enable_pprof: false
                  enable_expvars: false
                  disable_healthcheck: false

                log:
                  level: info
                  format: text
                  # file_path: "/config/log.txt"
                  keep_stdout: false

                totp:
                  issuer: authelia.com

                authentication_backend:
                  disable_reset_password: false
                  file:
                    path: /config/users.yml
                    password:
                      algorithm: argon2id
                      iterations: 1
                      salt_length: 16
                      parallelism: 8
                      memory: 64

                access_control:
                  default_policy: deny
                  networks:
                    - name: internal
                      networks:
                        - 10.0.0.0/16
                        #- 172.16.0.0/12
                        #- 192.168.0.0/18
                  rules:
                    # Rules applied to everyone
                    - domain: "*.{{ homelab_domain_name }}"
                      policy: two_factor
                      networks:
                        - internal

                session:
                  name: authelia_session
                  domain: {{ homelab_domain_name }}
                  same_site: lax
                  secret: {{ authelia_session_secret }}
                  expiration: 1h
                  inactivity: 15m
                  remember_me_duration: 1w

                regulation:
                  max_retries: 5
                  find_time: 10m
                  ban_time: 15m

                storage:
                  encryption_key: {{ authelia_sqlite_encryption_key}}
                  local:
                    path: /config/db.sqlite3

                notifier:
                  smtp:
                    username: {{ email_smtp_account }}
                    password: {{ authelia_smtp_password }}
                    host: {{ email_smtp_host }}
                    port: {{ email_smtp_port }}
                    sender: "Authelia <{{ my_email_address }}>"
                    subject: "[Authelia] {title}"
                    startup_check_address: {{ my_email_address }}

                EOH
            }

            service {
                port = "authelia-port"
                name = "${NOMAD_TASK_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`authelia.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Cache-Control=no-store",
                    "traefik.http.middlewares.authelia-headers.headers.customResponseHeaders.Pragma=no-cache",
                    # Interpolated task name renders to "authelia", matching the
                    # router declared in the tags above (was hardcoded).
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia-headers"
                ]

                check {
                    type     = "tcp"
                    port     = "authelia-port"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 200 # MHz
                memory = 110 # MB
            }

        } // task authelia

        task "whoami" {
            driver = "docker"
            config {
                image    = "containous/whoami:latest"
                hostname = "${NOMAD_TASK_NAME}"
                ports    = ["whoami"]

            } // /docker config

            service {
                port = "whoami"
                name = "${NOMAD_TASK_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]
                check {
                    type     = "http"
                    path     = "/"
                    interval = "90s"
                    timeout  = "15s"
                }
                check_restart {
                    limit           = 2
                    grace           = "1m"
                    ignore_warnings = true
                }
            }
            resources {
                cpu    = 25 # MHz
                memory = 10 # MB
            }

        } // /task whoami

        task "traefik" {

            env {
                PUID             = "${meta.PUID}"
                PGID             = "${meta.PGID}"
                TZ               = "America/New_York"
                CF_API_EMAIL     = "{{ my_email_address }}"
                CF_DNS_API_TOKEN = "{{ traefik_cf_api_token }}"
            }

            driver = "docker"
            config {
                image    = "traefik:{{ traefik_version }}"
                hostname = "traefik"
                ports    = ["dashboard", "web", "websecure", "externalwebsecure"]
                volumes  = ["${meta.nfsStorageRoot}/pi-cluster/traefik/acme:/acme"]
                args = [
                    "--global.sendAnonymousUsage=false",
                    "--global.checkNewVersion=false",
                    "--entryPoints.web.address=:80",
                    "--entryPoints.websecure.address=:443",
                    "--entryPoints.externalwebsecure.address=:4430",
                    "--entrypoints.web.http.redirections.entryPoint.to=websecure",
                    "--entrypoints.web.http.redirections.entryPoint.scheme=https",
                    "--entrypoints.web.http.redirections.entryPoint.permanent=true",
                    "--providers.file.filename=/local/traefik/siteconfigs.toml",
                    "--providers.file.watch=true",
                    "--providers.consulcatalog=true",
                    "--providers.consulcatalog.endpoint.address=http://consul.service.consul:8500",
                    "--providers.consulcatalog.prefix=traefik",
                    "--providers.consulcatalog.exposedbydefault=false",
                    "--metrics=true",
                    "--metrics.influxdb=true",
                    "--metrics.influxdb.address=influxdb.service.consul:{{ influxdb_port }}",
                    "--metrics.influxdb.protocol=http",
                    "--metrics.influxdb.pushinterval=10s",
                    "--metrics.influxdb.database=homelab",
                    "--metrics.influxdb.retentionpolicy=2day",
                    "--metrics.influxdb.addentrypointslabels=true",
                    "--metrics.influxdb.addserviceslabels=true",
                    "--accesslog=true",
                    "--log=true",
                    "--log.level=ERROR",
                    "--api=true",
                    "--api.dashboard=true",
                    "--api.insecure=true",
                    "--certificatesresolvers.cloudflare.acme.email={{ my_email_address }}",
                    "--certificatesresolvers.cloudflare.acme.storage=/acme/acme-${node.unique.name}.json",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge=true",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.delaybeforecheck=10",
                    "--certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1:53,8.8.8.8:53"
                ]
            } // docker config

            # NOTE(review): the two htpasswd templates below are identical; the
            # "family" basicauth middleware therefore grants the same users —
            # confirm whether httpasswdFamily should contain only the family user.
            template {
                destination = "local/traefik/httpasswd"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                {{ my_username }}:{{ traefik_http_pass_me }}
                family:{{ traefik_http_pass_family }}
                EOH
            }

            template {
                destination = "local/traefik/httpasswdFamily"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                {{ my_username }}:{{ traefik_http_pass_me }}
                family:{{ traefik_http_pass_family }}
                EOH
            }

            template {
                destination = "local/traefik/siteconfigs.toml"
                env         = false
                change_mode = "noop"
                data        = <<-EOH
                [http]
                  [http.middlewares]
                    [http.middlewares.compress.compress]

                    [http.middlewares.localIPOnly.ipWhiteList]
                      sourceRange = ["10.0.0.0/16"]

                    [http.middlewares.redirectScheme.redirectScheme]
                      scheme = "https"
                      permanent = true

                    [http.middlewares.authelia.forwardAuth]
                      address = "http://authelia.service.consul:{{ authelia_port }}/api/verify?rd=https://authelia.{{ homelab_domain_name }}"
                      trustForwardHeader = true
                      authResponseHeaders = ["Remote-User", "Remote-Groups", "Remote-Name", "Remote-Email"]

                    [http.middlewares.basicauth.basicauth]
                      usersfile = "/local/traefik/httpasswd"
                      removeHeader = true

                    [http.middlewares.basicauth-family.basicauth]
                      usersfile = "/local/traefik/httpasswdFamily"
                      removeHeader = true

                    [http.middlewares.allowFrame.headers]
                      customFrameOptionsValue = "allow-from https://home.{{ homelab_domain_name }}"

                  [http.routers]

                    [http.routers.consul]
                      rule = "Host(`consul.{{ homelab_domain_name }}`)"
                      service = "consul"
                      entrypoints = ["web","websecure"]
                      [http.routers.consul.tls]
                        certResolver = "cloudflare" # From static configuration

                  [http.services]

                    [http.services.consul]
                      [http.services.consul.loadBalancer]
                        passHostHeader = true
                        [[http.services.consul.loadBalancer.servers]]
                          url = "http://consul.service.consul:8500"
                EOH
            }

            service {
                port = "dashboard"
                name = "${NOMAD_TASK_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file,redirectScheme@file"
                ]

                check {
                    type     = "tcp"
                    port     = "dashboard"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                //cpu  = 40 # MHz
                memory = 64 # MB
            } // resources

        } // task traefik

        // task "promtail-traefik" {

        //     driver = "docker"
        //     config {
        //         image    = "grafana/promtail"
        //         hostname = "promtail-traefik"
        //         volumes = [
        //             "/mnt/pi-cluster/logs:/traefik"
        //         ]
        //         args = [
        //             "-config.file",
        //             "/local/promtail-config.yaml",
        //             "-print-config-stderr",
        //         ]
        //     } // docker config

        //     template {
        //         destination = "local/promtail-config.yaml"
        //         env         = false
        //         data        = <<-EOH
        //         server:
        //           http_listen_port: 9080
        //           grpc_listen_port: 0

        //         positions:
        //           filename: /alloc/positions.yaml

        //         {% raw -%}
        //         clients:
        //           - url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
        //         {% endraw %}

        //         scrape_configs:
        //           - job_name: traefik
        //             static_configs:
        //               - targets:
        //                   - localhost
        //                 labels:
        //                   job: traefik_access
        //                   {% raw %}host: {{ env "node.unique.name" }}{% endraw +%}
        //                   __path__: "/alloc/logs/traefik.std*.0"
        //             pipeline_stages:
        //               - regex:
        //                   expression: '^(?P<remote_addr>[\w\.]+) - (?P<remote_user>[^ ]*) \[(?P<time_local>.*)\] "(?P<method>[^ ]*) (?P<request>[^ ]*) (?P<protocol>[^ ]*)" (?P<status>[\d]+) (?P<body_bytes_sent>[\d]+) "(?P<http_referer>[^"]*)" "(?P<http_user_agent>[^"]*)" (?P<request_number>[^ ]+) "(?P<router>[^ ]+)" "(?P<server_URL>[^ ]+)" (?P<response_time_ms>[^ ]+)ms$'
        //               - labels:
        //                   method:
        //                   status:
        //                   router:
        //                   response_time_ms:

        //         EOH
        //     } // template

        //     lifecycle {
        //         hook    = "poststart"
        //         sidecar = true
        //     }

        //     resources {
        //         cpu    = 30 # MHz
        //         memory = 30 # MB
        //     } // resources

        // } // promtail sidecar task

    } // reverse-proxy-group
}
|
||||
139
templates/nomad_jobs/sonarr.hcl
Normal file
139
templates/nomad_jobs/sonarr.hcl
Normal file
@@ -0,0 +1,139 @@
|
||||
# Sonarr TV manager. Config is restored from backup to local storage at
# prestart and backed up again at poststop (the app's database misbehaves on NFS).
job "sonarr" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "macmini"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "sonarrGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "sonarr" {
                to = "8989"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. sonarr
            // will not work with its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args = [
                    "${meta.restoreCommand1}",
                    "${meta.restoreCommand2}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.restoreCommand3}"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "sonarr" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
                //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
                //UMASK_SET = 022 #optional
            }

            driver = "docker"
            config {
                image    = "linuxserver/sonarr:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["sonarr"]
                volumes = [
                    "${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
                    "${meta.nfsStorageRoot}/media:/media"
                ]
            } // docker config

            service {
                port = "sonarr"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    # Use the interpolated job name for consistency with the other
                    # tags (renders to the same value as the old hardcoded "sonarr").
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "sonarr"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400  # MB
            } // resources

        } // /task sonarr

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args = [
                    "${meta.backupAllocArg1}",
                    "${meta.backupAllocArg2}",
                    "${meta.backupAllocArg3}",
                    "${meta.backupAllocArg4}",
                    "${meta.backupAllocArg5}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.backupAllocArg6}"
                ]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group

} // job
|
||||
103
templates/nomad_jobs/stash.hcl
Normal file
103
templates/nomad_jobs/stash.hcl
Normal file
@@ -0,0 +1,103 @@
|
||||
// Stash media server, pinned to the macmini node; all state lives on NFS.
job "stash" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "macmini"
    }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "stashGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                to = "9999"
            }
        }

        task "stash" {

            env {
                PUID                = "${meta.PUID}"
                PGID                = "${meta.PGID}"
                STASH_STASH         = "/data/"
                STASH_GENERATED     = "/generated/"
                STASH_METADATA      = "/metadata/"
                STASH_CACHE         = "/cache/"
                STASH_PORT          = "9999"
                STASH_EXTERNAL_HOST = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
            }

            driver = "docker"
            config {
                image    = "stashapp/stash:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/nate/.stash/cache:/cache",
                    "${meta.nfsStorageRoot}/nate/.stash/config:/root/.stash",
                    "${meta.nfsStorageRoot}/nate/.stash/generated:/generated",
                    "${meta.nfsStorageRoot}/nate/.stash/media:/data",
                    "${meta.nfsStorageRoot}/nate/.stash/metadata:/metadata",
                    "/etc/timezone:/etc/timezone:ro"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 4500 # MHz
                memory = 400  # MB
            } // resources

        } // task

    } // group

} // job
|
||||
100
templates/nomad_jobs/syncthing.hcl
Normal file
100
templates/nomad_jobs/syncthing.hcl
Normal file
@@ -0,0 +1,100 @@
|
||||
// Syncthing file synchronization: web GUI behind Traefik/Authelia plus the
// static sync (22000) and local-discovery (21027) ports.
job "syncthing" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "syncthing" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "webGUI" {
                to = "8384"
            }
            port "listen_tcp_udp" {
                static = "22000"
                to     = "22000"
            }
            port "udp_proto_discovery" {
                static = "21027"
                to     = "21027"
            }
        }

        task "syncthing" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/syncthing"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config",
                    "${meta.nfsStorageRoot}/${NOMAD_JOB_NAME}:/Sync"
                ]
                ports = ["webGUI", "listen_tcp_udp", "udp_proto_discovery"]
            } // docker config

            service {
                port = "webGUI"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=syncthing",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "webGUI"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1200 # MHz
                memory = 300  # MB
            } // resources

        } // task

    } // group

} // job
|
||||
191
templates/nomad_jobs/template-groups.hcl
Normal file
191
templates/nomad_jobs/template-groups.hcl
Normal file
@@ -0,0 +1,191 @@
|
||||
// Template for a two-group job (database group + application group).
// The app group waits for the db service to register in Consul before starting.
job "TEMPLATE" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "TEMPLATE-db-group" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                static = "80"
                to     = "80"
            }
        }

        task "TEMPLATE-db" {

            // constraint {
            //     attribute = "${node.unique.name}"
            //     operator  = "regexp"
            //     value     = "rpi(1|2|3)"
            // }

            env {
                // PUID = "${meta.PUID}"
                // PGID = "${meta.PGID}"
                // TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = ""
                hostname = "${NOMAD_JOB_NAME}1"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}1:/data",
                    "/etc/timezone:/etc/timezone:ro",
                    "/etc/localtime:/etc/localtime:ro"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_JOB_NAME}1"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.rule=Host(`${NOMAD_JOB_NAME}1.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.service=${NOMAD_JOB_NAME}1",
                    // FIX: removed a stray second comma after this element (",,")
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}1.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 40 # MHz
            //     memory = 10 # MB
            // }
            // FIX: a dangling "} // resources" brace that followed the
            // commented-out resources block was removed; it unbalanced the task.

        } // task

    } // group

    group "TEMPLATE-app-group" {

        restart {
            attempts = 1
            delay    = "30s"
        }

        network {
            port "port2" {
                static = "443"
                to     = "443"
            }
        }

        // Block until the db service name resolves in Consul DNS.
        task "await-TEMPLATEdb" {
            driver = "docker"

            config {
                image        = "busybox:latest"
                command      = "sh"
                args         = ["-c", "echo -n 'Waiting for service'; until nslookup ${NOMAD_JOB_NAME}1.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]
                network_mode = "host"
            }

            resources {
                cpu    = 200
                memory = 128
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        task "TEMPLATE" {

            // constraint {
            //     attribute = "${node.unique.name}"
            //     operator  = "regexp"
            //     value     = "rpi(1|2|3)"
            // }

            // env {
            //     PUID = "${meta.PUID}"
            //     PGID = "${meta.PGID}"
            //     TZ   = "America/New_York"
            // }

            driver = "docker"
            config {
                image    = ""
                hostname = "${NOMAD_TASK_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/data",
                    "/etc/timezone:/etc/timezone:ro",
                    "/etc/localtime:/etc/localtime:ro"
                ]
                ports = ["port2"]
            }

            service {
                name = "${NOMAD_TASK_NAME}"
                port = "port2"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    // FIX: removed a stray second comma after this element (",,")
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    // FIX: comma was missing between the next two elements
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.priority=1"
                ]
                check {
                    type     = "http"
                    port     = "port2"
                    path     = "/"
                    interval = "5m"
                    timeout  = "1m"
                }
                check_restart {
                    limit           = 3
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // }
        } // TASK
    } // close group
} // job
|
||||
95
templates/nomad_jobs/template-simple.hcl
Normal file
95
templates/nomad_jobs/template-simple.hcl
Normal file
@@ -0,0 +1,95 @@
|
||||
// Template for a simple single-task service behind Traefik/Authelia.
job "TEMPLATE" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "TEMPLATE" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "port1" {
                static = "80"
                to     = "80"
            }
        }

        task "TEMPLATE" {

            // env {
            //     PUID = "${meta.PUID}"
            //     PGID = "${meta.PGID}"
            // }

            driver = "docker"
            config {
                image    = ""
                hostname = "${NOMAD_TASK_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/",
                    "/etc/timezone:/etc/timezone:ro",
                    "/etc/localtime:/etc/localtime:ro"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_TASK_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task

    } // group

} // job
|
||||
128
templates/nomad_jobs/template_localfs.hcl
Normal file
128
templates/nomad_jobs/template_localfs.hcl
Normal file
@@ -0,0 +1,128 @@
|
||||
|
||||
// Template for a service whose config must live on the local filesystem:
// a prestart task restores the latest backup, a poststop task saves it back.
job "TEMPLATE" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi4"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "TEMPLATE-group" {

        count = 1

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            port "port1" {
                static = ""
                to     = ""
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. sonarr will not work with
            // its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "TEMPLATE" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = ""
                hostname = "${NOMAD_TASK_NAME}"
                ports    = ["port1"]
                volumes = [
                    "${meta.localStorageRoot}/${NOMAD_TASK_NAME}:/config"
                ]
            } // docker config

            service {
                port = "port1"
                name = "${NOMAD_TASK_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400  # MB
            } // resources

        } // /task ${NOMAD_JOB_NAME}

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration

    } // group

} // job
|
||||
27
templates/nomad_jobs/testing/execTest.hcl
Normal file
27
templates/nomad_jobs/testing/execTest.hcl
Normal file
@@ -0,0 +1,27 @@
|
||||
// One-shot batch job for testing the raw_exec backup script on node rpi3.
job "execTest" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "batch"

    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "rpi3"
    }

    group "testing" {

        task "execTest" {
            driver = "raw_exec"
            config {
                command = "/usr/local/bin/backup_configs"
                args    = ["--verbose", "--job", "sonarr"]
            }

            resources {
                cpu    = 500
                memory = 256
            }
        }
    }
}
|
||||
110
templates/nomad_jobs/uptimekuma.hcl
Normal file
110
templates/nomad_jobs/uptimekuma.hcl
Normal file
@@ -0,0 +1,110 @@
|
||||
// Uptime Kuma monitoring: local-storage app data with restore-on-start and
// backup-on-stop raw_exec tasks, served at uptime.{{ homelab_domain_name }}.
job "uptimekuma" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "uptimekumaGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "web" {
                to = "3001"
            }
        }

        task "create_filesystem" {
            // Copy the most recent backup into place on the local computer. sonarr will not work with
            // its database in an NFS share

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args    = ["${meta.restoreCommand1}", "${meta.restoreCommand2}", "${NOMAD_JOB_NAME}", "${meta.restoreCommand3}"]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "uptimekuma" {

            // env {
            //     PUID = "${meta.PUID}"
            //     PGID = "${meta.PGID}"
            // }

            driver = "docker"
            config {
                image    = "louislam/uptime-kuma:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes  = ["${meta.localStorageRoot}/uptimekuma:/app/data"]
                ports    = ["web"]
            } // docker config

            service {
                port = "web"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`uptime.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "tcp"
                    port     = "web"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            resources {
                cpu    = 400 # MHz
                memory = 100 # MB
            } // resources

        } // task

        task "save_configuration" {
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args    = ["${meta.backupAllocArg1}", "${meta.backupAllocArg2}", "${meta.backupAllocArg3}", "${meta.backupAllocArg4}", "${meta.backupAllocArg5}", "${NOMAD_JOB_NAME}", "${meta.backupAllocArg6}"]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration
    } // group
} // job
|
||||
95
templates/nomad_jobs/whoogle.hcl
Normal file
95
templates/nomad_jobs/whoogle.hcl
Normal file
@@ -0,0 +1,95 @@
|
||||
// Whoogle self-hosted search frontend, configured entirely via environment
// variables; no persistent storage.
job "whoogle" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi(1|2|3)"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "whoogle" {

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            port "whoogle" {
                to = "5000"
            }
        }

        task "whoogle" {

            env {
                WHOOGLE_CONFIG_BLOCK           = "pinterest.com"
                WHOOGLE_CONFIG_DISABLE         = "1"
                WHOOGLE_CONFIG_GET_ONLY        = "1"
                WHOOGLE_CONFIG_LANGUAGE        = "lang_en"
                WHOOGLE_CONFIG_NEW_TAB         = "0"
                WHOOGLE_CONFIG_SEARCH_LANGUAGE = "lang_en"
                WHOOGLE_CONFIG_THEME           = "light"
                WHOOGLE_CONFIG_URL             = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
                WHOOGLE_CONFIG_VIEW_IMAGE      = "1"
                WHOOGLE_RESULTS_PER_PAGE       = "20"
            }

            driver = "docker"
            config {
                image    = "benbusby/whoogle-search:latest"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["whoogle"]
            } // docker config

            service {
                port = "whoogle"
                name = "${NOMAD_JOB_NAME}"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                ]

                check {
                    type     = "http"
                    path     = "/"
                    interval = "30s"
                    timeout  = "4s"
                }

                check_restart {
                    limit           = 0
                    grace           = "1m"
                    ignore_warnings = true
                }
            } // service

            // resources {
            //     cpu    = 100 # MHz
            //     memory = 300 # MB
            // } // resources

        } // task

    } // group

} // job
|
||||
257
templates/nomad_jobs/wikijs.hcl
Normal file
257
templates/nomad_jobs/wikijs.hcl
Normal file
@@ -0,0 +1,257 @@
|
||||
// Wiki.js with a PostgreSQL backend. The db group is pinned to macmini and
// gated by prestart tasks that wait for its config and backup mounts; the app
// group waits for the db service in Consul and for the NFS config mount.
job "wikijs" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "wikijs_db_group" {

        restart {
            attempts = 1
            delay    = "30s"
        }

        network {
            port "db" {
                static = "5434"
                to     = "5432"
            }
        }

        // Wait until the postgres config file exists on the local mount.
        task "await_db_filesytem" {

            constraint {
                attribute = "${node.unique.name}"
                value     = "macmini"
            }

            driver = "docker"

            config {
                image   = "busybox:latest"
                command = "sh"
                args = [
                    "-c",
                    "echo -n 'Waiting for /etc/postgresql/postgresql.conf to be available'; until [ -f /etc/postgresql/my-postgres.conf ]; do echo '.'; sleep 2; done"
                ]
                network_mode = "host"
                volumes = [
                    "/Users/{{ my_username }}/cluster/wikidb:/etc/postgresql"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        // Wait until the NFS backup share is mounted and populated.
        task "await_backup_filesytem" {

            constraint {
                attribute = "${node.unique.name}"
                value     = "macmini"
            }

            driver = "docker"

            config {
                image   = "busybox:latest"
                command = "sh"
                args = [
                    "-c",
                    "echo -n 'Waiting for /backups to be available'; until [ -f /backups/dbBackup.log ]; do echo '.'; sleep 2; done"
                ]
                network_mode = "host"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/backups/wikijsdb:/backups"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        task "wikijs_db" {

            constraint {
                attribute = "${node.unique.name}"
                value     = "macmini"
            }

            env {
                PUID              = "${meta.PUID}"
                PGID              = "${meta.PGID}"
                TZ                = "America/New_York"
                POSTGRES_USER     = "wikijs"
                POSTGRES_PASSWORD = "wikijs"
                POSTGRES_DB       = "wikijs"
                PGDATA            = "/var/lib/postgresql/data/pgdata"
            }

            driver = "docker"
            config {
                image    = "postgres:9.6.17"
                hostname = "wikijs_db"
                volumes = [
                    "/Users/{{ my_username }}/cluster/wikidb/pgdata:/var/lib/postgresql/data",
                    "/Users/{{ my_username }}/cluster/wikidb/my-postgres.conf:/etc/postgresql/postgresql.conf",
                    "/Users/{{ my_username }}/cluster/wikidb/entrypoint:/docker-entrypoint-initdb.d",
                    "${meta.nfsStorageRoot}/pi-cluster/backups/wikijsdb:/backups"
                ]
                ports = ["db"]
            }

            artifact {
                source      = "git::https://github.com/{{ my_username }}/db_scripts.git"
                destination = "local/scripts"
            }

            service {
                port = "db"
                name = "wikijsdb"
                check {
                    type     = "tcp"
                    port     = "db"
                    interval = "30s"
                    timeout  = "4s"
                }
                check_restart {
                    limit           = 2
                    grace           = "1m"
                    ignore_warnings = true
                }
            }

            resources {
                cpu    = 55 # MHz
                memory = 60 # MB
            }

        } // /task
    } // /group

    group "wikijs_app_group" {

        restart {
            attempts = 1
            delay    = "30s"
        }

        network {
            port "http" {
                to = "3000"
            }
        }

        // Wait until the database service is resolvable in Consul DNS.
        task "await_database" {
            driver = "docker"

            config {
                image   = "busybox:latest"
                command = "sh"
                args = [
                    "-c",
                    "echo -n 'Waiting for wikijsdb.service.consul to come alive'; until nslookup wikijsdb.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"
                ]
                network_mode = "host"
            }

            resources {
                cpu    = 200
                memory = 128
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        // Wait until the NFS-mounted wiki config is present and points at the db.
        task "await_filesytem" {
            driver = "docker"

            config {
                image   = "busybox:latest"
                command = "sh"
                args = [
                    "-c",
                    "echo -n 'Waiting for ${meta.nfsStorageRoot}/pi-cluster/wikijs/ to be mounted'; until less -E /wiki/config.yml | grep 'wikijsdb.service.consul' 2>&1 >/dev/null; do echo '.'; sleep 2; done"
                ]
                network_mode = "host"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/wikijs/config/config.yml:/wiki/config.yml"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }
        } // /task

        task "wikijs_app" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "linuxserver/wikijs:version-2.5.170"
                hostname = "wikijs-app"
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/wikijs/config/config.yml:/wiki/config.yml",
                    "${meta.nfsStorageRoot}/pi-cluster/wikijs/config:/config",
                    "${meta.nfsStorageRoot}/pi-cluster/wikijs/data/:/data"
                ]
                ports = ["http"]
            } // /config

            service {
                port = "http"
                name = "wikijs"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.wikijs.rule=Host(`wiki.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.wikijs.entryPoints=web,websecure",
                    "traefik.http.routers.wikijs.service=wikijs",
                    "traefik.http.routers.wikijs.tls=true"
                ]
                check {
                    type     = "http"
                    path     = "/"
                    interval = "90s"
                    timeout  = "15s"
                }
                check_restart {
                    limit           = 3
                    grace           = "30s"
                    ignore_warnings = true
                }
            } // /service

            resources {
                // cpu    = 100 # MHz
                // memory = 60 # MB
            }

        } // /task
    } // /group

} // job
|
||||
Reference in New Issue
Block a user