fix: update job files

Nathaniel Landau
2024-01-09 08:53:40 -05:00
parent 6b00bf557c
commit feb1fbedf4
7 changed files with 389 additions and 303 deletions

View File

@@ -13,6 +13,7 @@ tdarr_installer_version: 2.00.13
 telegraf_version: 1.28.4
 traefik_version: 2.10.7
 valentina_version: 2.1.0
+sabnzbd_version: 4.2.1
 # ---------------------------------- SERVICE STATIC PORT MAPPINGS
 influxdb_port: "8086"

View File

@@ -2,7 +2,7 @@ version: '3.9'
 services:
   sabnzbd:
-    image: ghcr.io/linuxserver/sabnzbd
+    image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }}
     hostname: sabnzbd
     container_name: sabnzbd
     network_mode: "bridge"
@@ -10,7 +10,7 @@ services:
- "TZ=America/New_York" - "TZ=America/New_York"
- "PGID=101" - "PGID=101"
- "PUID={{ ansible_user_uid }}" - "PUID={{ ansible_user_uid }}"
- "DOCKER_MODS=linuxserver/mods:universal-cron" #- "DOCKER_MODS=linuxserver/mods:universal-cron"
volumes: volumes:
- /var/services/homes/{{ my_username }}:/{{ my_username }} - /var/services/homes/{{ my_username }}:/{{ my_username }}
- /volume1/nate:/nate - /volume1/nate:/nate
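
With the tag pinned to the new `sabnzbd_version` variable, upgrading SABnzbd becomes an explicit variable bump instead of whatever `latest` happens to resolve to. A minimal sketch of rolling the pinned tag out by hand on the Docker host (assumes the rendered compose file is in the working directory and the Compose v2 CLI is available; with the standalone binary, substitute `docker-compose`):

    # Pull the newly pinned image (4.2.1 after templating) and recreate only this service.
    docker compose pull sabnzbd
    docker compose up -d sabnzbd

    # Confirm the running container is on the pinned tag.
    docker inspect --format '{{ .Config.Image }}' sabnzbd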

View File

@@ -77,7 +77,7 @@ job "gitea" {
       GITEA__mailer__SMTP_PORT = "{{ email_smtp_port_starttls }}"
       GITEA__mailer__SUBJECT_PREFIX = "[Gitea]"
       GITEA__mailer__USER = "{{ email_smtp_account }}"
-      GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions"
+      GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages" # add `repo.actions` to the list if enabling actions
       GITEA__server__DOMAIN = "{{ homelab_domain_name }}"
       GITEA__server__ROOT_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
       GITEA__server__SSH_DOMAIN = "${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
@@ -105,15 +105,15 @@ job "gitea" {
       service {
         port = "webui"
-        name = "${NOMAD_TASK_NAME}"
+        name = "${NOMAD_JOB_NAME}"
         provider = "nomad"
         tags = [
           "traefik.enable=true",
-          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
+          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
-          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
+          "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
-          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
+          "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
-          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
+          "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
-          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
+          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
         ]
         check {
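
Registering the service under the job name rather than the task name keeps the service entry and every Traefik router label keyed to the same `gitea` identifier. A quick sanity check against Nomad's built-in service catalog once the job is redeployed (assumes the CLI can reach the cluster):

    # The service should now be listed as "gitea" (the job name).
    nomad service list
    nomad service info gitea
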
@@ -173,181 +173,232 @@ job "gitea" {
   } // group
-  group "action-runners" {
+  // group "action-runners" {
     // constraint {
     //   attribute = "${node.unique.name}"
     //   operator = "regexp"
-    //   value = "rpi2"
+    //   value = "macmini"
     // }
-    constraint {
+  // constraint {
-      distinct_hosts = true
+  //   distinct_hosts = true
-    }
+  // }
-    count = 3
+  // count = 1
-    restart {
+  // restart {
-      attempts = 0
+  //   attempts = 0
-      delay = "30s"
+  //   delay = "30s"
-    }
+  // }
-    network {
+  // network {
-      port "cache" {
+  //   port "cache" {
-        to = "8088"
+  //     to = "8088"
-      }
+  //   }
-    }
+  // }
-    task "gitea-action-runner" {
+  // task "await-gitea" {
-      env {
-        CONFIG_FILE = "/local/config.yml"
-        GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
-        GITEA_RUNNER_NAME = "${node.unique.name}-action-runner"
-        GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
-        PGID = "${meta.PGID}"
-        PUID = "${meta.PUID}"
-        TZ = "America/New_York"
-      }
+  //   lifecycle {
+  //     hook = "prestart"
+  //     sidecar = false
+  //   }
-      driver = "docker"
+  //   driver = "docker"
+  //   config {
+  //     image = "busybox:latest"
+  //     command = "/bin/sh"
+  //     args = [
+  //       "-c",
+  //       "chmod 755 /local/ping.sh && /local/ping.sh"
-      config {
-        image = "gitea/act_runner:latest"
-        image_pull_timeout = "10m"
-        hostname = "${NOMAD_TASK_NAME}"
-        volumes = [
-          "/var/run/docker.sock:/var/run/docker.sock"
-        ]
-        ports = ["cache"]
-      } // docker config
-      template {
-        destination = "local/config.yml"
-        env = false
-        change_mode = "noop"
-        data = <<-EOH
-          log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels: []
cache:
# Enable cache server to use actions/cache.
enabled: false
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
{% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
# The port of the cache server.
{% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes:
- '**'
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:
EOH
}
-      // service {
-      //   port = "cache"
-      //   name = "${NOMAD_TASK_NAME}"
-      //   provider = "nomad"
-      //   tags = [
-      //     "traefik.enable=true",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
-      //     "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
-      //   ]
-      //   check {
-      //     type = "tcp"
-      //     port = "cache"
-      //     interval = "30s"
-      //     timeout = "4s"
-      //   }
-      //   check_restart {
-      //     limit = 0
-      //     grace = "1m"
-      //   }
-      // } // service
+  //     ]
+  //     network_mode = "host"
+  //   }
+  //   template {
+  //     destination = "local/ping.sh"
+  //     change_mode = "restart"
+  //     data = <<-EOH
+  //       #!/bin/sh
+  //       {% raw -%}
+  //       {{ range nomadService "gitea" }}
+  //       IP="{{ .Address }}"
+  //       PORT="{{ .Port }}"
+  //       {{ end }}
+  //       {% endraw -%}
+  //       until [ -n "${IP}" ] && [ -n "${PORT}" ]; do
+  //         echo "Waiting for Nomad to populate the service information..."
+  //         sleep 1
+  //       done
+  //       echo "Waiting for Gitea to start..."
+  //       until nc -z "${IP}" "${PORT}"; do
+  //         echo "'nc -z ${IP} ${PORT}' is unavailable..."
+  //         sleep 1
+  //       done
+  //       echo "Gitea is up! Found at ${IP}:${PORT}"
+  //     EOH
+  //   }
+  // }
// task "gitea-action-runner" {
// env {
// CONFIG_FILE = "/local/config.yml"
// GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
// GITEA_RUNNER_NAME = "${node.unique.name}-action-runner"
// GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
// PGID = "${meta.PGID}"
// PUID = "${meta.PUID}"
// TZ = "America/New_York"
// }
// driver = "docker"
// config {
// image = "gitea/act_runner:latest"
// image_pull_timeout = "10m"
// hostname = "${NOMAD_TASK_NAME}"
// volumes = [
// "${meta.nfsStorageRoot}/pi-cluster/gitea-action-runners:/data",
// "/var/run/docker.sock:/var/run/docker.sock"
// ]
// ports = ["cache"]
// } // docker config
// template {
// destination = "local/config.yml"
// env = false
// change_mode = "noop"
// data = <<-EOH
// log:
// # The level of logging, can be trace, debug, info, warn, error, fatal
// level: info
// runner:
// # Where to store the registration result.
// {% raw %}file: .runner-{{ env "node.unique.name" }}{% endraw +%}
// # Execute how many tasks concurrently at the same time.
// capacity: 1
// # Extra environment variables to run jobs.
// envs:
// A_TEST_ENV_NAME_1: a_test_env_value_1
// A_TEST_ENV_NAME_2: a_test_env_value_2
// # Extra environment variables to run jobs from a file.
// # It will be ignored if it's empty or the file doesn't exist.
// env_file: .env
// # The timeout for a job to be finished.
// # Please note that the Gitea instance also has a timeout (3h by default) for the job.
// # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
// timeout: 3h
// # Whether skip verifying the TLS certificate of the Gitea instance.
// insecure: false
// # The timeout for fetching the job from the Gitea instance.
// fetch_timeout: 5s
// # The interval for fetching the job from the Gitea instance.
// fetch_interval: 2s
// # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
// # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
// # If it's empty when registering, it will ask for inputting labels.
// # If it's empty when execute `daemon`, will use labels in `.runner` file.
// labels: []
// cache:
// # Enable cache server to use actions/cache.
// enabled: false
// # The directory to store the cache data.
// # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
// dir: ""
// # The host of the cache server.
// # It's not for the address to listen, but the address to connect from job containers.
// # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
// {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
// # The port of the cache server.
// {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
// # The external cache server URL. Valid only when enable is true.
// # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
// # The URL should generally end with "/".
// external_server: ""
// container:
// # Specifies the network to which the container will connect.
// # Could be host, bridge or the name of a custom network.
// # If it's empty, act_runner will create a network automatically.
// network: ""
// # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
// privileged: false
// # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
// options:
// # The parent directory of a job's working directory.
// # If it's empty, /workspace will be used.
// workdir_parent:
// # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
// # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
// # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
// # valid_volumes:
// # - data
// # - /src/*.json
// # If you want to allow any volume, please use the following configuration:
// # valid_volumes:
// # - '**'
// valid_volumes:
// - '**'
// # overrides the docker client host with the specified one.
// # If it's empty, act_runner will find an available docker host automatically.
// # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
// # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
// docker_host: ""
// # Pull docker image(s) even if already present
// force_pull: false
// host:
// # The parent directory of a job's working directory.
// # If it's empty, $HOME/.cache/act/ will be used.
// workdir_parent:
// EOH
// }
// // service {
// // port = "cache"
// // name = "${NOMAD_TASK_NAME}"
// // provider = "nomad"
// // tags = [
// // "traefik.enable=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
// // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
// // ]
// // check {
// // type = "tcp"
// // port = "cache"
// // interval = "30s"
// // timeout = "4s"
// // }
// // check_restart {
// // limit = 0
// // grace = "1m"
// // }
// // } // service
-      // resources {
-      //   cpu = 100 # MHz
-      //   memory = 300 # MB
-      // } // resources
-    } // task gitea-action-runner
-  } // group action-runners
+  // resources {
+  //   cpu = 400 # MHz
+  //   memory = 600 # MB
+  // } // resources
+  // } // task gitea-action-runner
+  // } // group action-runners
 } // job
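
The action-runners group is commented out rather than deleted, including a sketched `await-gitea` prestart gate that would wait for the Gitea service to answer before starting a runner. Until the group is re-enabled, a runner can be registered by hand against the same instance URL and registration token the job template injects; a rough sketch, with `gitea.example.com` standing in for the real domain and the label taken from the config comments above:

    # One-time registration; writes a .runner file in the current directory.
    act_runner register \
      --no-interactive \
      --instance https://gitea.example.com \
      --token "<gitea_runner_registration_token>" \
      --name manual-runner \
      --labels "ubuntu-latest:docker://node:16-bullseye"

    # Start polling the instance for queued action jobs.
    act_runner daemon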

View File

@@ -3,6 +3,10 @@ job "icloud_backup" {
   datacenters = ["{{ datacenter_name }}"]
   type = "service"
+  // Need to authenticate within the container by running
+  // icloud --username=<icloud-username> --session-directory=/app/session_data
+  // and then entering the 2FA code that is sent to the user associated with the iCloud account.
   // constraint {
   //   attribute = "${node.unique.name}"
   //   operator = "regexp"
@@ -32,8 +36,9 @@ job "icloud_backup" {
task "icloud_backup" { task "icloud_backup" {
env { env {
PUID = "${meta.PUID}" ENV_CONFIG_FILE_PATH = "/local/icloud_backup.yaml"
PGID = "${meta.PGID}" PGID = "${meta.PGID}"
PUID = "${meta.PUID}"
TZ = "America/New_York" TZ = "America/New_York"
// ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment. // ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
} }
@@ -45,7 +50,6 @@ job "icloud_backup" {
       volumes = [
         "${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
         "${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
-        "local/icloud_backup.yaml:/app/config.yaml",
         "/etc/timezone:/etc/timezone:ro",
         "/etc/localtime:/etc/localtime:ro"
       ]
@@ -57,6 +61,7 @@ job "icloud_backup" {
change_mode = "restart" change_mode = "restart"
perms = "644" perms = "644"
data = <<-EOH data = <<-EOH
---
app: app:
logger: logger:
# level - debug, info (default), warning, or error # level - debug, info (default), warning, or error
@@ -68,7 +73,6 @@ job "icloud_backup" {
           username: "{{ icloud_backup_username }}"
           # Retry login interval
           retry_login_interval: 3600 # 1 hour
-          # Drive destination
           root: "icloud"
           smtp:
             # If you want to receive email notifications about expired/missing 2FA credentials then uncomment
@@ -121,6 +125,8 @@ job "icloud_backup" {
destination: "photos" destination: "photos"
remove_obsolete: true remove_obsolete: true
sync_interval: 172800 # 2 days sync_interval: 172800 # 2 days
all_albums: false # Optional, default false. If true preserve album structure. If same photo is in multiple albums creates duplicates on filesystem
folder_format: "%Y-%m" # optional, if set put photos in subfolders according to format. Cheatsheet - https://strftime.org
filters: filters:
albums: albums:
# - "album1" # - "album1"
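
Per the comment added at the top of this job, the iCloud session has to be bootstrapped interactively because of 2FA. A sketch of doing that through Nomad once an allocation is running (the username is a placeholder; the task name and session path match the job above):

    # Find the running allocation for the job.
    nomad job status icloud_backup

    # Exec into the task and authenticate; a prompt for the 2FA code will follow.
    nomad alloc exec -task icloud_backup <alloc-id> \
      icloud --username=user@example.com --session-directory=/app/session_data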

View File

@@ -137,6 +137,7 @@ job "pihole" {
       service {
         name = "piholeDNStcp"
         port = "dns"
+        provider = "nomad"
         check {
           type = "tcp"
           port = "dns"

View File

@@ -0,0 +1,27 @@
job "remove_nzbs" {
region = "global"
datacenters = ["{{ datacenter_name }}"]
type = "batch"
constraint {
attribute = "${node.unique.name}"
operator = "regexp"
value = "rpi"
}
periodic {
cron = "*/15 * * * * *"
prohibit_overlap = true
time_zone = "America/New_York"
}
task "remove_nzbs" {
driver = "raw_exec"
config {
command = "/home/pi/.pyenv/shims/python"
args = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"]
}
} // /task do_backups
} //job
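
This new batch job periodically runs a SABnzbd cleanup script directly on an rpi node via raw_exec. One thing worth verifying: the cron expression has six fields, and Nomad's cron parser accepts an optional seconds field, so `*/15 * * * * *` may fire every 15 seconds rather than every 15 minutes; the next-launch time reported by `nomad job status` makes this easy to confirm. A short sketch for exercising the job outside its schedule once it is registered:

    # Check the schedule and the next periodic launch time.
    nomad job status remove_nzbs

    # Force an immediate run to test the script.
    nomad job periodic force remove_nzbs

    # Tail the output of the most recent run.
    nomad alloc logs -job remove_nzbs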