Mirror of https://github.com/natelandau/ansible-homelab-config.git (synced 2025-11-17 09:23:40 -05:00)

Commit: fix: update job files
@@ -13,6 +13,7 @@ tdarr_installer_version: 2.00.13
 telegraf_version: 1.28.4
 traefik_version: 2.10.7
 valentina_version: 2.1.0
+sabnzbd_version: 4.2.1
 
 # ---------------------------------- SERVICE STATIC PORT MAPPINGS
 influxdb_port: "8086"
@@ -2,7 +2,7 @@ version: '3.9'
 
 services:
   sabnzbd:
-    image: ghcr.io/linuxserver/sabnzbd
+    image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }}
     hostname: sabnzbd
     container_name: sabnzbd
    network_mode: "bridge"
@@ -10,7 +10,7 @@ services:
       - "TZ=America/New_York"
       - "PGID=101"
       - "PUID={{ ansible_user_uid }}"
-      - "DOCKER_MODS=linuxserver/mods:universal-cron"
+      #- "DOCKER_MODS=linuxserver/mods:universal-cron"
     volumes:
       - /var/services/homes/{{ my_username }}:/{{ my_username }}
       - /volume1/nate:/nate
@@ -1,21 +1,21 @@
 job "backup_local_filesystems" {
     region      = "global"
     datacenters = ["{{ datacenter_name }}"]
     type        = "sysbatch"
 
     periodic {
         cron             = "0 */8 * * * *"
         prohibit_overlap = true
         time_zone        = "America/New_York"
     }
 
     task "do_backups" {
         driver = "raw_exec"
         config {
             # When running a binary that exists on the host, the path must be absolute
             command = "${meta.backupCommand}"
             args    = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
         }
     } // /task do_backups
 
 } //job
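The ${meta.backupCommand} and ${meta.backupCommandArg*} interpolations above are resolved from Nomad client node metadata rather than from the job file itself. A minimal sketch of how those keys could be declared in a client's agent configuration (the key names are taken from the job; the paths and argument values here are purely hypothetical):

    # Nomad client agent configuration (sketch) -- supplies the node metadata that
    # the backup_local_filesystems job reads through ${meta.*} interpolation.
    client {
      enabled = true

      meta {
        backupCommand     = "/usr/local/bin/backup_filesystem"  # hypothetical absolute path on the host
        backupCommandArg1 = "--config"                          # hypothetical argument
        backupCommandArg2 = "/etc/backup/local.yml"             # hypothetical argument
        backupCommandArg3 = "--verbose"                         # hypothetical argument
      }
    }
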
@@ -77,7 +77,7 @@ job "gitea" {
             GITEA__mailer__SMTP_PORT      = "{{ email_smtp_port_starttls }}"
             GITEA__mailer__SUBJECT_PREFIX = "[Gitea]"
             GITEA__mailer__USER           = "{{ email_smtp_account }}"
-            GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions"
+            GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages" # add `repo.actions` to the list if enabling actions
             GITEA__server__DOMAIN         = "{{ homelab_domain_name }}"
             GITEA__server__ROOT_URL       = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
             GITEA__server__SSH_DOMAIN     = "${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
@@ -105,15 +105,15 @@ job "gitea" {
 
             service {
                 port     = "webui"
-                name     = "${NOMAD_TASK_NAME}"
+                name     = "${NOMAD_JOB_NAME}"
                 provider = "nomad"
                 tags = [
                     "traefik.enable=true",
-                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
-                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
-                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
-                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
-                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
+                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
+                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
+                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
+                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
+                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
                 ]
 
                 check {
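Because the job is named gitea, switching these interpolations from NOMAD_TASK_NAME to NOMAD_JOB_NAME keys both the registered Nomad service and the Traefik router off the job name, matching the gitea.{{ homelab_domain_name }} hostname already used in the Host() rule. A sketch of how the first router tag resolves at runtime (the domain is a placeholder for whatever homelab_domain_name renders to):

    # With NOMAD_JOB_NAME = "gitea" and a placeholder domain of "example.com", the rewritten tag renders as:
    #   traefik.http.routers.gitea.rule=Host(`gitea.example.com`)
    # The previous NOMAD_TASK_NAME form would have named the router after the task instead of the job.
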
@@ -173,181 +173,232 @@ job "gitea" {
     } // group
 
 
-    group "action-runners" {
-
-        // constraint {
-        //     attribute = "${node.unique.name}"
-        //     operator  = "regexp"
-        //     value     = "rpi2"
-        // }
-
-        constraint {
-            distinct_hosts = true
-        }
-
-        count = 3
-
-        restart {
-            attempts = 0
-            delay    = "30s"
-        }
-
-        network {
-            port "cache" {
-                to = "8088"
-            }
-        }
-
-        task "gitea-action-runner" {
-
-            env {
-                CONFIG_FILE                     = "/local/config.yml"
-                GITEA_INSTANCE_URL              = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
-                GITEA_RUNNER_NAME               = "${node.unique.name}-action-runner"
-                GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
-                PGID                            = "${meta.PGID}"
-                PUID                            = "${meta.PUID}"
-                TZ                              = "America/New_York"
-            }
-
-            driver = "docker"
-            config {
-                image              = "gitea/act_runner:latest"
-                image_pull_timeout = "10m"
-                hostname           = "${NOMAD_TASK_NAME}"
-                volumes = [
-                    "/var/run/docker.sock:/var/run/docker.sock"
-                ]
-                ports = ["cache"]
-            } // docker config
-
-            template {
-                destination = "local/config.yml"
-                env         = false
-                change_mode = "noop"
-                data        = <<-EOH
-                    log:
-                      # The level of logging, can be trace, debug, info, warn, error, fatal
-                      level: info
-
-                    runner:
-                      # Where to store the registration result.
-                      file: .runner
-                      # Execute how many tasks concurrently at the same time.
-                      capacity: 1
-                      # Extra environment variables to run jobs.
-                      envs:
-                        A_TEST_ENV_NAME_1: a_test_env_value_1
-                        A_TEST_ENV_NAME_2: a_test_env_value_2
-                      # Extra environment variables to run jobs from a file.
-                      # It will be ignored if it's empty or the file doesn't exist.
-                      env_file: .env
-                      # The timeout for a job to be finished.
-                      # Please note that the Gitea instance also has a timeout (3h by default) for the job.
-                      # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
-                      timeout: 3h
-                      # Whether skip verifying the TLS certificate of the Gitea instance.
-                      insecure: false
-                      # The timeout for fetching the job from the Gitea instance.
-                      fetch_timeout: 5s
-                      # The interval for fetching the job from the Gitea instance.
-                      fetch_interval: 2s
-                      # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
-                      # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
-                      # If it's empty when registering, it will ask for inputting labels.
-                      # If it's empty when execute `daemon`, will use labels in `.runner` file.
-                      labels: []
-
-                    cache:
-                      # Enable cache server to use actions/cache.
-                      enabled: false
-                      # The directory to store the cache data.
-                      # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
-                      dir: ""
-                      # The host of the cache server.
-                      # It's not for the address to listen, but the address to connect from job containers.
-                      # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
-                      {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
-                      # The port of the cache server.
-                      {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
-                      # The external cache server URL. Valid only when enable is true.
-                      # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
-                      # The URL should generally end with "/".
-                      external_server: ""
-
-                    container:
-                      # Specifies the network to which the container will connect.
-                      # Could be host, bridge or the name of a custom network.
-                      # If it's empty, act_runner will create a network automatically.
-                      network: ""
-                      # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
-                      privileged: false
-                      # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
-                      options:
-                      # The parent directory of a job's working directory.
-                      # If it's empty, /workspace will be used.
-                      workdir_parent:
-                      # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
-                      # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
-                      # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
-                      # valid_volumes:
-                      #   - data
-                      #   - /src/*.json
-                      # If you want to allow any volume, please use the following configuration:
-                      # valid_volumes:
-                      #   - '**'
-                      valid_volumes:
-                        - '**'
-                      # overrides the docker client host with the specified one.
-                      # If it's empty, act_runner will find an available docker host automatically.
-                      # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
-                      # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
-                      docker_host: ""
-                      # Pull docker image(s) even if already present
-                      force_pull: false
-
-                    host:
-                      # The parent directory of a job's working directory.
-                      # If it's empty, $HOME/.cache/act/ will be used.
-                      workdir_parent:
-                    EOH
-            }
-
-            // service {
-            //     port     = "cache"
-            //     name     = "${NOMAD_TASK_NAME}"
-            //     provider = "nomad"
-            //     tags = [
-            //         "traefik.enable=true",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
-            //         "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
-            //     ]
-
-            //     check {
-            //         type     = "tcp"
-            //         port     = "cache"
-            //         interval = "30s"
-            //         timeout  = "4s"
-            //     }
-
-            //     check_restart {
-            //         limit = 0
-            //         grace = "1m"
-            //     }
-
-            // } // service
-
-            // resources {
-            //     cpu    = 100 # MHz
-            //     memory = 300 # MB
-            // } // resources
-
-        } // task gitea-action-runner
-
-    } // group action-runners
+    // group "action-runners" {
+
+    //     constraint {
+    //         attribute = "${node.unique.name}"
+    //         operator  = "regexp"
+    //         value     = "macmini"
+    //     }
+
+    //     constraint {
+    //         distinct_hosts = true
+    //     }
+
+    //     count = 1
+
+    //     restart {
+    //         attempts = 0
+    //         delay    = "30s"
+    //     }
+
+    //     network {
+    //         port "cache" {
+    //             to = "8088"
+    //         }
+    //     }
+
+    //     task "await-gitea" {
+
+    //         lifecycle {
+    //             hook    = "prestart"
+    //             sidecar = false
+    //         }
+
+    //         driver = "docker"
+
+    //         config {
+    //             image        = "busybox:latest"
+    //             command      = "/bin/sh"
+    //             args = [
+    //                 "-c",
+    //                 "chmod 755 /local/ping.sh && /local/ping.sh"
+    //             ]
+    //             network_mode = "host"
+    //         }
+
+    //         template {
+    //             destination = "local/ping.sh"
+    //             change_mode = "restart"
+    //             data        = <<-EOH
+    //                 #!/bin/sh
+    //                 {% raw -%}
+    //                 {{ range nomadService "gitea" }}
+    //                 IP="{{ .Address }}"
+    //                 PORT="{{ .Port }}"
+    //                 {{ end }}
+    //                 {% endraw -%}
+
+    //                 until [ -n "${IP}" ] && [ -n "${PORT}" ]; do
+    //                     echo "Waiting for Nomad to populate the service information..."
+    //                     sleep 1
+    //                 done
+
+    //                 echo "Waiting for Gitea to start..."
+
+    //                 until nc -z "${IP}" "${PORT}"; do
+    //                     echo "'nc -z ${IP} ${PORT}' is unavailable..."
+    //                     sleep 1
+    //                 done
+
+    //                 echo "Gitea is up! Found at ${IP}:${PORT}"
+    //             EOH
+    //         }
+
+    //     }
+
+    //     task "gitea-action-runner" {
+
+    //         env {
+    //             CONFIG_FILE                     = "/local/config.yml"
+    //             GITEA_INSTANCE_URL              = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
+    //             GITEA_RUNNER_NAME               = "${node.unique.name}-action-runner"
+    //             GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}"
+    //             PGID                            = "${meta.PGID}"
+    //             PUID                            = "${meta.PUID}"
+    //             TZ                              = "America/New_York"
+    //         }
+
+    //         driver = "docker"
+    //         config {
+    //             image              = "gitea/act_runner:latest"
+    //             image_pull_timeout = "10m"
+    //             hostname           = "${NOMAD_TASK_NAME}"
+    //             volumes = [
+    //                 "${meta.nfsStorageRoot}/pi-cluster/gitea-action-runners:/data",
+    //                 "/var/run/docker.sock:/var/run/docker.sock"
+    //             ]
+    //             ports = ["cache"]
+    //         } // docker config
+
+    //         template {
+    //             destination = "local/config.yml"
+    //             env         = false
+    //             change_mode = "noop"
+    //             data        = <<-EOH
+    //                 log:
+    //                   # The level of logging, can be trace, debug, info, warn, error, fatal
+    //                   level: info
+
+    //                 runner:
+    //                   # Where to store the registration result.
+    //                   {% raw %}file: .runner-{{ env "node.unique.name" }}{% endraw +%}
+    //                   # Execute how many tasks concurrently at the same time.
+    //                   capacity: 1
+    //                   # Extra environment variables to run jobs.
+    //                   envs:
+    //                     A_TEST_ENV_NAME_1: a_test_env_value_1
+    //                     A_TEST_ENV_NAME_2: a_test_env_value_2
+    //                   # Extra environment variables to run jobs from a file.
+    //                   # It will be ignored if it's empty or the file doesn't exist.
+    //                   env_file: .env
+    //                   # The timeout for a job to be finished.
+    //                   # Please note that the Gitea instance also has a timeout (3h by default) for the job.
+    //                   # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
+    //                   timeout: 3h
+    //                   # Whether skip verifying the TLS certificate of the Gitea instance.
+    //                   insecure: false
+    //                   # The timeout for fetching the job from the Gitea instance.
+    //                   fetch_timeout: 5s
+    //                   # The interval for fetching the job from the Gitea instance.
+    //                   fetch_interval: 2s
+    //                   # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
+    //                   # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
+    //                   # If it's empty when registering, it will ask for inputting labels.
+    //                   # If it's empty when execute `daemon`, will use labels in `.runner` file.
+    //                   labels: []
+
+    //                 cache:
+    //                   # Enable cache server to use actions/cache.
+    //                   enabled: false
+    //                   # The directory to store the cache data.
+    //                   # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
+    //                   dir: ""
+    //                   # The host of the cache server.
+    //                   # It's not for the address to listen, but the address to connect from job containers.
+    //                   # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
+    //                   {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%}
+    //                   # The port of the cache server.
+    //                   {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%}
+    //                   # The external cache server URL. Valid only when enable is true.
+    //                   # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
+    //                   # The URL should generally end with "/".
+    //                   external_server: ""
+
+    //                 container:
+    //                   # Specifies the network to which the container will connect.
+    //                   # Could be host, bridge or the name of a custom network.
+    //                   # If it's empty, act_runner will create a network automatically.
+    //                   network: ""
+    //                   # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
+    //                   privileged: false
+    //                   # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
+    //                   options:
+    //                   # The parent directory of a job's working directory.
+    //                   # If it's empty, /workspace will be used.
+    //                   workdir_parent:
+    //                   # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
+    //                   # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
+    //                   # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
+    //                   # valid_volumes:
+    //                   #   - data
+    //                   #   - /src/*.json
+    //                   # If you want to allow any volume, please use the following configuration:
+    //                   # valid_volumes:
+    //                   #   - '**'
+    //                   valid_volumes:
+    //                     - '**'
+    //                   # overrides the docker client host with the specified one.
+    //                   # If it's empty, act_runner will find an available docker host automatically.
+    //                   # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
+    //                   # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
+    //                   docker_host: ""
+    //                   # Pull docker image(s) even if already present
+    //                   force_pull: false
+
+    //                 host:
+    //                   # The parent directory of a job's working directory.
+    //                   # If it's empty, $HOME/.cache/act/ will be used.
+    //                   workdir_parent:
+    //             EOH
+    //         }
+
+    //         // service {
+    //         //     port     = "cache"
+    //         //     name     = "${NOMAD_TASK_NAME}"
+    //         //     provider = "nomad"
+    //         //     tags = [
+    //         //         "traefik.enable=true",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
+    //         //         "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
+    //         //     ]
+
+    //         //     check {
+    //         //         type     = "tcp"
+    //         //         port     = "cache"
+    //         //         interval = "30s"
+    //         //         timeout  = "4s"
+    //         //     }
+
+    //         //     check_restart {
+    //         //         limit = 0
+    //         //         grace = "1m"
+    //         //     }
+
+    //         // } // service
+
+    //         resources {
+    //             cpu    = 400 # MHz
+    //             memory = 600 # MB
+    //         } // resources
+
+    //     } // task gitea-action-runner
+
+    // } // group action-runners
 
 } // job
@@ -3,143 +3,149 @@ job "icloud_backup" {
     datacenters = ["{{ datacenter_name }}"]
     type        = "service"
 
+    // Need to authenticate within the container by running
+    //     icloud --username=<icloud-username> --session-directory=/app/session_data
+    // and then entering the 2FA code that is sent to the user associated with the iCloud account.
 
     // constraint {
     //     attribute = "${node.unique.name}"
     //     operator  = "regexp"
     //     value     = "rpi(1|2|3)"
     // }
 
     update {
         max_parallel      = 1
         health_check      = "checks"
         min_healthy_time  = "10s"
         healthy_deadline  = "5m"
         progress_deadline = "10m"
         auto_revert       = true
         canary            = 0
         stagger           = "30s"
     }
 
     group "icloud_backup" {
 
         count = 1
 
         restart {
             attempts = 0
             delay    = "30s"
         }
 
         task "icloud_backup" {
 
             env {
+                ENV_CONFIG_FILE_PATH = "/local/icloud_backup.yaml"
-                PUID = "${meta.PUID}"
-                PGID = "${meta.PGID}"
+                PGID = "${meta.PGID}"
+                PUID = "${meta.PUID}"
                 TZ = "America/New_York"
                 // ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment.
             }
 
             driver = "docker"
             config {
                 image    = "mandarons/icloud-drive"
                 hostname = "${NOMAD_TASK_NAME}"
                 volumes = [
                     "${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud",
                     "${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data",
-                    "local/icloud_backup.yaml:/app/config.yaml",
                     "/etc/timezone:/etc/timezone:ro",
                     "/etc/localtime:/etc/localtime:ro"
                 ]
             } // docker config
 
             template {
                 destination = "local/icloud_backup.yaml"
                 env         = false
                 change_mode = "restart"
                 perms       = "644"
                 data        = <<-EOH
+                    ---
                     app:
                       logger:
                         # level - debug, info (default), warning, or error
                         level: "info"
                         # log filename icloud.log (default)
                         filename: "icloud.log"
                       credentials:
                         # iCloud drive username
                         username: "{{ icloud_backup_username }}"
                         # Retry login interval
                         retry_login_interval: 3600 # 1 hour
-                      # Drive destination
                       root: "icloud"
                       smtp:
                         # If you want to receive email notifications about expired/missing 2FA credentials then uncomment
                         email: "{{ email_smtp_account }}"
                         # optional, to email address. Default is sender email.
                         #to: "receiver@test.com"
                         password: "{{ icloud_backup_smtp_password }}"
                         host: "{{ email_smtp_host }}"
                         port: {{ email_smtp_port_starttls }}
                         # If your email provider doesn't handle TLS
                         no_tls: false
                     drive:
                       destination: "drive"
                       remove_obsolete: true
                       sync_interval: 172800 # 2 days
                       filters:
                         # File filters to be included in syncing iCloud drive content
                         folders:
                           - "Scanner By Readdle"
                           - "Documents by Readdle"
                           # - "folder3"
                         file_extensions:
                           # File extensions to be included
                           - "pdf"
                           - "png"
                           - "jpg"
                           - "jpeg"
                           - "xls"
                           - "xlsx"
                           - "docx"
                           - "pptx"
                           - "txt"
                           - "md"
                           - "html"
                           - "htm"
                           - "css"
                           - "js"
                           - "json"
                           - "xml"
                           - "yaml"
                           - "yml"
                           - "csv"
                           - "mp3"
                           - "mp4"
                           - "mov"
                           - "wav"
                           - "mkv"
                           - "m4a"
                     photos:
                       destination: "photos"
                       remove_obsolete: true
                       sync_interval: 172800 # 2 days
+                      all_albums: false # Optional, default false. If true preserve album structure. If same photo is in multiple albums creates duplicates on filesystem
+                      folder_format: "%Y-%m" # optional, if set put photos in subfolders according to format. Cheatsheet - https://strftime.org
                       filters:
                         albums:
                           # - "album1"
                         file_sizes: # valid values are original, medium and/or thumb
                           - "original"
                           # - "medium"
                           # - "thumb"
                     EOH
             } // template data
 
             resources {
                 cpu    = 900 # MHz
                 memory = 100 # MB
             } // resources
 
         } // task
 
     } // group
 
 } // job
@@ -137,6 +137,7 @@ job "pihole" {
         service {
             name = "piholeDNStcp"
             port = "dns"
+            provider = "nomad"
             check {
                 type = "tcp"
                 port = "dns"
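Assembled from the hunk above, the resulting stanza reads roughly as follows (indentation approximate). The added provider = "nomad" line registers piholeDNStcp with Nomad's built-in service catalog instead of the default Consul provider:

    service {
      name     = "piholeDNStcp"
      port     = "dns"
      provider = "nomad"   # added in this commit: use Nomad-native service discovery

      check {
        type = "tcp"
        port = "dns"
        # interval/timeout are outside the lines shown in this hunk
      }
    }
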
templates/nomad_jobs/remove_nzbs.hcl (new file, 27 lines)
@@ -0,0 +1,27 @@
+job "remove_nzbs" {
+    region      = "global"
+    datacenters = ["{{ datacenter_name }}"]
+    type        = "batch"
+
+    constraint {
+        attribute = "${node.unique.name}"
+        operator  = "regexp"
+        value     = "rpi"
+    }
+
+    periodic {
+        cron             = "*/15 * * * * *"
+        prohibit_overlap = true
+        time_zone        = "America/New_York"
+    }
+
+    task "remove_nzbs" {
+        driver = "raw_exec"
+        config {
+            command = "/home/pi/.pyenv/shims/python"
+            args    = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"]
+        }
+
+    } // /task do_backups
+
+} //job
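Like backup_local_filesystems above, this job runs a host binary directly through the raw_exec driver, which is disabled on Nomad clients by default. A sketch of the client-side prerequisite (not part of this commit) would look something like:

    # Nomad client agent configuration (sketch) -- raw_exec must be explicitly
    # enabled before jobs that declare driver = "raw_exec" can run on the node.
    plugin "raw_exec" {
      config {
        enabled = true
      }
    }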