From feb1fbedf435c959671a9edb1107ebab604bc7a7 Mon Sep 17 00:00:00 2001 From: Nathaniel Landau Date: Tue, 9 Jan 2024 08:53:40 -0500 Subject: [PATCH] fix: update job files --- default_variables.yml | 1 + .../synology_sabnzbd.yml.j2 | 4 +- .../nomad_jobs/backup_local_filesystems.hcl | 32 +- templates/nomad_jobs/gitea.hcl | 377 ++++++++++-------- templates/nomad_jobs/icloud_backup.hcl | 250 ++++++------ templates/nomad_jobs/pihole.hcl | 1 + templates/nomad_jobs/remove_nzbs.hcl | 27 ++ 7 files changed, 389 insertions(+), 303 deletions(-) create mode 100644 templates/nomad_jobs/remove_nzbs.hcl diff --git a/default_variables.yml b/default_variables.yml index ffea5ea..6731fe2 100644 --- a/default_variables.yml +++ b/default_variables.yml @@ -13,6 +13,7 @@ tdarr_installer_version: 2.00.13 telegraf_version: 1.28.4 traefik_version: 2.10.7 valentina_version: 2.1.0 +sabnzbd_version: 4.2.1 # ---------------------------------- SERVICE STATIC PORT MAPPINGS influxdb_port: "8086" diff --git a/templates/docker_compose_files/synology_sabnzbd.yml.j2 b/templates/docker_compose_files/synology_sabnzbd.yml.j2 index 658465d..f08e3a4 100644 --- a/templates/docker_compose_files/synology_sabnzbd.yml.j2 +++ b/templates/docker_compose_files/synology_sabnzbd.yml.j2 @@ -2,7 +2,7 @@ version: '3.9' services: sabnzbd: - image: ghcr.io/linuxserver/sabnzbd + image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }} hostname: sabnzbd container_name: sabnzbd network_mode: "bridge" @@ -10,7 +10,7 @@ services: - "TZ=America/New_York" - "PGID=101" - "PUID={{ ansible_user_uid }}" - - "DOCKER_MODS=linuxserver/mods:universal-cron" + #- "DOCKER_MODS=linuxserver/mods:universal-cron" volumes: - /var/services/homes/{{ my_username }}:/{{ my_username }} - /volume1/nate:/nate diff --git a/templates/nomad_jobs/backup_local_filesystems.hcl b/templates/nomad_jobs/backup_local_filesystems.hcl index 69ae7fe..357a16c 100644 --- a/templates/nomad_jobs/backup_local_filesystems.hcl +++ 
b/templates/nomad_jobs/backup_local_filesystems.hcl @@ -1,21 +1,21 @@ job "backup_local_filesystems" { - region = "global" - datacenters = ["{{ datacenter_name }}"] - type = "sysbatch" + region = "global" + datacenters = ["{{ datacenter_name }}"] + type = "sysbatch" - periodic { - cron = "0 */8 * * * *" - prohibit_overlap = true - time_zone = "America/New_York" - } - - task "do_backups" { - driver = "raw_exec" - config { - # When running a binary that exists on the host, the path must be absolute - command = "${meta.backupCommand}" - args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"] + periodic { + cron = "0 */8 * * * *" + prohibit_overlap = true + time_zone = "America/New_York" } - } // /task do_backups + + task "do_backups" { + driver = "raw_exec" + config { + # When running a binary that exists on the host, the path must be absolute + command = "${meta.backupCommand}" + args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"] + } + } // /task do_backups } //job diff --git a/templates/nomad_jobs/gitea.hcl b/templates/nomad_jobs/gitea.hcl index 92f34b3..0cc33c0 100644 --- a/templates/nomad_jobs/gitea.hcl +++ b/templates/nomad_jobs/gitea.hcl @@ -77,7 +77,7 @@ job "gitea" { GITEA__mailer__SMTP_PORT = "{{ email_smtp_port_starttls }}" GITEA__mailer__SUBJECT_PREFIX = "[Gitea]" GITEA__mailer__USER = "{{ email_smtp_account }}" - GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages,repo.actions" + GITEA__repository__DEFAULT_REPO_UNITS = "repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages" # add `repo.actions` to the list if enabling actions GITEA__server__DOMAIN = "{{ homelab_domain_name }}" GITEA__server__ROOT_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}" GITEA__server__SSH_DOMAIN = "${NOMAD_JOB_NAME}.{{ homelab_domain_name }}" @@ -105,15 +105,15 @@ job "gitea" { service 
{ port = "webui" - name = "${NOMAD_TASK_NAME}" + name = "${NOMAD_JOB_NAME}" provider = "nomad" tags = [ "traefik.enable=true", - "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)", - "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure", - "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}", - "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true", - "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare" + "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)", + "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure", + "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}", + "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true", + "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare" ] check { @@ -173,181 +173,232 @@ job "gitea" { } // group - group "action-runners" { + // group "action-runners" { - // constraint { - // attribute = "${node.unique.name}" - // operator = "regexp" - // value = "rpi2" - // } + // constraint { + // attribute = "${node.unique.name}" + // operator = "regexp" + // value = "macmini" + // } - constraint { - distinct_hosts = true - } + // constraint { + // distinct_hosts = true + // } - count = 3 + // count = 1 - restart { - attempts = 0 - delay = "30s" - } + // restart { + // attempts = 0 + // delay = "30s" + // } - network { - port "cache" { - to = "8088" - } - } + // network { + // port "cache" { + // to = "8088" + // } + // } - task "gitea-action-runner" { + // task "await-gitea" { - env { - CONFIG_FILE = "/local/config.yml" - GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}" - GITEA_RUNNER_NAME = "${node.unique.name}-action-runner" - GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}" - PGID = "${meta.PGID}" - PUID = "${meta.PUID}" - TZ = "America/New_York" - } + // lifecycle { + // hook = "prestart" + // sidecar = false + 
// } - driver = "docker" - config { - image = "gitea/act_runner:latest" - image_pull_timeout = "10m" - hostname = "${NOMAD_TASK_NAME}" - volumes = [ - "/var/run/docker.sock:/var/run/docker.sock" - ] - ports = ["cache"] - } // docker config + // driver = "docker" - template { - destination = "local/config.yml" - env = false - change_mode = "noop" - data = <<-EOH - log: - # The level of logging, can be trace, debug, info, warn, error, fatal - level: info + // config { + // image = "busybox:latest" + // command = "/bin/sh" + // args = [ + // "-c", + // "chmod 755 /local/ping.sh && /local/ping.sh" + // ] + // network_mode = "host" + // } - runner: - # Where to store the registration result. - file: .runner - # Execute how many tasks concurrently at the same time. - capacity: 1 - # Extra environment variables to run jobs. - envs: - A_TEST_ENV_NAME_1: a_test_env_value_1 - A_TEST_ENV_NAME_2: a_test_env_value_2 - # Extra environment variables to run jobs from a file. - # It will be ignored if it's empty or the file doesn't exist. - env_file: .env - # The timeout for a job to be finished. - # Please note that the Gitea instance also has a timeout (3h by default) for the job. - # So the job could be stopped by the Gitea instance if it's timeout is shorter than this. - timeout: 3h - # Whether skip verifying the TLS certificate of the Gitea instance. - insecure: false - # The timeout for fetching the job from the Gitea instance. - fetch_timeout: 5s - # The interval for fetching the job from the Gitea instance. - fetch_interval: 2s - # The labels of a runner are used to determine which jobs the runner can run, and how to run them. - # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"] - # If it's empty when registering, it will ask for inputting labels. - # If it's empty when execute `daemon`, will use labels in `.runner` file. 
- labels: [] + // template { + // destination = "local/ping.sh" + // change_mode = "restart" + // data = <<-EOH + // #!/bin/sh + // {% raw -%} + // {{ range nomadService "gitea" }} + // IP="{{ .Address }}" + // PORT="{{ .Port }}" + // {{ end }} + // {% endraw -%} - cache: - # Enable cache server to use actions/cache. - enabled: false - # The directory to store the cache data. - # If it's empty, the cache data will be stored in $HOME/.cache/actcache. - dir: "" - # The host of the cache server. - # It's not for the address to listen, but the address to connect from job containers. - # So 0.0.0.0 is a bad choice, leave it empty to detect automatically. - {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%} - # The port of the cache server. - {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%} - # The external cache server URL. Valid only when enable is true. - # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself. - # The URL should generally end with "/". - external_server: "" + // until [ -n "${IP}" ] && [ -n "${PORT}" ]; do + // echo "Waiting for Nomad to populate the service information..." + // sleep 1 + // done - container: - # Specifies the network to which the container will connect. - # Could be host, bridge or the name of a custom network. - # If it's empty, act_runner will create a network automatically. - network: "" - # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker). - privileged: false - # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway). - options: - # The parent directory of a job's working directory. - # If it's empty, /workspace will be used. - workdir_parent: - # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob - # You can specify multiple volumes. 
If the sequence is empty, no volumes can be mounted. - # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to: - # valid_volumes: - # - data - # - /src/*.json - # If you want to allow any volume, please use the following configuration: - # valid_volumes: - # - '**' - valid_volumes: - - '**' - # overrides the docker client host with the specified one. - # If it's empty, act_runner will find an available docker host automatically. - # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers. - # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work. - docker_host: "" - # Pull docker image(s) even if already present - force_pull: false + // echo "Waiting for Gitea to start..." - host: - # The parent directory of a job's working directory. - # If it's empty, $HOME/.cache/act/ will be used. - workdir_parent: - EOH - } + // until nc -z "${IP}" "${PORT}"; do + // echo "'nc -z ${IP} ${PORT}' is unavailable..." + // sleep 1 + // done - // service { - // port = "cache" - // name = "${NOMAD_TASK_NAME}" - // provider = "nomad" - // tags = [ - // "traefik.enable=true", - // "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)", - // "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure", - // "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}", - // "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true", - // "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare", - // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file" - // ] + // echo "Gitea is up! 
Found at ${IP}:${PORT}" - // check { - // type = "tcp" - // port = "cache" - // interval = "30s" - // timeout = "4s" - // } + // EOH + // } - // check_restart { - // limit = 0 - // grace = "1m" - // } + // } - // } // service + // task "gitea-action-runner" { - // resources { - // cpu = 100 # MHz - // memory = 300 # MB - // } // resources + // env { + // CONFIG_FILE = "/local/config.yml" + // GITEA_INSTANCE_URL = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}" + // GITEA_RUNNER_NAME = "${node.unique.name}-action-runner" + // GITEA_RUNNER_REGISTRATION_TOKEN = "{{ gitea_runner_registration_token }}" + // PGID = "${meta.PGID}" + // PUID = "${meta.PUID}" + // TZ = "America/New_York" + // } - } // task gitea-action-runner + // driver = "docker" + // config { + // image = "gitea/act_runner:latest" + // image_pull_timeout = "10m" + // hostname = "${NOMAD_TASK_NAME}" + // volumes = [ + // "${meta.nfsStorageRoot}/pi-cluster/gitea-action-runners:/data", + // "/var/run/docker.sock:/var/run/docker.sock" + // ] + // ports = ["cache"] + // } // docker config - } // group action-runners + // template { + // destination = "local/config.yml" + // env = false + // change_mode = "noop" + // data = <<-EOH + // log: + // # The level of logging, can be trace, debug, info, warn, error, fatal + // level: info + + // runner: + // # Where to store the registration result. + // {% raw %}file: .runner-{{ env "node.unique.name" }}{% endraw +%} + // # Execute how many tasks concurrently at the same time. + // capacity: 1 + // # Extra environment variables to run jobs. + // envs: + // A_TEST_ENV_NAME_1: a_test_env_value_1 + // A_TEST_ENV_NAME_2: a_test_env_value_2 + // # Extra environment variables to run jobs from a file. + // # It will be ignored if it's empty or the file doesn't exist. + // env_file: .env + // # The timeout for a job to be finished. + // # Please note that the Gitea instance also has a timeout (3h by default) for the job. 
+ // # So the job could be stopped by the Gitea instance if it's timeout is shorter than this. + // timeout: 3h + // # Whether skip verifying the TLS certificate of the Gitea instance. + // insecure: false + // # The timeout for fetching the job from the Gitea instance. + // fetch_timeout: 5s + // # The interval for fetching the job from the Gitea instance. + // fetch_interval: 2s + // # The labels of a runner are used to determine which jobs the runner can run, and how to run them. + // # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"] + // # If it's empty when registering, it will ask for inputting labels. + // # If it's empty when execute `daemon`, will use labels in `.runner` file. + // labels: [] + + // cache: + // # Enable cache server to use actions/cache. + // enabled: false + // # The directory to store the cache data. + // # If it's empty, the cache data will be stored in $HOME/.cache/actcache. + // dir: "" + // # The host of the cache server. + // # It's not for the address to listen, but the address to connect from job containers. + // # So 0.0.0.0 is a bad choice, leave it empty to detect automatically. + // {% raw %}host: "{{ env "NOMAD_IP_cache" }}"{% endraw +%} + // # The port of the cache server. + // {% raw %}port: {{ env "NOMAD_HOST_PORT_cache" }}{% endraw +%} + // # The external cache server URL. Valid only when enable is true. + // # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself. + // # The URL should generally end with "/". + // external_server: "" + + // container: + // # Specifies the network to which the container will connect. + // # Could be host, bridge or the name of a custom network. + // # If it's empty, act_runner will create a network automatically. + // network: "" + // # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker). 
+ // privileged: false + // # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway). + // options: + // # The parent directory of a job's working directory. + // # If it's empty, /workspace will be used. + // workdir_parent: + // # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob + // # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted. + // # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to: + // # valid_volumes: + // # - data + // # - /src/*.json + // # If you want to allow any volume, please use the following configuration: + // # valid_volumes: + // # - '**' + // valid_volumes: + // - '**' + // # overrides the docker client host with the specified one. + // # If it's empty, act_runner will find an available docker host automatically. + // # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers. + // # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work. + // docker_host: "" + // # Pull docker image(s) even if already present + // force_pull: false + + // host: + // # The parent directory of a job's working directory. + // # If it's empty, $HOME/.cache/act/ will be used. 
+ // workdir_parent: + // EOH + // } + + // // service { + // // port = "cache" + // // name = "${NOMAD_TASK_NAME}" + // // provider = "nomad" + // // tags = [ + // // "traefik.enable=true", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare", + // // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file" + // // ] + + // // check { + // // type = "tcp" + // // port = "cache" + // // interval = "30s" + // // timeout = "4s" + // // } + + // // check_restart { + // // limit = 0 + // // grace = "1m" + // // } + + // // } // service + + // resources { + // cpu = 400 # MHz + // memory = 600 # MB + // } // resources + + // } // task gitea-action-runner + + // } // group action-runners } // job diff --git a/templates/nomad_jobs/icloud_backup.hcl b/templates/nomad_jobs/icloud_backup.hcl index e9a82af..7acf264 100644 --- a/templates/nomad_jobs/icloud_backup.hcl +++ b/templates/nomad_jobs/icloud_backup.hcl @@ -3,143 +3,149 @@ job "icloud_backup" { datacenters = ["{{ datacenter_name }}"] type = "service" + // Need to authenticate within the container by running + // icloud --username= --session-directory=/app/session_data + // and then entering the 2FA code that is sent to the user associated with the iCloud account. 
+ // constraint { // attribute = "${node.unique.name}" // operator = "regexp" // value = "rpi(1|2|3)" // } - update { - max_parallel = 1 - health_check = "checks" - min_healthy_time = "10s" - healthy_deadline = "5m" - progress_deadline = "10m" - auto_revert = true - canary = 0 - stagger = "30s" - } - - group "icloud_backup" { - - count = 1 - - restart { - attempts = 0 - delay = "30s" + update { + max_parallel = 1 + health_check = "checks" + min_healthy_time = "10s" + healthy_deadline = "5m" + progress_deadline = "10m" + auto_revert = true + canary = 0 + stagger = "30s" } - task "icloud_backup" { + group "icloud_backup" { - env { - PUID = "${meta.PUID}" - PGID = "${meta.PGID}" - TZ = "America/New_York" - // ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment. - } + count = 1 - driver = "docker" - config { - image = "mandarons/icloud-drive" - hostname = "${NOMAD_TASK_NAME}" - volumes = [ - "${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud", - "${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data", - "local/icloud_backup.yaml:/app/config.yaml", - "/etc/timezone:/etc/timezone:ro", - "/etc/localtime:/etc/localtime:ro" - ] - } // docker config + restart { + attempts = 0 + delay = "30s" + } - template { - destination = "local/icloud_backup.yaml" - env = false - change_mode = "restart" - perms = "644" - data = <<-EOH - app: - logger: - # level - debug, info (default), warning, or error - level: "info" - # log filename icloud.log (default) - filename: "icloud.log" - credentials: - # iCloud drive username - username: "{{ icloud_backup_username }}" - # Retry login interval - retry_login_interval: 3600 # 1 hour - # Drive destination - root: "icloud" - smtp: - # If you want to receive email notifications about expired/missing 2FA credentials then uncomment - email: "{{ email_smtp_account }}" - # optional, to email address. Default is sender email. 
- #to: "receiver@test.com" - password: "{{ icloud_backup_smtp_password }}" - host: "{{ email_smtp_host }}" - port: {{ email_smtp_port_starttls }} - # If your email provider doesn't handle TLS - no_tls: false - drive: - destination: "drive" - remove_obsolete: true - sync_interval: 172800 # 2 days - filters: - # File filters to be included in syncing iCloud drive content - folders: - - "Scanner By Readdle" - - "Documents by Readdle" - # - "folder3" - file_extensions: - # File extensions to be included - - "pdf" - - "png" - - "jpg" - - "jpeg" - - "xls" - - "xlsx" - - "docx" - - "pptx" - - "txt" - - "md" - - "html" - - "htm" - - "css" - - "js" - - "json" - - "xml" - - "yaml" - - "yml" - - "csv" - - "mp3" - - "mp4" - - "mov" - - "wav" - - "mkv" - - "m4a" - photos: - destination: "photos" - remove_obsolete: true - sync_interval: 172800 # 2 days - filters: - albums: - # - "album1" - file_sizes: # valid values are original, medium and/or thumb - - "original" - # - "medium" - # - "thumb" - EOH - } // template data + task "icloud_backup" { - resources { - cpu = 900 # MHz - memory = 100 # MB - } // resources + env { + ENV_CONFIG_FILE_PATH = "/local/icloud_backup.yaml" + PGID = "${meta.PGID}" + PUID = "${meta.PUID}" + TZ = "America/New_York" + // ENV_ICLOUD_PASSWORD = "[icloud password]" # 2FA renders this env var useless at the moment. 
+ } - } // task + driver = "docker" + config { + image = "mandarons/icloud-drive" + hostname = "${NOMAD_TASK_NAME}" + volumes = [ + "${meta.nfsStorageRoot}/nate/icloud_backup:/app/icloud", + "${meta.nfsStorageRoot}/pi-cluster/icloud_backup/session_data:/app/session_data", + "/etc/timezone:/etc/timezone:ro", + "/etc/localtime:/etc/localtime:ro" + ] + } // docker config + + template { + destination = "local/icloud_backup.yaml" + env = false + change_mode = "restart" + perms = "644" + data = <<-EOH + --- + app: + logger: + # level - debug, info (default), warning, or error + level: "info" + # log filename icloud.log (default) + filename: "icloud.log" + credentials: + # iCloud drive username + username: "{{ icloud_backup_username }}" + # Retry login interval + retry_login_interval: 3600 # 1 hour + root: "icloud" + smtp: + # If you want to receive email notifications about expired/missing 2FA credentials then uncomment + email: "{{ email_smtp_account }}" + # optional, to email address. Default is sender email. + #to: "receiver@test.com" + password: "{{ icloud_backup_smtp_password }}" + host: "{{ email_smtp_host }}" + port: {{ email_smtp_port_starttls }} + # If your email provider doesn't handle TLS + no_tls: false + drive: + destination: "drive" + remove_obsolete: true + sync_interval: 172800 # 2 days + filters: + # File filters to be included in syncing iCloud drive content + folders: + - "Scanner By Readdle" + - "Documents by Readdle" + # - "folder3" + file_extensions: + # File extensions to be included + - "pdf" + - "png" + - "jpg" + - "jpeg" + - "xls" + - "xlsx" + - "docx" + - "pptx" + - "txt" + - "md" + - "html" + - "htm" + - "css" + - "js" + - "json" + - "xml" + - "yaml" + - "yml" + - "csv" + - "mp3" + - "mp4" + - "mov" + - "wav" + - "mkv" + - "m4a" + photos: + destination: "photos" + remove_obsolete: true + sync_interval: 172800 # 2 days + all_albums: false # Optional, default false. If true preserve album structure. 
If same photo is in multiple albums creates duplicates on filesystem + folder_format: "%Y-%m" # optional, if set put photos in subfolders according to format. Cheatsheet - https://strftime.org + filters: + albums: + # - "album1" + file_sizes: # valid values are original, medium and/or thumb + - "original" + # - "medium" + # - "thumb" + EOH + } // template data + + resources { + cpu = 900 # MHz + memory = 100 # MB + } // resources + + } // task - } // group + } // group } // job diff --git a/templates/nomad_jobs/pihole.hcl b/templates/nomad_jobs/pihole.hcl index 0307c67..f28ce1e 100644 --- a/templates/nomad_jobs/pihole.hcl +++ b/templates/nomad_jobs/pihole.hcl @@ -137,6 +137,7 @@ job "pihole" { service { name = "piholeDNStcp" port = "dns" + provider = "nomad" check { type = "tcp" port = "dns" diff --git a/templates/nomad_jobs/remove_nzbs.hcl b/templates/nomad_jobs/remove_nzbs.hcl new file mode 100644 index 0000000..eb2b8a7 --- /dev/null +++ b/templates/nomad_jobs/remove_nzbs.hcl @@ -0,0 +1,27 @@ +job "remove_nzbs" { + region = "global" + datacenters = ["{{ datacenter_name }}"] + type = "batch" + + constraint { + attribute = "${node.unique.name}" + operator = "regexp" + value = "rpi" + } + + periodic { + cron = "*/15 * * * * *" + prohibit_overlap = true + time_zone = "America/New_York" + } + + task "remove_nzbs" { + driver = "raw_exec" + config { + command = "/home/pi/.pyenv/shims/python" + args = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"] + } + + } // /task remove_nzbs + +} //job