Compare commits
bd815e96c6...renovate/c
21 Commits

- 20f4c2eba8
- 1b73fda11f
- 96ddfa6ec5
- 6c0b1c9281
- 4f2934411b
- 54dbe0c667
- a30e60b557
- 2536e855e5
- dbe11dc8fa
- b27f3e58ca
- 5b3f2cf8f4
- 68cf58ead5
- d3ac8a252b
- 961ec128f1
- d84eb73db0
- a3eaab5a07
- cf4daacab5
- 2a038e59e8
- 443d614a66
- 2803f694e8
- bbff0f6692
.gitea/workflows/ci.yml (new file, 54 lines)
@@ -0,0 +1,54 @@
name: CI

on:
  pull_request:
  push:
    branches:
      - main

jobs:
  terraform-validate:
    name: Terraform fmt + validate
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: hashicorp/setup-terraform@v3

      - name: fmt check — 1-nixos-node
        run: terraform fmt -check -recursive
        working-directory: 1-nixos-node

      - name: fmt check — 2-nomad-config
        run: terraform fmt -check -recursive
        working-directory: 2-nomad-config

      - name: validate — 2-nomad-config (no backend)
        run: |
          terraform init -backend=false
          terraform validate
        working-directory: 2-nomad-config

  nomad-validate:
    name: Nomad job spec validate
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Install Nomad CLI
        run: |
          curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp.gpg
          . /etc/os-release
          echo "deb [signed-by=/usr/share/keyrings/hashicorp.gpg] https://apt.releases.hashicorp.com ${VERSION_CODENAME} main" | tee /etc/apt/sources.list.d/hashicorp.list
          apt-get update && apt-get install -y nomad

      - name: Validate all job specs
        env:
          NOMAD_ADDR: http://jaglan-beta-m20.lan:4646
        run: |
          find 2-nomad-config -name '*.nomad.hcl' | while read f; do
            echo "==> $f"
            nomad job validate "$f"
          done
@@ -49,6 +49,9 @@
  preferred_address_family = "ipv4";
%{if cpu_total_compute != null ~}
  cpu_total_compute = ${cpu_total_compute};
%{endif ~}
%{if node_class != null ~}
  node_class = "${node_class}";
%{endif ~}
  host_volume = {
%{ for volume in host_volumes ~}

@@ -61,6 +64,7 @@
    cni_path = "$${pkgs.cni-plugins}/bin";
  };
  plugin.docker.config.allow_privileged = true;
  plugin.docker.config.volumes.enabled = true;
};
extraPackages = with pkgs; [
  cni-plugins

@@ -113,6 +117,61 @@
networking.firewall.allowedTCPPorts = [ 80 443 8081 4646 4647 4648 8300 8301 8500 ];
networking.firewall.allowedUDPPorts = [ 8301 ];

# Ensure Docker daemon is available (Nomad enableDocker only configures Nomad, does not guarantee docker service)
virtualisation.docker.enable = true;

%{if node_class == "latte-panda-n150" ~}
# Enable Intel iGPU (N150 UHD Graphics) for OpenVINO / VA-API workloads running in Docker
hardware.graphics = {
  enable = true;
  extraPackages = with pkgs; [
    intel-media-driver     # VA-API (iHD)
    intel-compute-runtime  # OpenCL / oneAPI
  ];
};

%{endif ~}
# Proper systemd service definition for macvlan network creation
systemd.services.docker-macvlan-network = {
  description = "Ensure macvlan Docker network exists";
  after = [ "network-online.target" "docker.service" ];
  wants = [ "network-online.target" "docker.service" ];
  wantedBy = [ "multi-user.target" ];
  serviceConfig = {
    Type = "oneshot";
  };
  # Provide required binaries in PATH
  path = [ pkgs.docker pkgs.bash pkgs.coreutils pkgs.iproute2 pkgs.gnugrep ];
  script = ''
    set -euo pipefail
    NET_NAME=macvlan
    if docker network inspect "$NET_NAME" >/dev/null 2>&1; then
      echo "Docker network $NET_NAME already exists"
      exit 0
    fi
    echo "Creating Docker macvlan network $NET_NAME on interface ${bind_interface}"
    # We intentionally do NOT use --ip-range here to avoid allocating the
    # same reserved pool on every host (which could lead to collisions if
    # multiple macvlan containers are started across nodes). Instead, we
    # give critical services (like UniFi) an explicit static IP via the
    # Nomad job (Docker static assignment) and rely on manual DHCP
    # reservations to prevent conflicts.
    #
    # If you later need multiple macvlan-assigned containers per host,
    # consider one of these strategies:
    #   1. Per-host distinct network name + ip-range slice (macvlan-m01, ...)
    #   2. Parameterize an ip-range per host in Terraform and template here
    #   3. Keep a registry of allocated static IPs in Consul KV / Nomad vars
    docker network create -d macvlan \
      --subnet=192.168.1.0/24 \
      --gateway=192.168.1.1 \
      -o parent=${bind_interface} \
      "$NET_NAME"
    echo "Docker macvlan network $NET_NAME created"
  '';
  restartIfChanged = false; # Don't rerun just because comment changed
};

# Copy the NixOS configuration file and link it from the resulting system
# (/run/current-system/configuration.nix). This is useful in case you
# accidentally delete configuration.nix.
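The comment in the macvlan unit above lists three ways to support more than one macvlan-attached container per host. A minimal sketch of strategy 2, assuming the existing node object and `templatefile` wiring; the `macvlan_ip_range` field name and the example CIDR are hypothetical and not part of this change:

```hcl
# Hypothetical per-node attribute, following the same optional(...) pattern as node_class.
variable "nodes" {
  type = map(object({
    bind_interface   = string
    macvlan_ip_range = optional(string, null) # e.g. "192.168.1.64/28" reserved for this host (assumed)
  }))
}

# In the networking script template, just before the "-o parent=${bind_interface}" line,
# something like the following could emit the flag only when a slice is defined:
#   %{if macvlan_ip_range != null ~}
#   --ip-range=${macvlan_ip_range} \
#   %{endif ~}
```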
@@ -21,6 +21,7 @@ variable "nodes" {
    bind_interface    = string
    bootstrap         = optional(bool, false)   # Optional field for bootstrap nodes
    cpu_total_compute = optional(number, null)  # Optional field for CPU total compute
    node_class        = optional(string, null)  # Optional Nomad node_class for scheduling constraints
    host_volumes      = list(string)
  }))
}

@@ -32,6 +33,7 @@ locals {
    bind_interface    = v.bind_interface
    bootstrap         = v.bootstrap
    cpu_total_compute = v.cpu_total_compute
    node_class        = v.node_class
    host_volumes      = v.host_volumes
  })
}
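For reference, a sketch of what one entry in the `nodes` map could look like with the two new optional attributes set. The hostname, interface name, and compute value are illustrative only; `latte-panda-n150` is the class referenced by the Frigate job constraint and the iGPU block in the node template:

```hcl
nodes = {
  # Hypothetical entry; other nodes can simply omit node_class / cpu_total_compute
  # and fall back to the null defaults.
  "latte-panda-01" = {
    bind_interface    = "enp1s0"             # assumed interface name
    node_class        = "latte-panda-n150"
    cpu_total_compute = 3200                 # assumed value, overrides Nomad's fingerprinted total
    host_volumes      = []
  }
}
```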
File diff suppressed because one or more lines are too long
2-nomad-config/.terraform.lock.hcl (generated, 29 lines changed)
@@ -17,18 +17,25 @@ provider "registry.terraform.io/carlpett/sops" {
}

provider "registry.terraform.io/cloudflare/cloudflare" {
  version     = "5.5.0"
  constraints = "~> 5.0"
  version     = "5.18.0"
  constraints = ">= 5.0.0"
  hashes = [
    "h1:wZhU174xytOMZ1t6uDUQiLtR/XKpi2RH9OzMz0XqP9Q=",
    "zh:178f29dee2edac39252780819f34004b1841770c61ee7fb5a625afaece6495cd",
    "zh:6faf26203167ae20ca5c8ece4a8bb1c4187137505058fb7b1a4bd5095823e648",
    "zh:97c91a95819336b8c41618919786ddd2dca643d28219d52af1d80b88018c6eec",
    "zh:bbc53670fc2613e3fe81b5bf7b8674c5ad083a206fa8af34f0f055a8d06b2d01",
    "zh:d305bcb01249ada21b80e5038e371f6ca0a60d95d7052df82456e4c4963f3bfc",
    "zh:e2f9db57ead7100676b790a3e4567d88443fae0e19127e66b3505210de93e4b5",
    "zh:eb8cef2e6cbf05237b8a2f229314ae12c792ed5f8f60fe180102bdf17dc30841",
    "zh:f51a5bb0130d2f42772988ee56723f176aa230701184a0f5598dbb1c7b4c3906",
    "h1:2FKT5YVLuHLmv7BnFxDC3UtipD3hSSrb0iJ9Ei2C/ks=",
    "h1:6FoKYTGqaCvKctMEm1Y1c06mmY3I04jhCBRXEYe6mcQ=",
    "h1:AhWro37kF118sAjRjIZ27CuV6kFpg1d+XYDo/7diyjU=",
    "h1:B9eoAx4QKNVuKHDahNl8JzuSLCCeIGAJiS0MckJu5wQ=",
    "h1:KfnaaT3RFoyWvPHsNVmsrCV7QEAGPLGNyHWT9IY+bxY=",
    "h1:SPFFA6LxyFkjpEnpWbyQVyVJVXxzP8RLpehfUMRXDp4=",
    "h1:VMUOof+Cf2h4asIe2lin7Fvf15mGWQ9mQYiuGhYM1aw=",
    "h1:ny17Q/ce8iuHxppA/yIuRpCkVDtqhE+LDynWtv9/qwI=",
    "zh:47e7bdfd8eddd2685f383269c0b6936ef62edd6d8383c8d7757b0cce0a689737",
    "zh:aa23eb6aa128667883cabc449ceca4072d0181f574cd727e08ebd6d69a4bfd48",
    "zh:c3da673e05d3bd933c82e2b6ba0f85aa23c5e24fadd3932f7c066314feeb65a3",
    "zh:c59f07c017fc78b79e80554a0737c9db2a2e681c3e46ff637942d28d1f1a3924",
    "zh:d559074612835a37fa684d8d7d0cf68911487b71f4067acc59069cb00bb8baf0",
    "zh:e12290a4eda757c183a4258230245dd170f0def389c37eb771db144ce3b382dd",
    "zh:ed47e484432ba1bbbb4802061f395ebd253ae8e20be9b72552d3d830fd2ca268",
    "zh:f35e08d468408697b3e7c4a7f548b874141ac8f8d395ab8edded322201cc7047",
    "zh:f809ab383cca0a5f83072981c64208cbd7fa67e986a86ee02dd2c82333221e32",
  ]
}

@@ -18,27 +18,9 @@ resource "nomad_variable" "postgres" {
  }
}

resource "nomad_csi_volume_registration" "unraid_database_dump" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_database_dump"
  name      = "unraid_database_dump"

  external_id = "unraid_database_dump"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/database-dump"
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "unraid_smb_database_dump" {
  source = "../modules/unraid_smb"
  name   = "dump"
  id     = "unraid_database_dump"
  share  = "database-dump"
}

@@ -2,30 +2,3 @@
module "ingress" {
  source = "./2-ingress"
}

# traefik.tf

moved {
  from = cloudflare_dns_record.othrayte-one
  to   = module.ingress.cloudflare_dns_record.othrayte-one
}

moved {
  from = cloudflare_dns_record.star-othrayte-one
  to   = module.ingress.cloudflare_dns_record.star-othrayte-one
}

moved {
  from = nomad_variable.traefik
  to   = module.ingress.nomad_variable.traefik
}

moved {
  from = nomad_job.traefik
  to   = module.ingress.nomad_job.traefik
}

moved {
  from = nomad_csi_volume_registration.unraid_appdata_traefik
  to   = module.ingress.nomad_csi_volume_registration.unraid_appdata_traefik
}

@@ -63,6 +63,12 @@ job "authelia" {
        data = <<EOF
server:
  address: tcp://0.0.0.0:{{ env "NOMAD_PORT_http" }}/
  endpoints:
    authz:
      forward-auth:
        implementation: 'ForwardAuth'
        authn_strategies:
          - name: 'CookieSession'
theme: "auto"
identity_validation:
  reset_password:
@@ -6,7 +6,7 @@ terraform {
    }
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 5"
      version = ">= 5"
    }
    postgresql = {
      source = "cyrilgdn/postgresql"
@@ -112,6 +112,9 @@ EOF
      template {
        data = <<EOF
http:
  serversTransports:
    ignorecert:
      insecureSkipVerify: true
  middlewares:
    auth:
      forwardAuth:

@@ -154,11 +157,6 @@ http:
      service: unraid
      middlewares:
        - auth
    frigate:
      rule: "Host(`frigate.othrayte.one`)"
      service: frigate
      middlewares:
        - auth
    kopia:
      rule: "Host(`kopia.othrayte.one`)"
      service: kopia

@@ -173,6 +171,11 @@ http:
    hass-token:
      rule: "Host(`${hass_magic_token}-hass.othrayte.one`)"
      service: hass
    unifi-network:
      rule: "Host(`network.othrayte.one`)"
      service: unifi-network
      middlewares:
        - auth

  services:
    traefik:

@@ -190,19 +193,20 @@ http:
    unraid:
      loadBalancer:
        servers:
          - url: "http://192.168.1.192:80"
    frigate:
      loadBalancer:
        servers:
          - url: "http://192.168.1.192:5000"
          - url: "http://betelgeuse-seven-unraid.lan:80"
    kopia:
      loadBalancer:
        servers:
          - url: "http://192.168.1.192:51515"
          - url: "http://betelgeuse-seven-unraid.lan:51515"
    hass:
      loadBalancer:
        servers:
          - url: "http://192.168.1.234:8123"
    unifi-network:
      loadBalancer:
        serversTransport: ignorecert
        servers:
          - url: "https://192.168.1.50:8443"
EOF

        destination = "local/configs/nomad.yml"
@@ -32,28 +32,8 @@ resource "nomad_job" "traefik" {
  })
}

resource "nomad_csi_volume_registration" "unraid_appdata_traefik" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_appdata_traefik"
  name      = "unraid_appdata_traefik"

  external_id = "unraid_appdata_traefik"

  capability {
    access_mode     = "multi-node-multi-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/appdata"
    subDir = "traefik" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "appdata_traefik" {
  source      = "../modules/appdata"
  name        = "traefik"
  access_mode = "multi-node-multi-writer"
}

2-nomad-config/act-runner.nomad.hcl (new file, 66 lines)
@@ -0,0 +1,66 @@
job "act-runner" {
  group "act-runner" {
    network {
      mode = "bridge"
    }

    # Consul Connect upstream to Gitea so the runner can register and receive jobs
    service {
      name = "act-runner"
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "code-connect"
              local_bind_port  = 3000
            }
          }
        }
      }
    }

    task "act-runner" {
      driver = "docker"

      config {
        image   = "gitea/act_runner:latest"
        volumes = ["/var/run/docker.sock:/var/run/docker.sock"]
      }

      env = {
        GITEA_INSTANCE_URL = "https://gitea-1ef0bea6b75a4fd3e9393a9f7f7e4b02.othrayte.one"
        CONFIG_FILE        = "/secrets/runner-config.yml"
      }

      # Required SOPS key:
      #   act-runner.registration_token — runner registration token from Gitea
      #   Admin → Settings → Actions → Runners → Create new runner
      template {
        data        = <<EOF
GITEA_RUNNER_REGISTRATION_TOKEN={{ with nomadVar "nomad/jobs/act-runner" }}{{ .registration_token }}{{ end }}
EOF
        destination = "secrets/runner.env"
        env         = true
      }

      # Limit which images/labels the runner will accept so it doesn't pick up
      # unrelated workloads if more runners are added later.
      template {
        data        = <<EOF
runner:
  labels:
    - "ubuntu-latest:docker://node:20-bookworm"
    - "ubuntu-22.04:docker://node:20-bookworm"
    - "ubuntu-24.04:docker://node:20-bookworm"
EOF
        destination = "secrets/runner-config.yml"
      }

      resources {
        cpu        = 200
        memory     = 256
        memory_max = 1024
      }
    }
  }
}
2-nomad-config/act-runner.tf (new file, 10 lines)
@@ -0,0 +1,10 @@
resource "nomad_job" "act_runner" {
  jobspec = file("act-runner.nomad.hcl")
}

resource "nomad_variable" "act_runner" {
  path = "nomad/jobs/act-runner"
  items = {
    registration_token = data.sops_file.secrets.data["act-runner.registration_token"]
  }
}
2-nomad-config/deluge.nomad.hcl (new file, 145 lines)
@@ -0,0 +1,145 @@
job "deluge" {
  group "deluge" {
    network {
      mode = "bridge"
      port "http" {
        to = 8112
      }
    }

    task "wireguard" {
      driver = "docker"

      lifecycle {
        hook    = "prestart"
        sidecar = true
      }

      config {
        image      = "thrnz/docker-wireguard-pia:latest"
        privileged = true
        ports      = ["http"]
      }

      env {
        LOC           = "aus_melbourne"
        LOCAL_NETWORK = "192.168.1.0/24"
        # PORT_FORWARDING = "1" # TODO: Find a way to tell deluge the forwarded port, the wireguard container outputs it /pia-shared/port.dat
      }

      template {
        data        = <<EOH
USER="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_user }}{{ end }}"
PASS="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_pass }}{{ end }}"
EOH
        destination = "secrets/pia.env"
        env         = true # Load the file as environment variables
      }

      resources {
        cpu    = 50
        memory = 32
      }
    }

    # Service for Traefik (external ingress)
    service {
      name = "deluge"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.deluge.middlewares=auth@file",
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    # Service for Consul Connect (internal mesh communication)
    service {
      name         = "deluge-api"
      port         = "http"
      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)

      # tags = [
      #   "traefik.enable=false",
      # ]

      connect {
        sidecar_service {
          //tags = ["traefik.enable=false"]
        }
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "deluge" {
      driver = "docker"

      config {
        image        = "lscr.io/linuxserver/deluge:latest"
        network_mode = "container:wireguard-${NOMAD_ALLOC_ID}" # Share namespace with VPN
      }

      env {
        PUID = "1000"
        PGID = "1000"
        TZ   = "Australia/Melbourne"
      }

      volume_mount {
        volume      = "unraid_appdata_deluge"
        destination = "/config"
        read_only   = false
      }

      volume_mount {
        volume      = "unraid_media_deluge"
        destination = "/data/downloads"
        read_only   = false
      }

      resources {
        cpu        = 400
        memory     = 2048
        memory_max = 3000
      }
    }

    volume "unraid_appdata_deluge" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_deluge"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }

    volume "unraid_media_deluge" {
      type            = "csi"
      read_only       = false
      source          = "unraid_media_deluge"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }
  }
}
2-nomad-config/deluge.tf (new file, 24 lines)
@@ -0,0 +1,24 @@
resource "nomad_job" "deluge" {
  jobspec = file("deluge.nomad.hcl")
}

resource "nomad_variable" "deluge" {
  path = "nomad/jobs/deluge"
  items = {
    pia_user = data.sops_file.secrets.data["pia.user"]
    pia_pass = data.sops_file.secrets.data["pia.pass"]
  }
}

module "appdata_deluge" {
  source = "./modules/appdata"
  name   = "deluge"
}

module "unraid_smb_deluge_media" {
  source = "./modules/unraid_smb"
  name   = "deluge"
  share  = "media"
  subDir = "downloads"
}
2-nomad-config/frigate.nomad.hcl (new file, 216 lines)
@@ -0,0 +1,216 @@
job "frigate" {
  # Pin to N150 LattePanda nodes - Intel UHD iGPU for OpenVINO-accelerated detection.
  # hardware.graphics (intel-compute-runtime) is deployed to these nodes via configuration.nix.
  constraint {
    attribute = "${node.class}"
    value     = "latte-panda-n150"
  }

  group "frigate" {
    count = 1

    network {
      port "http" {
        to = 5000
      }
    }

    # Prestart: restore Frigate's SQLite DB from the Litestream file replica on the CIFS share.
    # Runs to completion before the frigate task starts. Safe on first boot (-if-replica-exists
    # is a no-op when no replica exists yet).
    task "litestream-restore" {
      lifecycle {
        hook    = "prestart"
        sidecar = false
      }

      driver = "docker"

      config {
        image   = "litestream/litestream:0.5.9"
        command = "restore"
        args    = ["-if-replica-exists", "-config", "/local/litestream.yml", "/alloc/data/frigate.db"]
      }

      # Litestream config: replicate to /config/frigate.db.litestream/ on the CIFS share.
      # Litestream writes its own segment format - no SQLite advisory locking involved.
      # Frigate must be configured with database.path: /alloc/data/frigate.db in config.yml.
      template {
        data        = <<EOH
dbs:
  - path: /alloc/data/frigate.db
    replicas:
      - url: file:///config/frigate.db.litestream
EOH
        destination = "local/litestream.yml"
      }

      volume_mount {
        volume      = "unraid_appdata_frigate"
        destination = "/config"
        read_only   = false
      }

      resources {
        cpu        = 100
        memory     = 64
        memory_max = 256
      }
    }

    # Sidecar: continuously stream WAL changes from /alloc/data/frigate.db to the CIFS replica.
    # Runs alongside frigate for the lifetime of the allocation.
    task "litestream-replicate" {
      lifecycle {
        hook    = "poststart"
        sidecar = true
      }

      driver = "docker"

      config {
        image   = "litestream/litestream:0.5"
        command = "replicate"
        args    = ["-config", "/local/litestream.yml"]
      }

      template {
        data        = <<EOH
dbs:
  - path: /alloc/data/frigate.db
    replicas:
      - url: file:///config/frigate.db.litestream
EOH
        destination = "local/litestream.yml"
      }

      volume_mount {
        volume      = "unraid_appdata_frigate"
        destination = "/config"
        read_only   = false
      }

      resources {
        cpu        = 100
        memory     = 64
        memory_max = 256
      }
    }

    task "frigate" {
      driver = "docker"

      config {
        image      = "ghcr.io/blakeblackshear/frigate:0.17.1"
        ports      = ["http"]
        privileged = true

        # Shared memory for inter-process frame buffers (frigate forks detector processes).
        shm_size = 268435456 # 256 MiB

        # Large tmpfs for decoded frame cache - avoids wearing out any storage.
        mounts = [
          {
            type     = "tmpfs"
            target   = "/tmp/cache"
            readonly = false
            tmpfs_options = {
              size = 1000000000 # 1 GiB in bytes
            }
          }
        ]

        # Intel iGPU render node - Frigate's bundled OpenVINO runtime auto-detects
        # GPU device and uses it for object detection without any extra env vars.
        # Requires hardware.graphics.enable = true on the NixOS node (N150 nodes).
        devices = [
          {
            host_path      = "/dev/dri/renderD128"
            container_path = "/dev/dri/renderD128"
          }
        ]
      }

      # RTSP password injected from Nomad variables (sourced from sops secrets).
      # Reference in config.yml as: {FRIGATE_RTSP_PASSWORD}
      template {
        data        = <<EOH
FRIGATE_RTSP_PASSWORD="{{ with nomadVar "nomad/jobs/frigate" }}{{ .rtsp_password }}{{ end }}"
EOH
        destination = "secrets/frigate.env"
        env         = true
      }

      service {
        name = "frigate"
        port = "http"

        tags = [
          "traefik.enable=true",
          "traefik.http.routers.frigate.middlewares=auth@file",
          "traefik.http.routers.frigate-token.rule=Host(`n7gdph5cuh7bd1cakbq8s099rvrv3qhs-frigate.othrayte.one`)",
        ]

        check {
          name     = "alive"
          type     = "http"
          path     = "/api/version"
          port     = "http"
          interval = "10s"
          timeout  = "5s"
        }
      }

      env {
        TZ = "Australia/Melbourne"
      }

      # config.yml lives here (read from CIFS). SQLite DB is at /alloc/data/frigate.db
      # (local NVMe, managed by Litestream). Requires in config.yml:
      #   database:
      #     path: /alloc/data/frigate.db
      volume_mount {
        volume      = "unraid_appdata_frigate"
        destination = "/config"
        read_only   = false
      }

      # Recordings, clips, and exports.
      volume_mount {
        volume      = "unraid_media_frigate"
        destination = "/media/frigate"
        read_only   = false
      }

      resources {
        # GPU handles inference; CPU manages stream ingestion, motion detection, and recording.
        cpu    = 2000
        memory = 2048
      }
    }

    volume "unraid_appdata_frigate" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_frigate"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["nobrl", "uid=0", "gid=0"]
      }
    }

    volume "unraid_media_frigate" {
      type            = "csi"
      read_only       = false
      source          = "unraid_media_frigate"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["nobrl", "uid=0", "gid=0"]
      }
    }
  }
}
2-nomad-config/frigate.tf (new file, 23 lines)
@@ -0,0 +1,23 @@
resource "nomad_job" "frigate" {
  jobspec = file("frigate.nomad.hcl")
}

resource "nomad_variable" "frigate" {
  path = "nomad/jobs/frigate"
  items = {
    rtsp_password = data.sops_file.secrets.data["frigate.rtsp_password"]
  }
}

module "appdata_frigate" {
  source = "./modules/appdata"
  name   = "frigate"
}

module "unraid_smb_frigate_media" {
  source = "./modules/unraid_smb"
  name   = "frigate"
  share  = "media"
  subDir = "frigate"
}
@@ -27,6 +27,8 @@ job "gitea" {
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.gitea.middlewares=auth@file",
        # Token subdomain — no auth middleware — used by act_runner step containers for git checkout
        "traefik.http.routers.gitea-token.rule=Host(`gitea-1ef0bea6b75a4fd3e9393a9f7f7e4b02.othrayte.one`)",
      ]

      check {

@@ -37,6 +39,17 @@ job "gitea" {
      }
    }

    # Separate service for Consul Connect ingress (address_mode=alloc avoids hairpin NAT issue)
    service {
      name         = "code-connect"
      port         = "http"
      address_mode = "alloc"

      connect {
        sidecar_service {}
      }
    }

    task "gitea" {
      driver = "docker"

@@ -33,28 +33,7 @@ resource "postgresql_database" "gitea" {
  owner = postgresql_role.gitea.name
}

resource "nomad_csi_volume_registration" "unraid_appdata_gitea" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_appdata_gitea"
  name      = "unraid_appdata_gitea"

  external_id = "unraid_appdata_gitea"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/appdata"
    subDir = "gitea" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "appdata_gitea" {
  source = "./modules/appdata"
  name   = "gitea"
}

@@ -34,107 +34,29 @@ resource "nomad_variable" "immich" {
  }
}

resource "nomad_csi_volume_registration" "unraid_appdata_immich" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_appdata_immich"
  name      = "unraid_appdata_immich"

  external_id = "unraid_appdata_immich"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/appdata"
    subDir = "immich" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "appdata_immich" {
  source = "./modules/appdata"
  name   = "immich"
}

resource "nomad_csi_volume_registration" "unraid_media_photosvideos" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_media_photosvideos"
  name      = "unraid_media_photosvideos"

  external_id = "unraid_media_photosvideos"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/media"
    subDir = "Photos and Videos" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "unraid_smb_immich_photosvideos" {
  source = "./modules/unraid_smb"
  name   = "photosvideos"
  share  = "media"
  subDir = "Photos and Videos"
}

resource "nomad_csi_volume_registration" "unraid_media_immich_encodedvideo" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_media_immich_encodedvideo"
  name      = "unraid_media_immich_encodedvideo"

  external_id = "unraid_media_immich_encodedvideo"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/media"
    subDir = "immich/encoded-video" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "unraid_smb_immich_encodedvideo" {
  source = "./modules/unraid_smb"
  name   = "immich_encodedvideo"
  share  = "media"
  subDir = "immich/encoded-video"
}

resource "nomad_csi_volume_registration" "unraid_mediadump_photosvideos" {
  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id = "unraid_mediadump_photosvideos"
  name      = "unraid_mediadump_photosvideos"

  external_id = "unraid_mediadump_photosvideos"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/media-dump"
    subDir = "Photos and Videos" # Note: Needs to be manually created on the share
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
module "unraid_smb_immich_mediadump_photosvideos" {
  source = "./modules/unraid_smb"
  name   = "photosvideos"
  id     = "unraid_mediadump_photosvideos"
  share  = "media-dump"
  subDir = "Photos and Videos"
}

2-nomad-config/jellyfin.nomad.hcl (new file, 88 lines)
@@ -0,0 +1,88 @@
job "jellyfin" {
  group "jellyfin" {
    count = 1

    network {
      port "http" {
        to = 8096
      }
    }

    task "jellyfin" {
      driver = "docker"

      config {
        image = "lscr.io/linuxserver/jellyfin:latest"
        ports = ["http"]
      }

      service {
        name = "jellyfin"
        port = "http"

        tags = [
          "traefik.enable=true",
          "traefik.http.routers.jellyfin.middlewares=auth@file",
          "traefik.http.routers.jellyfin-token.rule=Host(`c3ll7nbevl5j4j8rcnfxnr95q48fuayz-jellyfin.othrayte.one`)",
        ]

        check {
          name     = "alive"
          type     = "tcp"
          port     = "http"
          interval = "10s"
          timeout  = "2s"
        }
      }

      env {
        PUID = 1000
        PGID = 1000
        TZ   = "Australia/Melbourne"

        JELLYFIN_PublishedServerUrl = "https://jellyfin.othrayte.one"
      }

      volume_mount {
        volume      = "unraid_appdata_jellyfin"
        destination = "/config"
        read_only   = false
      }

      volume_mount {
        volume      = "unraid_media_jellyfin"
        destination = "/data"
        read_only   = false
      }

      resources {
        cpu    = 500
        memory = 2048
      }
    }

    volume "unraid_appdata_jellyfin" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_jellyfin"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }

    volume "unraid_media_jellyfin" {
      type            = "csi"
      read_only       = false
      source          = "unraid_media_jellyfin"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["nobrl", "uid=1000", "gid=1000"]
      }
    }
  }
}
2-nomad-config/jellyfin.tf (new file, 15 lines)
@@ -0,0 +1,15 @@
resource "nomad_job" "jellyfin" {
  jobspec = file("jellyfin.nomad.hcl")
}

module "appdata_jellyfin" {
  source = "./modules/appdata"
  name   = "jellyfin"
}

module "unraid_smb_jellyfin_media" {
  source = "./modules/unraid_smb"
  name   = "jellyfin"
  share  = "media"
}
@@ -12,7 +12,7 @@ terraform {
    }
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 5"
      version = ">= 5"
    }
    postgresql = {
      source = "cyrilgdn/postgresql"
2-nomad-config/modules/appdata/main.tf (new file, 62 lines)
@@ -0,0 +1,62 @@
terraform {
  required_providers {
    sops = {
      source  = "carlpett/sops"
      version = "~> 0.5"
    }
  }
}

variable "name" {
  description = "Name of the application, also used as subdir on the unraid appdata share"
  type        = string
}

variable "id" {
  description = "ID to use for the volume registration, defaults to name with - replaced by _"
  type        = string
  default     = null
}

variable "access_mode" {
  description = "CSI volume access mode"
  type        = string
  default     = "single-node-writer"
  validation {
    condition     = contains(["single-node-writer", "multi-node-multi-writer"], var.access_mode)
    error_message = "access_mode must be either 'single-node-writer' or 'multi-node-multi-writer'"
  }
}

data "nomad_plugin" "smb" {
  plugin_id        = "smb"
  wait_for_healthy = true
}

data "sops_file" "secrets" {
  source_file = "secrets/secrets.enc.json"
}

resource "nomad_csi_volume_registration" "this" {
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id   = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
  name        = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
  external_id = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"

  capability {
    access_mode     = var.access_mode
    attachment_mode = "file-system"
  }

  context = {
    source = "//betelgeuse-seven-unraid.lan/appdata"
    subDir = var.name
  }

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
}
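A usage sketch for the module above (the call name is hypothetical, no such call appears in this change): with only `name` set, the volume ID falls back to `unraid_appdata_<name>` with hyphens replaced by underscores, and that is the ID a job's `volume` block must reference.

```hcl
# Hypothetical consumer of modules/appdata for illustration only.
module "appdata_act_runner" {
  source = "./modules/appdata"
  name   = "act-runner" # registers CSI volume "unraid_appdata_act_runner"
}
```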
2-nomad-config/modules/unraid_smb/main.tf (new file, 62 lines)
@@ -0,0 +1,62 @@
terraform {
  required_providers {
    sops = {
      source  = "carlpett/sops"
      version = "~> 0.5"
    }
  }
}

variable "name" {
  description = "Name of the volume registration"
  type        = string
}

variable "id" {
  description = "ID to use for the volume registration, defaults to name with - replaced by _"
  type        = string
  default     = null
}

variable "share" {
  description = "Name of the SMB share on the unraid server"
  type        = string
}

variable "subDir" {
  description = "Subdirectory within the SMB share"
  type        = string
  default     = null
}

data "nomad_plugin" "smb" {
  plugin_id        = "smb"
  wait_for_healthy = true
}

data "sops_file" "secrets" {
  source_file = "secrets/secrets.enc.json"
}

resource "nomad_csi_volume_registration" "this" {
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id   = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
  name        = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
  external_id = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = merge({
    source = "//betelgeuse-seven-unraid.lan/${var.share}"
  }, var.subDir == null ? {} : { "subDir" = var.subDir })

  secrets = {
    "username" = "nomad"
    "password" = data.sops_file.secrets.data["unraid.nomad"]
  }
}
2-nomad-config/ntfy.nomad.hcl (new file, 89 lines)
@@ -0,0 +1,89 @@
job "ntfy" {
  group "ntfy" {
    network {
      mode = "bridge"
      port "http" {
        to = 80
      }
    }

    # Consul Connect sidecar with upstream to postgres
    service {
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "postgres"
              local_bind_port  = 5432
            }
          }
        }
      }
    }

    service {
      name = "ntfy"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.ntfy.middlewares=auth@file",
        # Token subdomain bypasses Authelia — ntfy's own token auth is sufficient for API access
        "traefik.http.routers.ntfy-token.rule=Host(`ntfy-2e30e5869ab6bfde4961012b48761a9b.othrayte.one`)",
      ]

      check {
        type     = "http"
        path     = "/healthz"
        interval = "10s"
        timeout  = "2s"
      }
    }

    # Users and tokens are provisioned declaratively via auth-users / auth-tokens in server.yml.
    # ntfy reads and applies them on every startup — no poststart task, no race conditions.
    #
    # Bcrypt hashes are not secrets and are hardcoded below (same as /etc/shadow — safe to commit).
    # Generate with: docker run --rm -it binwiederhier/ntfy user hash
    #   or: echo "mypassword" | docker run --rm -i binwiederhier/ntfy user hash
    # Required SOPS keys:
    #   ntfy.database_pw — postgres password for the ntfy role
    task "ntfy" {
      driver = "docker"

      config {
        image   = "binwiederhier/ntfy:latest"
        ports   = ["http"]
        command = "serve"
        volumes = [
          "local/server.yml:/etc/ntfy/server.yml",
        ]
      }

      env = {
        TZ = "Australia/Melbourne"
      }

      template {
        data        = <<EOF
base-url: "https://ntfy.othrayte.one"
listen-http: ":80"
database-url: "postgres://ntfy:{{ with nomadVar "nomad/jobs/ntfy" }}{{ .database_pw }}{{ end }}@localhost:5432/ntfy"
auth-default-access: "deny-all"
behind-proxy: true
enable-login: true
auth-users:
  - "admin:$2a$10$rLp4qagJnsA8Es5hQlISH.WrlzwMrXE2MBaEgz7zdd2lkAVu30lMy:admin"
EOF
        destination = "local/server.yml"
      }

      resources {
        cpu        = 50
        memory     = 64
        memory_max = 128
      }
    }
  }
}
2-nomad-config/ntfy.tf (new file, 21 lines)
@@ -0,0 +1,21 @@
resource "nomad_job" "ntfy" {
  jobspec = file("ntfy.nomad.hcl")
}

resource "nomad_variable" "ntfy" {
  path = "nomad/jobs/ntfy"
  items = {
    database_pw = data.sops_file.secrets.data["ntfy.database_pw"]
  }
}

resource "postgresql_role" "ntfy" {
  name     = "ntfy"
  password = data.sops_file.secrets.data["ntfy.database_pw"]
  login    = true
}

resource "postgresql_database" "ntfy" {
  name  = "ntfy"
  owner = postgresql_role.ntfy.name
}
2-nomad-config/openreader.nomad.hcl (new file, 116 lines)
@@ -0,0 +1,116 @@
job "openreader" {
  group "openreader" {
    network {
      mode = "bridge"
      port "http" {
        to = 3003
      }
    }

    # Consul Connect sidecar with upstream to postgres
    service {
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "postgres"
              local_bind_port  = 5432
            }
          }
        }
      }
    }

    service {
      name = "openreader"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.openreader.middlewares=auth@file",
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name         = "openreader-api"
      port         = "http"
      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)

      connect {
        sidecar_service {}
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "openreader" {
      driver = "docker"

      config {
        image = "ghcr.io/richardr1126/openreader:v2.1.2"
        ports = ["http"]
      }

      env = {
        TZ = "Australia/Melbourne"

        # Use embedded SeaweedFS for blob storage (data lives in /app/docstore/seaweedfs).
        # Port 8333 is not exposed; browser uploads/downloads fall back through the app API.
        USE_EMBEDDED_WEED_MINI = "true"
        S3_ENDPOINT            = "http://localhost:8333"
        S3_FORCE_PATH_STYLE    = "true"

        # Auth is intentionally disabled (no BASE_URL / AUTH_SECRET set).
        # Access is controlled by the Authelia middleware on the Traefik router above.

        # To enable server-side library import from an Unraid share, add a second CSI volume
        # mount for the share (e.g. unraid_media_books → /app/docstore/library:ro) and set:
        #   IMPORT_LIBRARY_DIR = "/app/docstore/library"
      }

      template {
        data        = <<EOF
POSTGRES_URL=postgresql://openreader:{{ with nomadVar "nomad/jobs/openreader" }}{{ .database_pw }}{{ end }}@localhost:5432/openreader
EOF
        destination = "secrets/openreader.env"
        env         = true
      }

      volume_mount {
        volume      = "unraid_appdata_openreader"
        destination = "/app/docstore"
        read_only   = false
      }

      resources {
        cpu        = 200
        memory     = 750
        memory_max = 1024
      }
    }

    volume "unraid_appdata_openreader" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_openreader"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }
  }
}
2-nomad-config/openreader.tf (new file, 26 lines)
@@ -0,0 +1,26 @@
resource "nomad_job" "openreader" {
  jobspec = file("openreader.nomad.hcl")
}

resource "nomad_variable" "openreader" {
  path = "nomad/jobs/openreader"
  items = {
    database_pw = data.sops_file.secrets.data["openreader.database_pw"]
  }
}

resource "postgresql_role" "openreader" {
  name     = "openreader"
  password = data.sops_file.secrets.data["openreader.database_pw"]
  login    = true
}

resource "postgresql_database" "openreader" {
  name  = "openreader"
  owner = postgresql_role.openreader.name
}

module "appdata_openreader" {
  source = "./modules/appdata"
  name   = "openreader"
}
2-nomad-config/prowlarr.nomad.hcl (new file, 119 lines)
@@ -0,0 +1,119 @@
job "prowlarr" {
  group "prowlarr" {
    network {
      mode = "bridge"
      port "http" {
        to = 9696
      }
    }

    service {
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "postgres"
              local_bind_port  = 5432
            }
            upstreams {
              destination_name = "sonarr-api"
              local_bind_port  = 8989
            }
          }
        }
      }
    }

    service {
      name = "prowlarr"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.prowlarr.middlewares=auth@file",
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name         = "prowlarr-api"
      port         = "http"
      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)

      connect {
        sidecar_service {}
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "prowlarr" {
      driver = "docker"

      config {
        image = "lscr.io/linuxserver/prowlarr:latest"
        ports = ["http"]
      }

      env {
        PUID = 1000
        PGID = 1000
        TZ   = "Australia/Melbourne"

        # https://wiki.servarr.com/prowlarr/postgres-setup

        # Disable internal auth to use Traefik + Authelia
        PROWLARR__AUTH__REQUIRED = "Enabled"
        PROWLARR__AUTH__METHOD   = "External"

        PROWLARR__POSTGRES__USER   = "prowlarr"
        PROWLARR__POSTGRES__HOST   = "localhost"
        PROWLARR__POSTGRES__PORT   = "5432"
        PROWLARR__POSTGRES__MAINDB = "prowlarr-main"
        PROWLARR__POSTGRES__LOGDB  = "prowlarr-log"
      }

      volume_mount {
        volume      = "unraid_appdata_prowlarr"
        destination = "/config"
        read_only   = false
      }

      resources {
        cpu    = 150
        memory = 512
      }

      template {
        data        = <<EOH
PROWLARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/prowlarr" }}{{ .database_pw }}{{ end }}"
EOH
        destination = "secrets/db.env"
        env         = true # Load the file as environment variables
      }
    }

    volume "unraid_appdata_prowlarr" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_prowlarr"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }
  }
}
2-nomad-config/prowlarr.tf (new file, 32 lines)
@@ -0,0 +1,32 @@
resource "nomad_job" "prowlarr" {
  jobspec = file("prowlarr.nomad.hcl")
}

resource "nomad_variable" "prowlarr" {
  path = "nomad/jobs/prowlarr"
  items = {
    database_pw = data.sops_file.secrets.data["prowlarr.database_pw"]
  }
}

# https://wiki.servarr.com/prowlarr/postgres-setup
resource "postgresql_role" "prowlarr" {
  name     = "prowlarr"
  password = data.sops_file.secrets.data["prowlarr.database_pw"]
  login    = true
}

resource "postgresql_database" "prowlarr_main" {
  name  = "prowlarr-main"
  owner = postgresql_role.prowlarr.name
}

resource "postgresql_database" "prowlarr_log" {
  name  = "prowlarr-log"
  owner = postgresql_role.prowlarr.name
}

module "appdata_prowlarr" {
  source = "./modules/appdata"
  name   = "prowlarr"
}
@@ -1,7 +1,7 @@
# Terraform State

Mount the state on the fileshare to 2-nomad-config/.tfstate/
`sudo mount -t cifs //192.168.1.192/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`
`sudo mount -t cifs //betelgeuse-seven-unraid.lan/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`

# Tailscale Oauth Client

@@ -20,6 +20,7 @@ Edit the secrets using `sops secrets/secrets.enc.json`
# Bootstrapping (starting without PostgreSQL running)

    terraform apply -target=module.data
    terraform apply -target=module.ingress

## Restoring PostgreSQL DBs

2-nomad-config/renovate.nomad.hcl (new file, 67 lines)
@@ -0,0 +1,67 @@
job "renovate" {
  type = "batch"

  periodic {
    cron             = "0 4 * * *" # Daily at 4am
    prohibit_overlap = true
  }

  group "renovate" {
    network {
      mode = "bridge"
    }

    # Consul Connect sidecar with upstream to Gitea (service: code-connect, port 3000)
    service {
      name = "renovate"
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "code-connect"
              local_bind_port  = 3000
            }
          }
        }
      }
    }

    task "renovate" {
      driver = "docker"

      config {
        image = "renovate/renovate:latest"
      }

      env = {
        RENOVATE_PLATFORM     = "gitea"
        RENOVATE_ENDPOINT     = "http://localhost:3000"
        RENOVATE_GIT_URL      = "endpoint"
        RENOVATE_REPOSITORIES = "othrayte/infra"
        RENOVATE_GIT_AUTHOR   = "Renovate Bot <renovate@othrayte.one>"
        LOG_LEVEL             = "debug"
      }

      # Required SOPS key:
      #   renovate.gitea_token — PAT for the renovate bot account in Gitea
      #     Create a dedicated 'renovate' user in Gitea with these token scopes:
      #     repo (read+write), user (read), issue (read+write), organization (read)
      #   renovate.github_token — read-only GitHub PAT (any account) for
      #     fetching changelogs and avoiding github.com API rate limits
      template {
        data        = <<EOF
RENOVATE_TOKEN={{ with nomadVar "nomad/jobs/renovate" }}{{ .gitea_token }}{{ end }}
RENOVATE_GITHUB_COM_TOKEN={{ with nomadVar "nomad/jobs/renovate" }}{{ .github_token }}{{ end }}
EOF
        destination = "secrets/renovate.env"
        env         = true
      }

      resources {
        cpu        = 500
        memory     = 512
        memory_max = 1024
      }
    }
  }
}
2-nomad-config/renovate.tf (new file, 11 lines)
@@ -0,0 +1,11 @@
resource "nomad_job" "renovate" {
  jobspec = file("renovate.nomad.hcl")
}

resource "nomad_variable" "renovate" {
  path = "nomad/jobs/renovate"
  items = {
    gitea_token  = data.sops_file.secrets.data["renovate.gitea_token"]
    github_token = data.sops_file.secrets.data["renovate.github_token"]
  }
}
@@ -33,6 +33,32 @@
  "immich": {
    "database_pw": "ENC[AES256_GCM,data:SUyMGqu7deZyZpVt,iv:asZehOvn/JamwFyS+Xl9Xpr4JFkKlJjHVw7LywYOxTc=,tag:plRvuv7+ievfEhxurBl7YQ==,type:str]"
  },
  "sonarr": {
    "database_pw": "ENC[AES256_GCM,data:TN381ZYJLeUHX9U3Jnd9+w==,iv:lKaMYHeaSGXJd0/EGxkDY2l2v62xG3xs8TVC0HwXL94=,tag:3z5rK+2RfJHJdQc7KC9KmA==,type:str]"
  },
  "pia": {
    "user": "ENC[AES256_GCM,data:kniAs2gCTq4=,iv:1Oaht02fFSQwzWmWEtjsJZCJChPJsZhwRyux8dMY2CU=,tag:NqWaUhuYTSFZZK/CpSisdg==,type:str]",
    "pass": "ENC[AES256_GCM,data:c8qWGcaI0p7MyQ==,iv:/3ehYrgdDwjzFdXyX/vKTK+zt6u7gWNRZBIdWDG1KiE=,tag:jqfIMnB1OKchBZ4U2s1o4g==,type:str]"
  },
  "prowlarr": {
    "database_pw": "ENC[AES256_GCM,data:FkW5LPoyn8bh0UfWcFq3og==,iv:SFq4Xsdz3FfCDyPjIaAmz5nsC/SPdFrR03GCr3KE/nw=,tag:PVYj7hSWDnfeE7igSXGBSA==,type:str]"
  },
  "frigate": {
    "rtsp_password": "ENC[AES256_GCM,data:8vq06/IkNOUgpHmf,iv:lj8buuIC0ub0YOUiOiaN6tokkIT2/+bBwFNz2QXmCd4=,tag:EMm/bIHdJSAtjYAlrNOCMw==,type:str]"
  },
  "openreader": {
    "database_pw": "ENC[AES256_GCM,data:2Ey9Ypb2Ked/LP/ApJhCqhKWuzognxVK7ku60nERp7I=,iv:KdLFD+fuNpYmPEU5G96SvFcQeZB0XlnOh/6uf7OfFqI=,tag:h7DQlqx5fxhiHuWyFd7svQ==,type:str]"
  },
  "ntfy": {
    "database_pw": "ENC[AES256_GCM,data:79c2KFs3tcbet1dSGnkSDlAeKLCZrh4aMYLXTROM8w==,iv:eZ4limyjl++nsvHUzPKy82hfLZEOc+XQYpO6Czo/8os=,tag:iX9SiEACQ5IM8f1jhZh5Qw==,type:str]"
  },
  "renovate": {
    "gitea_token": "ENC[AES256_GCM,data:/J3CDMgWZLe20oQ+ENKBMi8fs/+jgsARV7xihMq0OLmRk8C8ae/IXg==,iv:e7WYOanSOCZ/LhN6SKrH0VrR3xLPTTppOKpGpSl+oAc=,tag:XBAilRdK3jL7WtM+92Fsmg==,type:str]",
    "github_token": "ENC[AES256_GCM,data:omZpdsTV1aFgQ9PjIApITEyIRKk6Z8QyvD2Kp5tJnBWzFCm4v2lRAg==,iv:cKL7z+CSChzF9eZEcske2lbmx9KV6CrWw0tn7rmP/10=,tag:gon3Sc1d3ntNSbWwenHuOw==,type:str]"
  },
  "act-runner": {
    "registration_token": "ENC[AES256_GCM,data:RnDvcNh69lLlL/ms+sMPKhhc+ECtc5hUHSkAQZv8e77iTD/QPd356Q==,iv:sl2Aua8rTe6cKYQAUC7O4UyHajGy1LgG/ZNLTVP4SyE=,tag:JjdaQqZ4PaWjfoiVmBl6lQ==,type:str]"
  },
  "sops": {
    "age": [
      {

@@ -40,8 +66,8 @@
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByUWM4ZDVVbGFrUGdMRHBX\nUFBmU3Nlc0RBSzhFK0tHNHpkQXUvUVdiZUZJCmpRN1lFdENpWW0rcThjVlVQNUl6\nWnlLU0RnQ3FZby81Ly8xTFBrek9nMncKLS0tIFQ4UTRNOC9CRmx4OFJWem1wckZz\nUDFTSzdWZldFK3FqcTNWTWRyNDhHQ2MKS811mR5xn7qiC/aVgPFYJ5c6Q3zxRfcr\nHcvxUvB01vNJKZpRg92vvKPkV6lQO3DXCT98OdfwiymlEOvYxg71Pg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-10-13T12:19:46Z",
    "mac": "ENC[AES256_GCM,data:QJ1Prqf37xMZbvyMvjBVxZOiOr07CmCYrWmr+5hwDsEmG4eEC9sPF/UY+/Cy2OTzsMp+cHb6C3maAo09O171wj6nJIZucg3B9fjEW2+4AoO217G4vmauMl3FFkut2CuvVV9zt2B/fLAskRg/yeYYOhjzPkWA6lyeV31sV5ZQ6Kw=,iv:5WfkmNr5vdfTqp6+INjQN/Zmc7/iJNc/2auO9h3En08=,tag:snBgJyMzBXVAkV3zERkK8g==,type:str]",
    "lastmodified": "2026-04-18T07:41:42Z",
    "mac": "ENC[AES256_GCM,data:+HhhsiZXok4BZI05tG3p9veZaj51kELSQlWFYMSInv7bGfEadmOrJqCxaGrFcNkMmgVPx80jWQFrILfVLW5MUvEsHAhD4Vza2TSWeUq1HuL9DbMxsK2G9Y1fbthd12r/++dDcXxVnTUf/rCD70in/+g/zRObocAnUcFEcIqx1JE=,iv:pS+aj+47J4bYZYGlMVniQVTlLt4jtCLUT7oROJLUkZo=,tag:+lznxDhs2C3bcz5quxfHjA==,type:str]",
    "encrypted_regex": "^(.*)$",
    "version": "3.10.2"
  }

140
2-nomad-config/sonarr.nomad.hcl
Normal file
@@ -0,0 +1,140 @@
|
||||
job "sonarr" {
|
||||
group "sonarr" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "http" {
|
||||
to = 8989
|
||||
}
|
||||
}
|
||||
service {
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
upstreams {
|
||||
destination_name = "postgres"
|
||||
local_bind_port = 5432
|
||||
}
|
||||
upstreams {
|
||||
destination_name = "deluge-api"
|
||||
local_bind_port = 8112
|
||||
}
|
||||
upstreams {
|
||||
destination_name = "prowlarr-api"
|
||||
local_bind_port = 9696
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "sonarr"
|
||||
port = "http"
|
||||
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.sonarr.middlewares=auth@file",
|
||||
]
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/"
|
||||
interval = "10s"
|
||||
timeout = "2s"
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
name = "sonarr-api"
|
||||
port = "http"
|
||||
address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
|
||||
|
||||
connect {
|
||||
sidecar_service {}
|
||||
}
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/"
|
||||
interval = "10s"
|
||||
timeout = "2s"
|
||||
}
|
||||
}
|
||||
|
||||
task "sonarr" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "lscr.io/linuxserver/sonarr:latest"
|
||||
ports = ["http"]
|
||||
}
|
||||
|
||||
env {
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
TZ = "Australia/Melbourne"
|
||||
|
||||
# https://wiki.servarr.com/sonarr/environment-variables
|
||||
|
||||
# Auth is handled externally by Traefik + Authelia (method = External); Sonarr's own login form is bypassed
|
||||
SONARR__AUTH__REQUIRED = "Enabled"
|
||||
SONARR__AUTH__METHOD = "External"
|
||||
|
||||
SONARR__POSTGRES__USER = "sonarr"
|
||||
SONARR__POSTGRES__HOST = "localhost"
|
||||
SONARR__POSTGRES__PORT = "5432"
|
||||
SONARR__POSTGRES__MAINDB = "sonarr-main"
|
||||
SONARR__POSTGRES__LOGDB = "sonarr-log"
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "unraid_appdata_sonarr"
|
||||
destination = "/config"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "unraid_media_sonarr"
|
||||
destination = "/data"
|
||||
read_only = false
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 150
|
||||
memory = 1024
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
SONARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/sonarr" }}{{ .database_pw }}{{ end }}"
|
||||
EOH
|
||||
destination = "secrets/db.env"
|
||||
env = true # Load the file as environment variables
|
||||
}
|
||||
}
|
||||
|
||||
volume "unraid_appdata_sonarr" {
|
||||
type = "csi"
|
||||
read_only = false
|
||||
source = "unraid_appdata_sonarr"
|
||||
access_mode = "single-node-writer"
|
||||
attachment_mode = "file-system"
|
||||
|
||||
mount_options {
|
||||
mount_flags = ["uid=1000", "gid=1000"]
|
||||
}
|
||||
}
|
||||
|
||||
volume "unraid_media_sonarr" {
|
||||
type = "csi"
|
||||
read_only = false
|
||||
source = "unraid_media_sonarr"
|
||||
access_mode = "single-node-writer"
|
||||
attachment_mode = "file-system"
|
||||
|
||||
mount_options {
|
||||
mount_flags = ["nobrl", "uid=1000", "gid=1000"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
38
2-nomad-config/sonarr.tf
Normal file
@@ -0,0 +1,38 @@
|
||||
resource "nomad_job" "sonarr" {
|
||||
jobspec = file("sonarr.nomad.hcl")
|
||||
}
|
||||
|
||||
resource "nomad_variable" "sonarr" {
|
||||
path = "nomad/jobs/sonarr"
|
||||
items = {
|
||||
database_pw = data.sops_file.secrets.data["sonarr.database_pw"]
|
||||
}
|
||||
}
|
||||
|
||||
# https://wiki.servarr.com/sonarr/postgres-setup#schema-creation
|
||||
resource "postgresql_role" "sonarr" {
|
||||
name = "sonarr"
|
||||
password = data.sops_file.secrets.data["sonarr.database_pw"]
|
||||
login = true
|
||||
}
|
||||
|
||||
resource "postgresql_database" "sonarr_main" {
|
||||
name = "sonarr-main"
|
||||
owner = postgresql_role.sonarr.name
|
||||
}
|
||||
|
||||
resource "postgresql_database" "sonarr_log" {
|
||||
name = "sonarr-log"
|
||||
owner = postgresql_role.sonarr.name
|
||||
}
|
||||
|
||||
module "appdata_sonarr" {
|
||||
source = "./modules/appdata"
|
||||
name = "sonarr"
|
||||
}
|
||||
|
||||
module "unraid_smb_sonarr_media" {
|
||||
source = "./modules/unraid_smb"
|
||||
name = "sonarr"
|
||||
share = "media"
|
||||
}
|
||||
@@ -1,345 +0,0 @@
|
||||
job "teamsstatus" {
|
||||
group "app" {
|
||||
task "teamsstatus" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "python:3.11-slim"
|
||||
command = "/local/start.sh"
|
||||
}
|
||||
|
||||
# Template for the startup script
|
||||
template {
|
||||
data = <<EOF
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd /local
|
||||
|
||||
echo "Starting Teams Status Updater service..."
|
||||
echo "Installing required Python packages..."
|
||||
pip install msal requests
|
||||
|
||||
echo "Running Teams Status Updater script..."
|
||||
exec python teamsstatus_standalone.py 2>&1
|
||||
EOF
|
||||
destination = "local/start.sh"
|
||||
perms = "755"
|
||||
}
|
||||
|
||||
# Template for the token cache
|
||||
template {
|
||||
data = "{{ with nomadVar \"nomad/jobs/teamsstatus\" }}{{ .token_cache_json }}{{ end }}"
|
||||
destination = "local/token_cache.json"
|
||||
}
|
||||
|
||||
# Template for the Python script
|
||||
template {
|
||||
data = <<EOF
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime, timedelta, timezone
|
||||
import random
|
||||
import json
|
||||
import msal
|
||||
import requests
|
||||
import os
|
||||
import atexit
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Embedded journey data
|
||||
JOURNEY_DATA = '''Day Start Time AEST End Time AEST Start Dist End Dist Start Location End Location
|
||||
08/06/2025 08:00:00 19:10:00 0km 973km Melbourne Port Pirie SA
|
||||
09/06/2025 07:30:00 19:35:00 973km 2119km Port Pirie SA Mundrabilla WA
|
||||
10/06/2025 06:45:00 15:38:00 2119km 2916km Mundrabilla WA Kalgoorlie WA
|
||||
11/06/2025 10:45:00 17:55:00 2916km 3512km Kalgoorlie WA Perth
|
||||
13/06/2025 07:00:00 13:58:00 3512km 4083km Perth Kalbarri WA
|
||||
15/06/2025 07:00:00 16:52:00 4083km 4862km Kalbarri WA Coral Bay WA
|
||||
18/06/2025 06:00:00 16:52:00 4862km 5554km Coral Bay WA Karijini NP WA
|
||||
21/06/2025 14:00:00 15:21:00 5554km 5686km Karijini NP WA Karijini NP WA
|
||||
22/06/2025 06:00:00 16:23:00 5686km 6559km Karijini NP WA Broome WA
|
||||
23/06/2025 06:00:00 19:10:00 6559km 7688km Broome WA Kununurra WA
|
||||
27/06/2025 06:00:00 16:29:00 7688km 8593km Kununurra WA Derby WA
|
||||
28/06/2025 07:00:00 16:06:00 8593km 9358km Derby WA Port Hedland WA
|
||||
29/06/2025 07:00:00 16:31:00 9358km 10150km Port Hedland WA Exmouth WA
|
||||
02/07/2025 07:00:00 15:13:00 10150km 10866km Exmouth WA Shark Bay WA
|
||||
05/07/2025 07:00:00 17:12:00 10866km 11712km Shark Bay WA Fremantle WA
|
||||
06/07/2025 07:00:00 15:27:00 11712km 12411km Fremantle WA Esperance WA
|
||||
08/07/2025 06:00:00 18:09:00 12411km 13144km Esperance WA Madura WA
|
||||
09/07/2025 06:45:00 16:39:00 13144km 13821km Madura WA Ceduna SA
|
||||
11/07/2025 08:30:00 17:46:00 13821km 14599km Ceduna SA Adelaide
|
||||
12/07/2025 08:30:00 18:52:00 14599km 15348km Adelaide Melbourne'''
|
||||
|
||||
# Global variables for journey segments
|
||||
_segments = []
|
||||
|
||||
def setup_token_cache(cache_filename="token_cache.json"):
|
||||
"""Set up and return a serializable token cache"""
|
||||
cache = msal.SerializableTokenCache()
|
||||
|
||||
cache.deserialize(open(cache_filename, "r").read())
|
||||
|
||||
atexit.register(
|
||||
lambda: open(cache_filename, "w").write(cache.serialize())
|
||||
if cache.has_state_changed else None
|
||||
)
|
||||
return cache
|
||||
|
||||
def get_msal_app(client_id, authority="https://login.microsoftonline.com/organizations", cache_filename="token_cache.json"):
|
||||
"""Create and return an MSAL PublicClientApplication"""
|
||||
cache = setup_token_cache(cache_filename)
|
||||
return msal.PublicClientApplication(
|
||||
client_id,
|
||||
authority=authority,
|
||||
token_cache=cache,
|
||||
)
|
||||
|
||||
def acquire_token(app, scope):
|
||||
"""Acquire a token using the MSAL app"""
|
||||
result = None
|
||||
|
||||
# Check if user account exists in cache
|
||||
accounts = app.get_accounts(username=None)
|
||||
if accounts:
|
||||
logging.debug("Account(s) exists in cache, probably with token too. Let's try.")
|
||||
logging.debug("Account(s) already signed in:")
|
||||
for a in accounts:
|
||||
logging.debug(a["username"])
|
||||
chosen = accounts[0] # Assuming the end user chose this one to proceed
|
||||
logging.debug(f"Automatically using first account: {chosen['username']}")
|
||||
# Try to find a token in cache for this account
|
||||
result = app.acquire_token_silent(scope, account=chosen)
|
||||
|
||||
# If no suitable token was found, get a new one
|
||||
if not result:
|
||||
logging.debug("No suitable token exists in cache. Let's get a new one from AAD.")
|
||||
print("A local browser window will be open for you to sign in. CTRL+C to cancel.")
|
||||
result = app.acquire_token_interactive(scope)
|
||||
|
||||
# Validate the result
|
||||
if "access_token" not in result:
|
||||
logging.error(result.get("error"))
|
||||
logging.error(result.get("error_description"))
|
||||
logging.debug(f"Correlation ID: {result.get('correlation_id')}")
|
||||
return None
|
||||
|
||||
return result["access_token"]
|
||||
|
||||
def set_teams_status_message(access_token, user_id, status_message, expiration_date_time="2025-06-01T12:00:00", time_zone="UTC"):
|
||||
"""Set the status message for a Teams user"""
|
||||
url = f"https://graph.microsoft.com/v1.0/users/{user_id}/presence/microsoft.graph.setStatusMessage"
|
||||
|
||||
payload = {
|
||||
"statusMessage": {
|
||||
"message": {
|
||||
"content": status_message + "<pinnednote></pinnednote>",
|
||||
"contentType": "text",
|
||||
}
|
||||
},
|
||||
"expirationDateTime": {
|
||||
"dateTime": expiration_date_time,
|
||||
"timeZone": time_zone
|
||||
},
|
||||
}
|
||||
|
||||
headers = {
|
||||
'Authorization': f'Bearer {access_token}',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
logging.debug(f"Setting status message for user {user_id}")
|
||||
|
||||
response = requests.post(url, json=payload, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
logging.info(f"Teams status message set to: {status_message}")
|
||||
return True
|
||||
else:
|
||||
logging.error(f"Failed to set Teams status message: {response.status_code}")
|
||||
return False
|
||||
|
||||
def _load_segments():
|
||||
"""Load the journey segments from embedded data into memory"""
|
||||
global _segments
|
||||
if _segments: # Already loaded
|
||||
return
|
||||
|
||||
aest = timezone(timedelta(hours=10))
|
||||
|
||||
for line in JOURNEY_DATA.split('\n')[1:]: # Skip header
|
||||
day, start_time, end_time, start_dist, end_dist, start_loc, end_loc = line.strip().split('\t')
|
||||
|
||||
# Convert day and times to datetime in AEST
|
||||
day_start = datetime.strptime(f"{day} {start_time}", "%d/%m/%Y %H:%M:%S").replace(tzinfo=aest)
|
||||
day_end = datetime.strptime(f"{day} {end_time}", "%d/%m/%Y %H:%M:%S").replace(tzinfo=aest)
|
||||
|
||||
# Extract the numeric distance values
|
||||
start_dist = int(start_dist.rstrip('km'))
|
||||
end_dist = int(end_dist.rstrip('km'))
|
||||
|
||||
_segments.append({
|
||||
'start_time': day_start,
|
||||
'end_time': day_end,
|
||||
'start_dist': start_dist,
|
||||
'end_dist': end_dist,
|
||||
'start_location': start_loc,
|
||||
'end_location': end_loc
|
||||
})
|
||||
|
||||
def get_trip_info(target_datetime):
|
||||
"""Determine the distance travelled and locations for the current datetime."""
|
||||
if target_datetime.tzinfo is None:
|
||||
raise ValueError("target_datetime must be timezone-aware")
|
||||
|
||||
# Ensure data is loaded
|
||||
_load_segments()
|
||||
|
||||
# Before journey starts
|
||||
if not _segments or target_datetime < _segments[0]['start_time']:
|
||||
start_loc = end_loc = _segments[0]['start_location']
|
||||
return (0, start_loc, end_loc)
|
||||
|
||||
# During journey
|
||||
for i, segment in enumerate(_segments):
|
||||
# If target is before this segment starts
|
||||
if target_datetime < segment['start_time']:
|
||||
prev_segment = _segments[i-1]
|
||||
return (prev_segment['end_dist'], prev_segment['end_location'], prev_segment['end_location'])
|
||||
|
||||
# If target is during this segment, interpolate
|
||||
if segment['start_time'] <= target_datetime <= segment['end_time']:
|
||||
# Calculate what fraction of the segment has elapsed
|
||||
total_seconds = (segment['end_time'] - segment['start_time']).total_seconds()
|
||||
elapsed_seconds = (target_datetime - segment['start_time']).total_seconds()
|
||||
fraction = elapsed_seconds / total_seconds
|
||||
|
||||
# Interpolate the distance
|
||||
distance_delta = segment['end_dist'] - segment['start_dist']
|
||||
current_dist = segment['start_dist'] + int(distance_delta * fraction)
|
||||
return (current_dist, segment['start_location'], segment['end_location'])
|
||||
|
||||
# Between segments
|
||||
if i < len(_segments) - 1:
|
||||
next_segment = _segments[i + 1]
|
||||
if segment['end_time'] < target_datetime < next_segment['start_time']:
|
||||
return (segment['end_dist'], segment['end_location'], segment['end_location'])
|
||||
|
||||
# After journey ends
|
||||
return (_segments[-1]['end_dist'], _segments[-1]['end_location'], _segments[-1]['end_location'])
|
||||
|
||||
def build_message(distance, start_loc, end_loc):
|
||||
"""Build the status message based on distance and locations"""
|
||||
message = "On leave"
|
||||
if distance > 13144:
|
||||
message += f", driving my EV back from WA"
|
||||
elif distance > 2118:
|
||||
message += f", driving my EV around WA"
|
||||
elif distance > 0:
|
||||
message += f", driving my EV to WA"
|
||||
|
||||
if distance > 0:
|
||||
distance += random.randint(-5, 5)
|
||||
message += f", {distance}kms travelled so far"
|
||||
if start_loc != end_loc:
|
||||
message += f", next stop {end_loc}"
|
||||
else:
|
||||
message += f", near {start_loc}"
|
||||
|
||||
message += ", returning July 21st. Contacts {CIM: Grant Gorfine, Inserts: Daniel Pate, DevOps: Rob Duncan, else: Andrian Zubovic}"
|
||||
return message
|
||||
|
||||
def main():
|
||||
test_mode = False # Set to True to run in test mode
|
||||
time_scale = 1 # 1/600 # Set to 1/60 to run at 1 second per minute, 1 for normal speed
|
||||
|
||||
# Set start time to 7:30 AM AEST (UTC+10) on June 8th, 2025
|
||||
aest = timezone(timedelta(hours=10))
|
||||
start_time = datetime.now(aest)
|
||||
date_offset = datetime(2025, 6, 8, 7, 30, 0, tzinfo=aest) - start_time
|
||||
|
||||
if test_mode:
|
||||
logging.info("Running in test mode - status messages will not actually be set")
|
||||
|
||||
app = get_msal_app(client_id = "e6cda941-949f-495e-88f5-10eb45ffa0e7")
|
||||
|
||||
last_token_refresh = 0
|
||||
# Token refresh interval (60 minutes in seconds)
|
||||
TOKEN_REFRESH_INTERVAL = int(60 * 60) # Scale the 1 hour refresh interval
|
||||
|
||||
old_distance = -1
|
||||
while True:
|
||||
try:
|
||||
# Check if we need to refresh the token
|
||||
current_time = time.time()
|
||||
if current_time - last_token_refresh >= TOKEN_REFRESH_INTERVAL or last_token_refresh == 0:
|
||||
logging.info("Acquiring/refreshing access token...")
|
||||
access_token = acquire_token(app, scope = ["https://graph.microsoft.com/Presence.ReadWrite"])
|
||||
if not access_token:
|
||||
logging.error("Failed to acquire token")
|
||||
exit(1)
|
||||
last_token_refresh = current_time
|
||||
logging.info("Token successfully refreshed")
|
||||
|
||||
# Set the status message
|
||||
now = datetime.now(aest) # Get current time in AEST
|
||||
if time_scale != 1:
|
||||
# Adjust the current time based on the time scale
|
||||
now = start_time + (now - start_time) / time_scale
|
||||
now += date_offset # Adjust to the target start time
|
||||
distance, start_loc, end_loc = get_trip_info(now) # We only need distance for comparison
|
||||
if distance != old_distance:
|
||||
message = build_message(distance, start_loc, end_loc)
|
||||
timestamp = now.strftime("%Y-%m-%d %H:%M:%S %Z")
|
||||
if not test_mode:
|
||||
logging.info(f"[{timestamp}] Message: {message}")
|
||||
success = set_teams_status_message(
|
||||
access_token = access_token,
|
||||
user_id = "1b625872-d8a8-42f4-b237-dfa6d8062360",
|
||||
status_message = message,
|
||||
)
|
||||
else:
|
||||
logging.info(f"[TEST MODE] [{timestamp}] Message: {message}")
|
||||
success = True
|
||||
else:
|
||||
logging.debug("Status message has not changed, skipping update")
|
||||
success = True
|
||||
old_distance = distance
|
||||
|
||||
if success:
|
||||
wait_time = 900 * time_scale # Scale the 15 minute wait time
|
||||
logging.debug(f"Waiting {wait_time} seconds before updating status message again...")
|
||||
time.sleep(wait_time)
|
||||
else:
|
||||
last_token_refresh = 0 # Reset token refresh time on failure
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Status update interrupted by user. Exiting...")
|
||||
break
|
||||
except Exception as e:
|
||||
logging.error(f"An error occurred: {e}")
|
||||
time.sleep(300) # Wait 5 minutes before retrying
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit(main())
|
||||
|
||||
EOF
|
||||
destination = "local/teamsstatus_standalone.py"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 500
|
||||
memory = 256
|
||||
}
|
||||
}
|
||||
|
||||
restart {
|
||||
attempts = 3
|
||||
interval = "5m"
|
||||
delay = "15s"
|
||||
mode = "fail"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
# Disabled
|
||||
|
||||
# resource "nomad_job" "teamsstatus" {
|
||||
# jobspec = file("${path.module}/teamsstatus.nomad.hcl")
|
||||
# }
|
||||
|
||||
# resource "nomad_variable" "teamsstatus" {
|
||||
# path = "nomad/jobs/teamsstatus"
|
||||
# items = {
|
||||
# token_cache_json = file("${path.module}/token_cache.json")
|
||||
# }
|
||||
# }
|
||||
@@ -2,53 +2,14 @@ resource "nomad_job" "transfer" {
|
||||
jobspec = file("transfer.nomad.hcl")
|
||||
}
|
||||
|
||||
resource "nomad_csi_volume_registration" "unraid_transfer" {
|
||||
# Note: Before changing the definition of this volume you need to stop the jobs that are using it
|
||||
depends_on = [data.nomad_plugin.smb]
|
||||
plugin_id = "smb"
|
||||
|
||||
volume_id = "unraid_transfer"
|
||||
name = "unraid_transfer"
|
||||
|
||||
external_id = "unraid_transfer"
|
||||
|
||||
capability {
|
||||
access_mode = "single-node-writer"
|
||||
attachment_mode = "file-system"
|
||||
}
|
||||
|
||||
context = {
|
||||
source = "//192.168.1.192/transfer"
|
||||
}
|
||||
|
||||
secrets = {
|
||||
"username" = "anon"
|
||||
"password" = ""
|
||||
}
|
||||
module "unraid_smb_transfer" {
|
||||
source = "./modules/unraid_smb"
|
||||
name = "transfer"
|
||||
id = "unraid_transfer"
|
||||
share = "transfer"
|
||||
}
|
||||
|
||||
resource "nomad_csi_volume_registration" "unraid_appdata_transferfilebrowser" {
|
||||
# Note: Before changing the definition of this volume you need to stop the jobs that are using it
|
||||
depends_on = [data.nomad_plugin.smb]
|
||||
plugin_id = "smb"
|
||||
|
||||
volume_id = "unraid_appdata_transferfilebrowser"
|
||||
name = "unraid_appdata_transferfilebrowser"
|
||||
|
||||
external_id = "unraid_appdata_transferfilebrowser"
|
||||
|
||||
capability {
|
||||
access_mode = "single-node-writer"
|
||||
attachment_mode = "file-system"
|
||||
}
|
||||
|
||||
context = {
|
||||
source = "//192.168.1.192/appdata"
|
||||
subDir = "transferfilebrowser" # Note: Needs to be manually created on the share
|
||||
}
|
||||
|
||||
secrets = {
|
||||
"username" = "nomad"
|
||||
"password" = data.sops_file.secrets.data["unraid.nomad"]
|
||||
}
|
||||
module "appdata_transferfilebrowser" {
|
||||
source = "./modules/appdata"
|
||||
name = "transferfilebrowser"
|
||||
}
|
||||
|
||||
50
2-nomad-config/unifi.nomad.hcl
Normal file
@@ -0,0 +1,50 @@
|
||||
job "unifi-network" {
|
||||
group "unifi-network" {
|
||||
count = 1
|
||||
|
||||
task "unifi-controller" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "jacobalberty/unifi:v9.5.21"
|
||||
|
||||
// Fixed IP on the actual network so that devices can find it
|
||||
network_mode = "macvlan"
|
||||
ipv4_address = "192.168.1.50"
|
||||
}
|
||||
|
||||
env {
|
||||
TZ = "Australia/Melbourne"
|
||||
SYSTEM_IP = "192.168.1.50"
|
||||
JVM_INIT_HEAP_SIZE = "1024M"
|
||||
JVM_MAX_HEAP_SIZE = "1024M"
|
||||
UNIFI_STDOUT = "true"
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
volume = "unraid_appdata_unifi_network"
|
||||
destination = "/unifi" # Expected root directory (contains data, log, cert subdirs)
|
||||
read_only = false
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 200
|
||||
memory = 1850
|
||||
memory_max = 2500
|
||||
}
|
||||
}
|
||||
|
||||
# CSI volume for UniFi Controller persistent data/logs
|
||||
volume "unraid_appdata_unifi_network" {
|
||||
type = "csi"
|
||||
read_only = false
|
||||
source = "unraid_appdata_unifi_network"
|
||||
access_mode = "single-node-writer"
|
||||
attachment_mode = "file-system"
|
||||
|
||||
mount_options {
|
||||
mount_flags = ["uid=0", "gid=0"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
9
2-nomad-config/unifi.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
|
||||
resource "nomad_job" "unifi_network" {
|
||||
jobspec = file("unifi.nomad.hcl")
|
||||
}
|
||||
|
||||
module "appdata_unifi_network" {
|
||||
source = "./modules/appdata"
|
||||
name = "unifi-network"
|
||||
}
|
||||
305
cicd-plan.md
Normal file
@@ -0,0 +1,305 @@
|
||||
# CI/CD Plan
|
||||
|
||||
## Overview
|
||||
|
||||
Three distinct problems, tackled in phases:
|
||||
|
||||
1. **Does the config parse/validate without errors?** (static, no credentials)
|
||||
2. **Does the new Docker image actually exist and start?** (pre-merge, needs Docker)
|
||||
3. **Does the running service stay healthy through a deployment?** (post-merge, needs Nomad canary)
|
||||
|
||||
The goal is: Renovate opens a PR → CI runs checks → you review → merge → canary starts automatically → you promote (or it auto-reverts).
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 — Static Validation (proves the runner works)
|
||||
|
||||
No secrets needed. Runs on every PR.
|
||||
|
||||
### Infrastructure required
|
||||
|
||||
- `act_runner` Nomad job (see below) with a Gitea runner token
|
||||
- `.gitea/workflows/ci.yml` in this repo
|
||||
|
||||
### Checks
|
||||
|
||||
| Check | Command | Notes |
|
||||
| --------------------- | ----------------------------------------------------- | ------------------------------------------------------------------- |
|
||||
| HCL formatting | `terraform fmt -check -recursive` | Fails on whitespace/style drift |
|
||||
| Terraform syntax | `terraform init -backend=false && terraform validate` | Catches wrong resource types, missing required args, bad references |
|
||||
| Nomad job spec syntax | `nomad job validate <file>` | Catches Nomad-specific issues; needs `NOMAD_ADDR` + read token |
|
||||
|
||||
`terraform init -backend=false && terraform validate` is the most valuable check: it catches ~90% of real mistakes with zero secret exposure. The Nomad validate step requires a low-privilege read token — worth adding once the runner is trusted.
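
A minimal sketch of that read-only token's ACL policy, assuming ACLs are enabled on the cluster and that plain read access is enough for the validate endpoint (worth confirming against your Nomad version); the policy name is illustrative, and it would be applied with `nomad acl policy apply` and issued to CI with `nomad acl token create -policy=ci-readonly -type=client`:

```hcl
# ci-readonly.policy.hcl (illustrative name) — broad read-only access for CI validation
namespace "*" {
  policy = "read"
}

node {
  policy = "read"
}
```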
|
||||
|
||||
### Workflow sketch
|
||||
|
||||
```yaml
|
||||
# .gitea/workflows/ci.yml
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: hashicorp/setup-terraform@v3
|
||||
|
||||
- name: fmt check
|
||||
run: terraform fmt -check -recursive
|
||||
working-directory: 2-nomad-config
|
||||
|
||||
- name: init + validate (no backend)
|
||||
run: |
|
||||
terraform init -backend=false
|
||||
terraform validate
|
||||
working-directory: 2-nomad-config
|
||||
|
||||
- name: fmt check (nixos-node)
|
||||
run: terraform fmt -check -recursive
|
||||
working-directory: 1-nixos-node
|
||||
|
||||
nomad-validate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Nomad CLI
|
||||
run: |
|
||||
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/hashicorp.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
|
||||
sudo apt-get update && sudo apt-get install -y nomad
|
||||
- name: validate all job specs
|
||||
env:
|
||||
NOMAD_ADDR: ${{ secrets.NOMAD_ADDR }}
|
||||
NOMAD_TOKEN: ${{ secrets.NOMAD_TOKEN }} # read-only policy sufficient
|
||||
run: |
|
||||
find 2-nomad-config -name '*.nomad.hcl' | while read f; do
|
||||
echo "==> $f"
|
||||
nomad job validate "$f"
|
||||
done
|
||||
```
|
||||
|
||||
### act_runner Nomad job
|
||||
|
||||
```hcl
|
||||
# act-runner.nomad.hcl
|
||||
job "act-runner" {
|
||||
group "act-runner" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
}
|
||||
|
||||
# Connect upstream to Gitea
|
||||
service {
|
||||
name = "act-runner"
|
||||
connect {
|
||||
sidecar_service {
|
||||
proxy {
|
||||
upstreams {
|
||||
destination_name = "code-connect"
|
||||
local_bind_port = 3000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task "act-runner" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "gitea/act_runner:latest"
|
||||
volumes = ["/var/run/docker.sock:/var/run/docker.sock"]
|
||||
}
|
||||
|
||||
env = {
|
||||
GITEA_INSTANCE_URL = "http://localhost:3000"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOF
|
||||
GITEA_RUNNER_REGISTRATION_TOKEN={{ with nomadVar "nomad/jobs/act-runner" }}{{ .registration_token }}{{ end }}
|
||||
EOF
|
||||
destination = "secrets/runner.env"
|
||||
env = true
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 200
|
||||
memory = 256
|
||||
memory_max = 512
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Security note**: mounting `/var/run/docker.sock` gives the runner root-equivalent access to the host. Acceptable for a home server. Alternative: use `docker:dind` sidecar or Nomad's `exec` driver — more complex, lower risk.
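
If the lower-risk route is ever wanted, a rough sketch of the `docker:dind` variant, assuming the client allows privileged containers and the stock dind entrypoint listens on plain TCP 2375 once TLS is disabled (image tag, port, and resource sizes are illustrative):

```hcl
# Extra task in the act-runner group. Tasks in a bridge-mode group share a network
# namespace, so the runner reaches this daemon over localhost without the host socket.
task "dind" {
  driver = "docker"

  config {
    image      = "docker:27-dind" # illustrative tag
    privileged = true             # required by dind; the host socket stays unmounted
  }

  env {
    DOCKER_TLS_CERTDIR = "" # assumed dind entrypoint behaviour: plain TCP on 2375
  }

  resources {
    cpu    = 200
    memory = 512
  }
}

# ...and in the act-runner task, point the runner at the sidecar instead of the socket:
# env { DOCKER_HOST = "tcp://127.0.0.1:2375" }
```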
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 — Docker Image Validation (pre-merge)
|
||||
|
||||
Runs on PRs that touch `.nomad.hcl` files. Catches: tag typos, deleted images, registry outages.
|
||||
|
||||
Requires the `act_runner` to have Docker access (same socket mount as above).
|
||||
|
||||
```yaml
|
||||
image-pull:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Pull changed images
|
||||
run: |
|
||||
# Extract image tags added or changed vs main
|
||||
git fetch origin main
|
||||
git diff origin/main...HEAD -- '*.nomad.hcl' \
|
||||
| grep '^\+\s*image\s*=' \
|
||||
| grep -oP '"[^"]+:[^"]+"' \
|
||||
| tr -d '"' \
|
||||
| sort -u \
|
||||
| while read image; do
|
||||
echo "==> Pulling $image"
|
||||
docker pull "$image"
|
||||
done
|
||||
```
|
||||
|
||||
This intentionally only tests _changed_ images — no value in pulling everything on every PR.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 — Nomad Canary Deployments (post-merge gate)
|
||||
|
||||
Makes "merge" mean "start canary" rather than "go live". The old allocation keeps running until you promote.
|
||||
|
||||
### Which jobs get canaries
|
||||
|
||||
Most jobs already have Consul health checks — these can use `health_check = "checks"` for automatic revert gating.
|
||||
|
||||
| Job | Health check | Shared writable volume | Canary safe? |
|
||||
| ---------- | ------------- | ----------------------- | --------------------------------------------------------------------------------- |
|
||||
| ntfy | ✅ `/healthz` | no | ✅ yes |
|
||||
| gitea | ✅ `/` | ✅ `single-node-writer` | ⚠️ volume blocks 2nd alloc from mounting — needs `max_parallel=1` rolling instead |
|
||||
| jellyfin | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| immich | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| sonarr | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| prowlarr | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| deluge | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| frigate | ✅ | ✅ `single-node-writer` | ⚠️ same — rolling |
|
||||
| glance | ✅ | no | ✅ yes |
|
||||
| transfer | ✅ | ✅ `single-node-writer` | ⚠️ rolling |
|
||||
| openreader | ❌ | ✅ `single-node-writer` | ⚠️ add check first (sketch below), then rolling |
|
||||
| unifi | ❌ | ✅ `single-node-writer` | ⚠️ add check first (sketch below), then rolling |
|
||||
| traefik | (ingress) | ✅ | ⚠️ rolling — downtime risk, promote quickly |
|
||||
| authelia | (ingress) | ✅ | ✅ stateless config, canary fine |
|
||||
| renovate | batch job | n/a | n/a — no deployment model |
|
||||
| postgres | (data layer) | ✅ | ❌ never canary — single-writer DB |
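
For openreader and unifi, a check along these lines inside the job's `service` stanza is enough to give `health_check = "checks"` something to gate on; the service name, port label, and path below are assumptions to adjust per job:

```hcl
service {
  name = "openreader" # same pattern applies to unifi
  port = "http"       # assumed port label

  check {
    type     = "http"
    path     = "/"    # swap for a dedicated health endpoint if the app exposes one
    interval = "10s"
    timeout  = "2s"
  }
}
```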
|
||||
|
||||
### Canary stanza (stateless jobs with no volume conflict)
|
||||
|
||||
```hcl
|
||||
update {
|
||||
canary = 1
|
||||
auto_promote = false
|
||||
auto_revert = true
|
||||
health_check = "checks"
|
||||
healthy_deadline = "5m"
|
||||
min_healthy_time = "30s"
|
||||
}
|
||||
```
|
||||
|
||||
### Rolling stanza (jobs with single-node-writer volumes)
|
||||
|
||||
```hcl
|
||||
update {
|
||||
max_parallel = 1
|
||||
auto_revert = true
|
||||
health_check = "checks"
|
||||
healthy_deadline = "5m"
|
||||
min_healthy_time = "30s"
|
||||
}
|
||||
```
|
||||
|
||||
Rolling with `max_parallel=1` still gives auto-revert but doesn't attempt to run two allocations simultaneously — the old one stops before the new one mounts the volume.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4 — Automated terraform apply + Deployment Promotion
|
||||
|
||||
Full CD: merge triggers apply, which creates the canary, CI then watches it and promotes or reverts.
|
||||
|
||||
### Flow
|
||||
|
||||
```
|
||||
PR merged to main
|
||||
│
|
||||
▼
|
||||
Gitea Actions (on: push, branches: [main])
|
||||
- terraform init
|
||||
- terraform apply -auto-approve
|
||||
│
|
||||
▼
|
||||
Nomad canary starts (old allocation still live)
|
||||
│
|
||||
▼
|
||||
CI polls `nomad deployment list` for the new deployment ID
|
||||
CI waits for canary allocation to reach "healthy" in Consul
|
||||
│ healthy within deadline
|
||||
▼
|
||||
CI runs: nomad deployment promote <id>
|
||||
│ or unhealthy → nomad deployment fail <id> (auto_revert fires)
|
||||
▼
|
||||
ntfy notification: "deployment promoted" or "deployment reverted"
|
||||
```
|
||||
|
||||
### Secrets required for full CD
|
||||
|
||||
| Secret | Used by | Risk level |
|
||||
| ---------------------- | ----------------------------------- | ---------------------------------- |
|
||||
| `NOMAD_ADDR` | validate + apply + promote | Low (internal LAN addr) |
|
||||
| `NOMAD_TOKEN` | terraform apply (write) + promote | **High** — grants full infra write |
|
||||
| `CLOUDFLARE_API_TOKEN` | terraform apply | **High** — DNS write |
|
||||
| `SOPS_AGE_KEY` | terraform apply (decrypt secrets) | **High** — decrypts all secrets |
|
||||
| `PG_PASSWORD` | terraform apply (postgres provider) | High |
|
||||
|
||||
Full CD requires all of these in Gitea Actions secrets. This is acceptable for a self-hosted, non-public Gitea instance where you control runner access — but it's the trust boundary to be deliberate about. A reasonable middle ground: **Phase 1-3 are fully automated; Phase 4 (apply + promote) runs automatically but requires a manual re-trigger or approval step** (Gitea supports required reviewers on environments).
|
||||
|
||||
### Promote/revert script sketch
|
||||
|
||||
```bash
|
||||
# In CI, after terraform apply completes:
|
||||
DEPLOY_ID=$(nomad deployment list -json | jq -r --arg job "$JOB" '[.[] | select(.JobID == $job and .Status == "running")] | first | .ID')
|
||||
echo "Watching deployment $DEPLOY_ID..."
|
||||
|
||||
for i in $(seq 1 30); do
|
||||
STATUS=$(nomad deployment status -json "$DEPLOY_ID" | jq -r '.Status')
|
||||
HEALTHY=$(nomad deployment status -json "$DEPLOY_ID" | jq -r '.TaskGroups[].HealthyAllocs')
|
||||
echo "[$i] status=$STATUS healthy=$HEALTHY"
|
||||
if [ "$STATUS" = "successful" ]; then exit 0; fi
|
||||
if [ "$STATUS" = "failed" ]; then exit 1; fi
|
||||
# Check if canary is healthy enough to promote
|
||||
CANARY_HEALTHY=$(nomad deployment status -json "$DEPLOY_ID" | jq -r '.TaskGroups[].DesiredCanaries == .TaskGroups[].HealthyAllocs')
|
||||
if [ "$CANARY_HEALTHY" = "true" ]; then
|
||||
nomad deployment promote "$DEPLOY_ID"
|
||||
exit 0
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
nomad deployment fail "$DEPLOY_ID"
|
||||
exit 1
|
||||
```
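
For the notification step at the end of the flow, a plain HTTP POST to ntfy is all that's needed; the hostname and topic below are assumptions, and one way to wire it in is to call this helper just before each `exit` in the script above:

```bash
# notify <outcome> <job> — e.g. notify promoted sonarr / notify reverted sonarr
notify() {
  curl -s \
    -H "Title: Nomad deployment $1" \
    -d "$2 deployment $1 ($DEPLOY_ID)" \
    http://ntfy.lan/deployments # assumed internal ntfy URL and topic
}
```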
|
||||
|
||||
---
|
||||
|
||||
## Implementation Order
|
||||
|
||||
- [x] **Phase 1a**: Create `act-runner.nomad.hcl` + Terraform wrapper, register runner token in Gitea, get a hello-world workflow green
|
||||
- [x] **Phase 1b**: Add `terraform fmt` + `terraform validate -backend=false` workflow — no secrets needed
|
||||
- [x] **Phase 1c**: Add Nomad validate step — add `NOMAD_ADDR` + read-only `NOMAD_TOKEN` to Gitea secrets
|
||||
- [ ] **Phase 2**: Add image pull validation step to the workflow
|
||||
- [ ] **Phase 3a**: Add `update` stanzas to ntfy and glance (simplest, no volume conflict)
|
||||
- [ ] **Phase 3b**: Add rolling `update` stanzas to remaining service jobs (jellyfin, sonarr, etc.)
|
||||
- [ ] **Phase 3c**: Add health checks to openreader and unifi before adding update stanzas
|
||||
- [ ] **Phase 4a**: Add on-push workflow that runs `terraform apply -auto-approve` using full credential set
|
||||
- [ ] **Phase 4b**: Add deployment promotion/revert polling script
|
||||
- [ ] **Phase 4c**: Wire ntfy notifications for promote/revert outcomes
|
||||
15
renovate.json
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": ["config:recommended"],
|
||||
"customManagers": [
|
||||
{
|
||||
"description": "Update Docker image tags in Nomad job files",
|
||||
"customType": "regex",
|
||||
"fileMatch": ["\\.nomad\\.hcl$"],
|
||||
"matchStrings": [
|
||||
"image\\s*=\\s*\"(?<depName>[^:\"]+):(?<currentValue>[^\"]+)\""
|
||||
],
|
||||
"datasourceTemplate": "docker"
|
||||
}
|
||||
]
|
||||
}
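
As a concrete example, the pinned image in `unifi.nomad.hcl` is parsed by that regex manager roughly as follows (the datasource lookup itself is assumed to succeed):

```hcl
# From 2-nomad-config/unifi.nomad.hcl — the regex captures:
#   depName      = "jacobalberty/unifi"
#   currentValue = "v9.5.21"
# so Renovate can query the docker datasource and open a PR when a newer tag exists.
image = "jacobalberty/unifi:v9.5.21"
```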
|
||||