Compare commits
16 Commits
bd815e96c6...renovate/n
| Author | SHA1 | Date |
|---|---|---|
| | 3b5dd00474 | |
| | a30e60b557 | |
| | 2536e855e5 | |
| | dbe11dc8fa | |
| | b27f3e58ca | |
| | 5b3f2cf8f4 | |
| | 68cf58ead5 | |
| | d3ac8a252b | |
| | 961ec128f1 | |
| | d84eb73db0 | |
| | a3eaab5a07 | |
| | cf4daacab5 | |
| | 2a038e59e8 | |
| | 443d614a66 | |
| | 2803f694e8 | |
| | bbff0f6692 | |
38  1-nixos-node/.terraform.lock.hcl  generated
```diff
@@ -2,22 +2,32 @@
 # Manual edits may be lost in future updates.
 
 provider "registry.terraform.io/hashicorp/null" {
-  version     = "3.2.1"
-  constraints = "3.2.1"
+  version     = "3.2.4"
+  constraints = "3.2.4"
   hashes = [
-    "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=",
-    "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840",
-    "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb",
-    "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5",
-    "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3",
+    "h1:+Ag4hSb4qQjNtAS6gj2+gsGl7v0iB/Bif6zZZU8lXsw=",
+    "h1:127ts0CG8hFk1bHIfrBsKxcnt9bAYQCq3udWM+AACH8=",
+    "h1:1fD1DCOTkei4uxrLwszCkQapuZcw5sq4CatKFtRg58E=",
+    "h1:DskWuCIvJ4FBUpngJsiRCtVFiKZMQbT3WQgn9GBnFPc=",
+    "h1:L5V05xwp/Gto1leRryuesxjMfgZwjb7oool4WS1UEFQ=",
+    "h1:Oz/OcdizPzjor4pnGHNvcXURwyS6uDDXAZccg4R6iR4=",
+    "h1:TpiL9vwc23kFuJgQlFsgIlaI4WjAsX9H3LLzcZ36Yfs=",
+    "h1:VMNuSHZMkfsbrzvhpp6lzm2vWdmT/1vuUdW0x+Dsa0Q=",
+    "h1:dNVrmZwFvVPlL2FqTMDasI6mbDIr9pcn7tHexkZU9z8=",
+    "h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=",
+    "h1:wTNrZnwQdOOT/TW9pa+7GgJeFK2OvTvDmx78VmUmZXM=",
+    "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238",
-    "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc",
-    "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970",
-    "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2",
-    "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5",
-    "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f",
-    "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694",
+    "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43",
+    "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a",
+    "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991",
+    "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f",
+    "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e",
+    "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615",
+    "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442",
+    "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5",
+    "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f",
+    "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f",
   ]
 }
 
```
```diff
@@ -49,6 +49,9 @@
   preferred_address_family = "ipv4";
   %{if cpu_total_compute != null ~}
   cpu_total_compute = ${cpu_total_compute};
+  %{endif ~}
+  %{if node_class != null ~}
+  node_class = "${node_class}";
   %{endif ~}
   host_volume = {
   %{ for volume in host_volumes ~}
@@ -113,6 +116,61 @@
   networking.firewall.allowedTCPPorts = [ 80 443 8081 4646 4647 4648 8300 8301 8500 ];
   networking.firewall.allowedUDPPorts = [ 8301 ];
 
+  # Ensure Docker daemon is available (Nomad enableDocker only configures Nomad, does not guarantee docker service)
+  virtualisation.docker.enable = true;
+
+  %{if node_class == "latte-panda-n150" ~}
+  # Enable Intel iGPU (N150 UHD Graphics) for OpenVINO / VA-API workloads running in Docker
+  hardware.graphics = {
+    enable = true;
+    extraPackages = with pkgs; [
+      intel-media-driver    # VA-API (iHD)
+      intel-compute-runtime # OpenCL / oneAPI
+    ];
+  };
+
+  %{endif ~}
+  # Proper systemd service definition for macvlan network creation
+  systemd.services.docker-macvlan-network = {
+    description = "Ensure macvlan Docker network exists";
+    after = [ "network-online.target" "docker.service" ];
+    wants = [ "network-online.target" "docker.service" ];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      Type = "oneshot";
+    };
+    # Provide required binaries in PATH
+    path = [ pkgs.docker pkgs.bash pkgs.coreutils pkgs.iproute2 pkgs.gnugrep ];
+    script = ''
+      set -euo pipefail
+      NET_NAME=macvlan
+      if docker network inspect "$NET_NAME" >/dev/null 2>&1; then
+        echo "Docker network $NET_NAME already exists"
+        exit 0
+      fi
+      echo "Creating Docker macvlan network $NET_NAME on interface ${bind_interface}"
+      # We intentionally do NOT use --ip-range here to avoid allocating the
+      # same reserved pool on every host (which could lead to collisions if
+      # multiple macvlan containers are started across nodes). Instead, we
+      # give critical services (like UniFi) an explicit static IP via the
+      # Nomad job (Docker static assignment) and rely on manual DHCP
+      # reservations to prevent conflicts.
+      #
+      # If you later need multiple macvlan-assigned containers per host,
+      # consider one of these strategies:
+      # 1. Per-host distinct network name + ip-range slice (macvlan-m01, ...)
+      # 2. Parameterize an ip-range per host in Terraform and template here
+      # 3. Keep a registry of allocated static IPs in Consul KV / Nomad vars
+      docker network create -d macvlan \
+        --subnet=192.168.1.0/24 \
+        --gateway=192.168.1.1 \
+        -o parent=${bind_interface} \
+        "$NET_NAME"
+      echo "Docker macvlan network $NET_NAME created"
+    '';
+    restartIfChanged = false; # Don't rerun just because comment changed
+  };
+
   # Copy the NixOS configuration file and link it from the resulting system
   # (/run/current-system/configuration.nix). This is useful in case you
   # accidentally delete configuration.nix.
```
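The ip-range comment above lists three follow-up strategies but stops at prose. A minimal sketch of strategy 2 (per-host ip-range slice, parameterized in Terraform) could look like the following; the `macvlan_ip_range` field is hypothetical (it is not part of this change) and only illustrates how a slice could ride along the existing `nodes` object:

```hcl
# Sketch only: a hypothetical macvlan_ip_range field threaded through the
# existing nodes variable (strategy 2 above). Not part of this compare.
variable "nodes" {
  type = map(object({
    bind_interface   = string
    macvlan_ip_range = optional(string, null) # e.g. "192.168.1.64/28", one slice per host
  }))
}

# The template script would then add the flag only when a slice is set:
#   %{if macvlan_ip_range != null ~}
#     --ip-range=${macvlan_ip_range} \
#   %{endif ~}
```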
```diff
@@ -2,7 +2,7 @@ terraform {
   required_providers {
     null = {
       source  = "hashicorp/null"
-      version = "3.2.1"
+      version = "3.2.4"
     }
     template = {
       source = "hashicorp/template"
@@ -21,6 +21,7 @@ variable "nodes" {
     bind_interface    = string
     bootstrap         = optional(bool, false)  # Optional field for bootstrap nodes
     cpu_total_compute = optional(number, null) # Optional field for CPU total compute
+    node_class        = optional(string, null) # Optional Nomad node_class for scheduling constraints
     host_volumes      = list(string)
   }))
 }
@@ -32,6 +33,7 @@ locals {
     bind_interface    = v.bind_interface
     bootstrap         = v.bootstrap
     cpu_total_compute = v.cpu_total_compute
+    node_class        = v.node_class
     host_volumes      = v.host_volumes
   })
 }
```
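For reference, an entry in `nodes` exercising the new field could look like this; the node name and values are illustrative, only the field names come from the variable definition above:

```hcl
nodes = {
  # Illustrative values; the shape follows variable "nodes" above.
  "panda-01" = {
    bind_interface    = "enp1s0"
    bootstrap         = false
    cpu_total_compute = null
    node_class        = "latte-panda-n150" # matched by the frigate job constraint later in this compare
    host_volumes      = []
  }
}
```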
File diff suppressed because one or more lines are too long
```diff
@@ -18,27 +18,9 @@ resource "nomad_variable" "postgres" {
   }
 }
 
-resource "nomad_csi_volume_registration" "unraid_database_dump" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_database_dump"
-  name        = "unraid_database_dump"
-
-  external_id = "unraid_database_dump"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/database-dump"
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "unraid_smb_database_dump" {
+  source = "../modules/unraid_smb"
+  name   = "dump"
+  id     = "unraid_database_dump"
+  share  = "database-dump"
 }
```
```diff
@@ -2,30 +2,3 @@
 module "ingress" {
   source = "./2-ingress"
 }
-
-# traefik.tf
-
-moved {
-  from = cloudflare_dns_record.othrayte-one
-  to   = module.ingress.cloudflare_dns_record.othrayte-one
-}
-
-moved {
-  from = cloudflare_dns_record.star-othrayte-one
-  to   = module.ingress.cloudflare_dns_record.star-othrayte-one
-}
-
-moved {
-  from = nomad_variable.traefik
-  to   = module.ingress.nomad_variable.traefik
-}
-
-moved {
-  from = nomad_job.traefik
-  to   = module.ingress.nomad_job.traefik
-}
-
-moved {
-  from = nomad_csi_volume_registration.unraid_appdata_traefik
-  to   = module.ingress.nomad_csi_volume_registration.unraid_appdata_traefik
-}
```
```diff
@@ -63,6 +63,9 @@ job "authelia" {
         data = <<EOF
 server:
   address: tcp://0.0.0.0:{{ env "NOMAD_PORT_http" }}/
+  endpoints:
+    authz:
+      forward-auth:
+        implementation: 'ForwardAuth'
+        authn_strategies:
+          - name: 'CookieSession'
 theme: "auto"
 identity_validation:
   reset_password:
```
```diff
@@ -112,6 +112,9 @@ EOF
   template {
     data = <<EOF
 http:
+  serversTransports:
+    ignorecert:
+      insecureSkipVerify: true
   middlewares:
     auth:
       forwardAuth:
@@ -154,11 +157,6 @@ http:
       service: unraid
       middlewares:
         - auth
-    frigate:
-      rule: "Host(`frigate.othrayte.one`)"
-      service: frigate
-      middlewares:
-        - auth
     kopia:
       rule: "Host(`kopia.othrayte.one`)"
       service: kopia
@@ -173,6 +171,11 @@ http:
     hass-token:
       rule: "Host(`${hass_magic_token}-hass.othrayte.one`)"
      service: hass
+    unifi-network:
+      rule: "Host(`network.othrayte.one`)"
+      service: unifi-network
+      middlewares:
+        - auth
 
   services:
     traefik:
@@ -190,19 +193,20 @@ http:
     unraid:
       loadBalancer:
         servers:
-          - url: "http://192.168.1.192:80"
-    frigate:
-      loadBalancer:
-        servers:
-          - url: "http://192.168.1.192:5000"
+          - url: "http://betelgeuse-seven-unraid.lan:80"
     kopia:
       loadBalancer:
         servers:
-          - url: "http://192.168.1.192:51515"
+          - url: "http://betelgeuse-seven-unraid.lan:51515"
     hass:
       loadBalancer:
         servers:
           - url: "http://192.168.1.234:8123"
+    unifi-network:
+      loadBalancer:
+        serversTransport: ignorecert
+        servers:
+          - url: "https://192.168.1.50:8443"
 EOF
 
     destination = "local/configs/nomad.yml"
```
```diff
@@ -32,28 +32,8 @@ resource "nomad_job" "traefik" {
   })
 }
 
-resource "nomad_csi_volume_registration" "unraid_appdata_traefik" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_appdata_traefik"
-  name        = "unraid_appdata_traefik"
-
-  external_id = "unraid_appdata_traefik"
-
-  capability {
-    access_mode     = "multi-node-multi-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/appdata"
-    subDir = "traefik" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "appdata_traefik" {
+  source      = "../modules/appdata"
+  name        = "traefik"
+  access_mode = "multi-node-multi-writer"
 }
```
145  2-nomad-config/deluge.nomad.hcl  Normal file
```diff
@@ -0,0 +1,145 @@
+job "deluge" {
+  group "deluge" {
+    network {
+      mode = "bridge"
+      port "http" {
+        to = 8112
+      }
+    }
+
+    task "wireguard" {
+      driver = "docker"
+
+      lifecycle {
+        hook    = "prestart"
+        sidecar = true
+      }
+
+      config {
+        image      = "thrnz/docker-wireguard-pia:latest"
+        privileged = true
+        ports      = ["http"]
+      }
+
+      env {
+        LOC           = "aus_melbourne"
+        LOCAL_NETWORK = "192.168.1.0/24"
+        # PORT_FORWARDING = "1" # TODO: Find a way to tell deluge the forwarded port, the wireguard container outputs it /pia-shared/port.dat
+      }
+
+      template {
+        data        = <<EOH
+USER="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_user }}{{ end }}"
+PASS="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_pass }}{{ end }}"
+EOH
+        destination = "secrets/pia.env"
+        env         = true # Load the file as environment variables
+      }
+
+      resources {
+        cpu    = 50
+        memory = 32
+      }
+    }
+
+
+    # Service for Traefik (external ingress)
+    service {
+      name = "deluge"
+      port = "http"
+
+      tags = [
+        "traefik.enable=true",
+        "traefik.http.routers.deluge.middlewares=auth@file",
+      ]
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    # Service for Consul Connect (internal mesh communication)
+    service {
+      name         = "deluge-api"
+      port         = "http"
+      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
+
+      # tags = [
+      #   "traefik.enable=false",
+      # ]
+
+      connect {
+        sidecar_service {
+          //tags = ["traefik.enable=false"]
+        }
+      }
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    task "deluge" {
+      driver = "docker"
+
+      config {
+        image        = "lscr.io/linuxserver/deluge:latest"
+        network_mode = "container:wireguard-${NOMAD_ALLOC_ID}" # Share namespace with VPN
+      }
+
+      env {
+        PUID = "1000"
+        PGID = "1000"
+        TZ   = "Australia/Melbourne"
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_deluge"
+        destination = "/config"
+        read_only   = false
+      }
+
+      volume_mount {
+        volume      = "unraid_media_deluge"
+        destination = "/data/downloads"
+        read_only   = false
+      }
+
+      resources {
+        cpu        = 400
+        memory     = 2048
+        memory_max = 3000
+      }
+    }
+
+    volume "unraid_appdata_deluge" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_appdata_deluge"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=1000", "gid=1000"]
+      }
+    }
+
+    volume "unraid_media_deluge" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_media_deluge"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=1000", "gid=1000"]
+      }
+    }
+  }
+}
```
24  2-nomad-config/deluge.tf  Normal file
```diff
@@ -0,0 +1,24 @@
+
+resource "nomad_job" "deluge" {
+  jobspec = file("deluge.nomad.hcl")
+}
+
+resource "nomad_variable" "deluge" {
+  path = "nomad/jobs/deluge"
+  items = {
+    pia_user = data.sops_file.secrets.data["pia.user"]
+    pia_pass = data.sops_file.secrets.data["pia.pass"]
+  }
+}
+
+module "appdata_deluge" {
+  source = "./modules/appdata"
+  name   = "deluge"
+}
+
+module "unraid_smb_deluge_media" {
+  source = "./modules/unraid_smb"
+  name   = "deluge"
+  share  = "media"
+  subDir = "downloads"
+}
```
216  2-nomad-config/frigate.nomad.hcl  Normal file
```diff
@@ -0,0 +1,216 @@
+job "frigate" {
+  # Pin to N150 LattePanda nodes - Intel UHD iGPU for OpenVINO-accelerated detection.
+  # hardware.graphics (intel-compute-runtime) is deployed to these nodes via configuration.nix.
+  constraint {
+    attribute = "${node.class}"
+    value     = "latte-panda-n150"
+  }
+
+  group "frigate" {
+    count = 1
+
+    network {
+      port "http" {
+        to = 5000
+      }
+    }
+
+    # Prestart: restore Frigate's SQLite DB from the Litestream file replica on the CIFS share.
+    # Runs to completion before the frigate task starts. Safe on first boot (-if-replica-exists
+    # is a no-op when no replica exists yet).
+    task "litestream-restore" {
+      lifecycle {
+        hook    = "prestart"
+        sidecar = false
+      }
+
+      driver = "docker"
+
+      config {
+        image   = "litestream/litestream:0.5.9"
+        command = "restore"
+        args    = ["-if-replica-exists", "-config", "/local/litestream.yml", "/alloc/data/frigate.db"]
+      }
+
+      # Litestream config: replicate to /config/frigate.db.litestream/ on the CIFS share.
+      # Litestream writes its own segment format - no SQLite advisory locking involved.
+      # Frigate must be configured with database.path: /alloc/data/frigate.db in config.yml.
+      template {
+        data        = <<EOH
+dbs:
+  - path: /alloc/data/frigate.db
+    replicas:
+      - url: file:///config/frigate.db.litestream
+EOH
+        destination = "local/litestream.yml"
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_frigate"
+        destination = "/config"
+        read_only   = false
+      }
+
+      resources {
+        cpu        = 100
+        memory     = 64
+        memory_max = 256
+      }
+    }
+
+    # Sidecar: continuously stream WAL changes from /alloc/data/frigate.db to the CIFS replica.
+    # Runs alongside frigate for the lifetime of the allocation.
+    task "litestream-replicate" {
+      lifecycle {
+        hook    = "poststart"
+        sidecar = true
+      }
+
+      driver = "docker"
+
+      config {
+        image   = "litestream/litestream:0.5"
+        command = "replicate"
+        args    = ["-config", "/local/litestream.yml"]
+      }
+
+      template {
+        data        = <<EOH
+dbs:
+  - path: /alloc/data/frigate.db
+    replicas:
+      - url: file:///config/frigate.db.litestream
+EOH
+        destination = "local/litestream.yml"
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_frigate"
+        destination = "/config"
+        read_only   = false
+      }
+
+      resources {
+        cpu        = 100
+        memory     = 64
+        memory_max = 256
+      }
+    }
+
+    task "frigate" {
+      driver = "docker"
+
+      config {
+        image      = "ghcr.io/blakeblackshear/frigate:0.17.1"
+        ports      = ["http"]
+        privileged = true
+
+        # Shared memory for inter-process frame buffers (frigate forks detector processes).
+        shm_size = 268435456 # 256 MiB
+
+        # Large tmpfs for decoded frame cache - avoids wearing out any storage.
+        mounts = [
+          {
+            type     = "tmpfs"
+            target   = "/tmp/cache"
+            readonly = false
+            tmpfs_options = {
+              size = 1000000000 # 1 GiB in bytes
+            }
+          }
+        ]
+
+        # Intel iGPU render node - Frigate's bundled OpenVINO runtime auto-detects
+        # GPU device and uses it for object detection without any extra env vars.
+        # Requires hardware.graphics.enable = true on the NixOS node (N150 nodes).
+        devices = [
+          {
+            host_path      = "/dev/dri/renderD128"
+            container_path = "/dev/dri/renderD128"
+          }
+        ]
+      }
+
+      # RTSP password injected from Nomad variables (sourced from sops secrets).
+      # Reference in config.yml as: {FRIGATE_RTSP_PASSWORD}
+      template {
+        data        = <<EOH
+FRIGATE_RTSP_PASSWORD="{{ with nomadVar "nomad/jobs/frigate" }}{{ .rtsp_password }}{{ end }}"
+EOH
+        destination = "secrets/frigate.env"
+        env         = true
+      }
+
+      service {
+        name = "frigate"
+        port = "http"
+
+        tags = [
+          "traefik.enable=true",
+          "traefik.http.routers.frigate.middlewares=auth@file",
+          "traefik.http.routers.frigate-token.rule=Host(`n7gdph5cuh7bd1cakbq8s099rvrv3qhs-frigate.othrayte.one`)",
+        ]
+
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/api/version"
+          port     = "http"
+          interval = "10s"
+          timeout  = "5s"
+        }
+      }
+
+      env {
+        TZ = "Australia/Melbourne"
+      }
+
+      # config.yml lives here (read from CIFS). SQLite DB is at /alloc/data/frigate.db
+      # (local NVMe, managed by Litestream). Requires in config.yml:
+      #   database:
+      #     path: /alloc/data/frigate.db
+      volume_mount {
+        volume      = "unraid_appdata_frigate"
+        destination = "/config"
+        read_only   = false
+      }
+
+      # Recordings, clips, and exports.
+      volume_mount {
+        volume      = "unraid_media_frigate"
+        destination = "/media/frigate"
+        read_only   = false
+      }
+
+      resources {
+        # GPU handles inference; CPU manages stream ingestion, motion detection, and recording.
+        cpu    = 2000
+        memory = 2048
+      }
+    }
+
+    volume "unraid_appdata_frigate" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_appdata_frigate"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["nobrl", "uid=0", "gid=0"]
+      }
+    }
+
+    volume "unraid_media_frigate" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_media_frigate"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["nobrl", "uid=0", "gid=0"]
+      }
+    }
+  }
+}
```
23  2-nomad-config/frigate.tf  Normal file
```diff
@@ -0,0 +1,23 @@
+
+resource "nomad_job" "frigate" {
+  jobspec = file("frigate.nomad.hcl")
+}
+
+resource "nomad_variable" "frigate" {
+  path = "nomad/jobs/frigate"
+  items = {
+    rtsp_password = data.sops_file.secrets.data["frigate.rtsp_password"]
+  }
+}
+
+module "appdata_frigate" {
+  source = "./modules/appdata"
+  name   = "frigate"
+}
+
+module "unraid_smb_frigate_media" {
+  source = "./modules/unraid_smb"
+  name   = "frigate"
+  share  = "media"
+  subDir = "frigate"
+}
```
```diff
@@ -37,6 +37,17 @@ job "gitea" {
       }
     }
 
+    # Separate service for Consul Connect ingress (address_mode=alloc avoids hairpin NAT issue)
+    service {
+      name         = "code-connect"
+      port         = "http"
+      address_mode = "alloc"
+
+      connect {
+        sidecar_service {}
+      }
+    }
+
     task "gitea" {
       driver = "docker"
 
```
```diff
@@ -33,28 +33,7 @@ resource "postgresql_database" "gitea" {
   owner = postgresql_role.gitea.name
 }
 
-resource "nomad_csi_volume_registration" "unraid_appdata_gitea" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_appdata_gitea"
-  name        = "unraid_appdata_gitea"
-
-  external_id = "unraid_appdata_gitea"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/appdata"
-    subDir = "gitea" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "appdata_gitea" {
+  source = "./modules/appdata"
+  name   = "gitea"
 }
```
```diff
@@ -34,107 +34,29 @@ resource "nomad_variable" "immich" {
   }
 }
 
-resource "nomad_csi_volume_registration" "unraid_appdata_immich" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_appdata_immich"
-  name        = "unraid_appdata_immich"
-  external_id = "unraid_appdata_immich"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/appdata"
-    subDir = "immich" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "appdata_immich" {
+  source = "./modules/appdata"
+  name   = "immich"
 }
 
-resource "nomad_csi_volume_registration" "unraid_media_photosvideos" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_media_photosvideos"
-  name        = "unraid_media_photosvideos"
-  external_id = "unraid_media_photosvideos"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/media"
-    subDir = "Photos and Videos" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "unraid_smb_immich_photosvideos" {
+  source = "./modules/unraid_smb"
+  name   = "photosvideos"
+  share  = "media"
+  subDir = "Photos and Videos"
 }
 
-resource "nomad_csi_volume_registration" "unraid_media_immich_encodedvideo" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_media_immich_encodedvideo"
-  name        = "unraid_media_immich_encodedvideo"
-  external_id = "unraid_media_immich_encodedvideo"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/media"
-    subDir = "immich/encoded-video" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "unraid_smb_immich_encodedvideo" {
+  source = "./modules/unraid_smb"
+  name   = "immich_encodedvideo"
+  share  = "media"
+  subDir = "immich/encoded-video"
 }
 
-resource "nomad_csi_volume_registration" "unraid_mediadump_photosvideos" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_mediadump_photosvideos"
-  name        = "unraid_mediadump_photosvideos"
-  external_id = "unraid_mediadump_photosvideos"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/media-dump"
-    subDir = "Photos and Videos" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
+module "unraid_smb_immich_mediadump_photosvideos" {
+  source = "./modules/unraid_smb"
+  name   = "photosvideos"
+  id     = "unraid_mediadump_photosvideos"
+  share  = "media-dump"
+  subDir = "Photos and Videos"
 }
 
```
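A note on the `id` override in the last block above (an observation, not part of the diff): the unraid_smb module added later in this compare defaults its volume_id to `unraid_${share}_${name}` with dashes replaced only in `name`, so a hyphenated share would change the registered ID. A quick sketch of the derivation:

```hcl
# How the unraid_smb default would come out for the media-dump volume.
locals {
  share = "media-dump"
  name  = "photosvideos"

  # replace() only touches the name, so the share's hyphen survives:
  default_volume_id = "unraid_${local.share}_${replace(local.name, "-", "_")}"
  # => "unraid_media-dump_photosvideos", hence the explicit
  # id = "unraid_mediadump_photosvideos" to keep the existing registration.
}
```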
88  2-nomad-config/jellyfin.nomad.hcl  Normal file
```diff
@@ -0,0 +1,88 @@
+job "jellyfin" {
+  group "jellyfin" {
+    count = 1
+
+    network {
+      port "http" {
+        to = 8096
+      }
+    }
+
+    task "jellyfin" {
+      driver = "docker"
+
+      config {
+        image = "lscr.io/linuxserver/jellyfin:latest"
+        ports = ["http"]
+      }
+
+      service {
+        name = "jellyfin"
+        port = "http"
+
+        tags = [
+          "traefik.enable=true",
+          "traefik.http.routers.jellyfin.middlewares=auth@file",
+          "traefik.http.routers.jellyfin-token.rule=Host(`c3ll7nbevl5j4j8rcnfxnr95q48fuayz-jellyfin.othrayte.one`)",
+        ]
+
+        check {
+          name     = "alive"
+          type     = "tcp"
+          port     = "http"
+          interval = "10s"
+          timeout  = "2s"
+        }
+      }
+
+      env {
+        PUID = 1000
+        PGID = 1000
+        TZ   = "Australia/Melbourne"
+
+        JELLYFIN_PublishedServerUrl = "https://jellyfin.othrayte.one"
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_jellyfin"
+        destination = "/config"
+        read_only   = false
+      }
+
+      volume_mount {
+        volume      = "unraid_media_jellyfin"
+        destination = "/data"
+        read_only   = false
+      }
+
+      resources {
+        cpu    = 500
+        memory = 2048
+      }
+    }
+
+    volume "unraid_appdata_jellyfin" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_appdata_jellyfin"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=1000", "gid=1000"]
+      }
+    }
+
+    volume "unraid_media_jellyfin" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_media_jellyfin"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["nobrl", "uid=1000", "gid=1000"]
+      }
+    }
+  }
+}
```
15  2-nomad-config/jellyfin.tf  Normal file
```diff
@@ -0,0 +1,15 @@
+
+resource "nomad_job" "jellyfin" {
+  jobspec = file("jellyfin.nomad.hcl")
+}
+
+module "appdata_jellyfin" {
+  source = "./modules/appdata"
+  name   = "jellyfin"
+}
+
+module "unraid_smb_jellyfin_media" {
+  source = "./modules/unraid_smb"
+  name   = "jellyfin"
+  share  = "media"
+}
```
62  2-nomad-config/modules/appdata/main.tf  Normal file
```diff
@@ -0,0 +1,62 @@
+terraform {
+  required_providers {
+    sops = {
+      source  = "carlpett/sops"
+      version = "~> 0.5"
+    }
+  }
+}
+
+variable "name" {
+  description = "Name of the application, also used as subdir on the unraid appdata share"
+  type        = string
+}
+
+variable "id" {
+  description = "ID to use for the volume registration, defaults to name with - replaced by _"
+  type        = string
+  default     = null
+}
+
+variable "access_mode" {
+  description = "CSI volume access mode"
+  type        = string
+  default     = "single-node-writer"
+  validation {
+    condition     = contains(["single-node-writer", "multi-node-multi-writer"], var.access_mode)
+    error_message = "access_mode must be either 'single-node-writer' or 'multi-node-multi-writer'"
+  }
+}
+
+data "nomad_plugin" "smb" {
+  plugin_id        = "smb"
+  wait_for_healthy = true
+}
+
+data "sops_file" "secrets" {
+  source_file = "secrets/secrets.enc.json"
+}
+
+resource "nomad_csi_volume_registration" "this" {
+  depends_on = [data.nomad_plugin.smb]
+  plugin_id  = "smb"
+
+  volume_id   = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
+  name        = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
+  external_id = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
+
+  capability {
+    access_mode     = var.access_mode
+    attachment_mode = "file-system"
+  }
+
+  context = {
+    source = "//betelgeuse-seven-unraid.lan/appdata"
+    subDir = var.name
+  }
+
+  secrets = {
+    "username" = "nomad"
+    "password" = data.sops_file.secrets.data["unraid.nomad"]
+  }
+}
```
62  2-nomad-config/modules/unraid_smb/main.tf  Normal file
```diff
@@ -0,0 +1,62 @@
+terraform {
+  required_providers {
+    sops = {
+      source  = "carlpett/sops"
+      version = "~> 0.5"
+    }
+  }
+}
+
+variable "name" {
+  description = "Name of the volume registration"
+  type        = string
+}
+
+variable "id" {
+  description = "ID to use for the volume registration, defaults to name with - replaced by _"
+  type        = string
+  default     = null
+}
+
+variable "share" {
+  description = "Name of the SMB share on the unraid server"
+  type        = string
+}
+
+variable "subDir" {
+  description = "Subdirectory within the SMB share"
+  type        = string
+  default     = null
+}
+
+data "nomad_plugin" "smb" {
+  plugin_id        = "smb"
+  wait_for_healthy = true
+}
+
+data "sops_file" "secrets" {
+  source_file = "secrets/secrets.enc.json"
+}
+
+resource "nomad_csi_volume_registration" "this" {
+  depends_on = [data.nomad_plugin.smb]
+  plugin_id  = "smb"
+
+  volume_id   = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
+  name        = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
+  external_id = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
+
+  capability {
+    access_mode     = "single-node-writer"
+    attachment_mode = "file-system"
+  }
+
+  context = merge({
+    source = "//betelgeuse-seven-unraid.lan/${var.share}"
+  }, var.subDir == null ? {} : { "subDir" = var.subDir })
+
+  secrets = {
+    "username" = "nomad"
+    "password" = data.sops_file.secrets.data["unraid.nomad"]
+  }
+}
```
100  2-nomad-config/ntfy.nomad.hcl  Normal file
```diff
@@ -0,0 +1,100 @@
+job "ntfy" {
+  group "ntfy" {
+    network {
+      mode = "bridge"
+      port "http" {
+        to = 80
+      }
+    }
+
+    # Consul Connect sidecar with upstream to postgres
+    service {
+      connect {
+        sidecar_service {
+          proxy {
+            upstreams {
+              destination_name = "postgres"
+              local_bind_port  = 5432
+            }
+          }
+        }
+      }
+    }
+
+    service {
+      name = "ntfy"
+      port = "http"
+
+      tags = [
+        "traefik.enable=true",
+        "traefik.http.routers.ntfy.middlewares=auth@file",
+        # Token subdomain bypasses Authelia — ntfy's own token auth is sufficient for API access
+        "traefik.http.routers.ntfy-token.rule=Host(`ntfy-2e30e5869ab6bfde4961012b48761a9b.othrayte.one`)",
+      ]
+
+      check {
+        type     = "http"
+        path     = "/healthz"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    # Users and tokens are provisioned declaratively via auth-users / auth-tokens in server.yml.
+    # ntfy reads and applies them on every startup — no poststart task, no race conditions.
+    #
+    # Bcrypt hashes are not secrets and are hardcoded below (same as /etc/shadow — safe to commit).
+    # Generate with: docker run --rm -it binwiederhier/ntfy user hash
+    # or: echo "mypassword" | docker run --rm -i binwiederhier/ntfy user hash
+    # For the diun account the plaintext is irrelevant (token-only auth); use a random password:
+    # openssl rand -base64 32 | docker run --rm -i binwiederhier/ntfy user hash
+    #
+    # Required SOPS keys:
+    #   ntfy.database_pw — postgres password for the ntfy role
+    #   diun.ntfy_token — access token for Diun (actual secret — grants write access)
+    #     Must start with "tk_" and be exactly 32 chars total.
+    #     Generate: tok=$(openssl rand -hex 15); echo "tk_${tok:0:29}"
+    task "ntfy" {
+      driver = "docker"
+
+      config {
+        image   = "binwiederhier/ntfy:latest"
+        ports   = ["http"]
+        command = "serve"
+        volumes = [
+          "local/server.yml:/etc/ntfy/server.yml",
+        ]
+      }
+
+      env = {
+        TZ = "Australia/Melbourne"
+      }
+
+      template {
+        data        = <<EOF
+base-url: "https://ntfy.othrayte.one"
+listen-http: ":80"
+database-url: "postgres://ntfy:{{ with nomadVar "nomad/jobs/ntfy" }}{{ .database_pw }}{{ end }}@localhost:5432/ntfy"
+auth-default-access: "deny-all"
+behind-proxy: true
+enable-login: true
+auth-users:
+  - "admin:$2a$10$rLp4qagJnsA8Es5hQlISH.WrlzwMrXE2MBaEgz7zdd2lkAVu30lMy:admin"
+  - "diun:$2y$10$4wi1VG.Vp5p3Q2OEIXaTSOmwZm.G9dpNca9BqQRMdGGnk2yQqK3Gq:user"
+auth-tokens:
+  - "diun:{{with nomadVar "nomad/jobs/ntfy"}}{{.diun_token}}{{end}}:Diun"
+auth-access:
+  - "diun:diun:write-only"
+EOF
+        destination = "local/server.yml"
+      }
+
+      resources {
+        cpu        = 50
+        memory     = 64
+        memory_max = 128
+      }
+    }
+
+  }
+}
```
22  2-nomad-config/ntfy.tf  Normal file
```diff
@@ -0,0 +1,22 @@
+resource "nomad_job" "ntfy" {
+  jobspec = file("ntfy.nomad.hcl")
+}
+
+resource "nomad_variable" "ntfy" {
+  path = "nomad/jobs/ntfy"
+  items = {
+    database_pw = data.sops_file.secrets.data["ntfy.database_pw"]
+    diun_token  = data.sops_file.secrets.data["diun.ntfy_token"]
+  }
+}
+
+resource "postgresql_role" "ntfy" {
+  name     = "ntfy"
+  password = data.sops_file.secrets.data["ntfy.database_pw"]
+  login    = true
+}
+
+resource "postgresql_database" "ntfy" {
+  name  = "ntfy"
+  owner = postgresql_role.ntfy.name
+}
```
116  2-nomad-config/openreader.nomad.hcl  Normal file
```diff
@@ -0,0 +1,116 @@
+job "openreader" {
+  group "openreader" {
+    network {
+      mode = "bridge"
+      port "http" {
+        to = 3003
+      }
+    }
+
+    # Consul Connect sidecar with upstream to postgres
+    service {
+      connect {
+        sidecar_service {
+          proxy {
+            upstreams {
+              destination_name = "postgres"
+              local_bind_port  = 5432
+            }
+          }
+        }
+      }
+    }
+
+    service {
+      name = "openreader"
+      port = "http"
+
+      tags = [
+        "traefik.enable=true",
+        "traefik.http.routers.openreader.middlewares=auth@file",
+      ]
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    service {
+      name         = "openreader-api"
+      port         = "http"
+      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
+
+      connect {
+        sidecar_service {}
+      }
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    task "openreader" {
+      driver = "docker"
+
+      config {
+        image = "ghcr.io/richardr1126/openreader:v2.1.2"
+        ports = ["http"]
+      }
+
+      env = {
+        TZ = "Australia/Melbourne"
+
+        # Use embedded SeaweedFS for blob storage (data lives in /app/docstore/seaweedfs).
+        # Port 8333 is not exposed; browser uploads/downloads fall back through the app API.
+        USE_EMBEDDED_WEED_MINI = "true"
+        S3_ENDPOINT            = "http://localhost:8333"
+        S3_FORCE_PATH_STYLE    = "true"
+
+        # Auth is intentionally disabled (no BASE_URL / AUTH_SECRET set).
+        # Access is controlled by the Authelia middleware on the Traefik router above.
+
+        # To enable server-side library import from an Unraid share, add a second CSI volume
+        # mount for the share (e.g. unraid_media_books → /app/docstore/library:ro) and set:
+        # IMPORT_LIBRARY_DIR = "/app/docstore/library"
+      }
+
+      template {
+        data        = <<EOF
+POSTGRES_URL=postgresql://openreader:{{ with nomadVar "nomad/jobs/openreader" }}{{ .database_pw }}{{ end }}@localhost:5432/openreader
+EOF
+        destination = "secrets/openreader.env"
+        env         = true
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_openreader"
+        destination = "/app/docstore"
+        read_only   = false
+      }
+
+      resources {
+        cpu        = 200
+        memory     = 750
+        memory_max = 1024
+      }
+    }
+
+    volume "unraid_appdata_openreader" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_appdata_openreader"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=1000", "gid=1000"]
+      }
+    }
+  }
+}
```
26  2-nomad-config/openreader.tf  Normal file
```diff
@@ -0,0 +1,26 @@
+resource "nomad_job" "openreader" {
+  jobspec = file("openreader.nomad.hcl")
+}
+
+resource "nomad_variable" "openreader" {
+  path = "nomad/jobs/openreader"
+  items = {
+    database_pw = data.sops_file.secrets.data["openreader.database_pw"]
+  }
+}
+
+resource "postgresql_role" "openreader" {
+  name     = "openreader"
+  password = data.sops_file.secrets.data["openreader.database_pw"]
+  login    = true
+}
+
+resource "postgresql_database" "openreader" {
+  name  = "openreader"
+  owner = postgresql_role.openreader.name
+}
+
+module "appdata_openreader" {
+  source = "./modules/appdata"
+  name   = "openreader"
+}
```
119  2-nomad-config/prowlarr.nomad.hcl  Normal file
```diff
@@ -0,0 +1,119 @@
+job "prowlarr" {
+  group "prowlarr" {
+    network {
+      mode = "bridge"
+      port "http" {
+        to = 9696
+      }
+    }
+
+    service {
+      connect {
+        sidecar_service {
+          proxy {
+            upstreams {
+              destination_name = "postgres"
+              local_bind_port  = 5432
+            }
+            upstreams {
+              destination_name = "sonarr-api"
+              local_bind_port  = 8989
+            }
+          }
+        }
+      }
+    }
+
+    service {
+      name = "prowlarr"
+      port = "http"
+
+      tags = [
+        "traefik.enable=true",
+        "traefik.http.routers.prowlarr.middlewares=auth@file",
+      ]
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    service {
+      name         = "prowlarr-api"
+      port         = "http"
+      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
+
+      connect {
+        sidecar_service {}
+      }
+
+      check {
+        type     = "http"
+        path     = "/"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    task "prowlarr" {
+      driver = "docker"
+
+      config {
+        image = "lscr.io/linuxserver/prowlarr:latest"
+        ports = ["http"]
+      }
+
+      env {
+        PUID = 1000
+        PGID = 1000
+        TZ   = "Australia/Melbourne"
+
+        # https://wiki.servarr.com/prowlarr/postgres-setup
+
+        # Disable internal auth to use Traefik + Authelia
+        PROWLARR__AUTH__REQUIRED = "Enabled"
+        PROWLARR__AUTH__METHOD   = "External"
+
+        PROWLARR__POSTGRES__USER   = "prowlarr"
+        PROWLARR__POSTGRES__HOST   = "localhost"
+        PROWLARR__POSTGRES__PORT   = "5432"
+        PROWLARR__POSTGRES__MAINDB = "prowlarr-main"
+        PROWLARR__POSTGRES__LOGDB  = "prowlarr-log"
+      }
+
+      volume_mount {
+        volume      = "unraid_appdata_prowlarr"
+        destination = "/config"
+        read_only   = false
+      }
+
+      resources {
+        cpu    = 150
+        memory = 512
+      }
+
+      template {
+        data        = <<EOH
+PROWLARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/prowlarr" }}{{ .database_pw }}{{ end }}"
+EOH
+        destination = "secrets/db.env"
+        env         = true # Load the file as environment variables
+      }
+    }
+
+    volume "unraid_appdata_prowlarr" {
+      type            = "csi"
+      read_only       = false
+      source          = "unraid_appdata_prowlarr"
+      access_mode     = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=1000", "gid=1000"]
+      }
+    }
+  }
+}
```
32  2-nomad-config/prowlarr.tf  Normal file
```diff
@@ -0,0 +1,32 @@
+resource "nomad_job" "prowlarr" {
+  jobspec = file("prowlarr.nomad.hcl")
+}
+
+resource "nomad_variable" "prowlarr" {
+  path = "nomad/jobs/prowlarr"
+  items = {
+    database_pw = data.sops_file.secrets.data["prowlarr.database_pw"]
+  }
+}
+
+# https://wiki.servarr.com/prowlarr/postgres-setup
+resource "postgresql_role" "prowlarr" {
+  name     = "prowlarr"
+  password = data.sops_file.secrets.data["prowlarr.database_pw"]
+  login    = true
+}
+
+resource "postgresql_database" "prowlarr_main" {
+  name  = "prowlarr-main"
+  owner = postgresql_role.prowlarr.name
+}
+
+resource "postgresql_database" "prowlarr_log" {
+  name  = "prowlarr-log"
+  owner = postgresql_role.prowlarr.name
+}
+
+module "appdata_prowlarr" {
+  source = "./modules/appdata"
+  name   = "prowlarr"
+}
```
```diff
@@ -1,7 +1,7 @@
 # Terraform State
 
 Mount the state on the fileshare to 2-nomad-config/.tfstate/
-`sudo mount -t cifs //192.168.1.192/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`
+`sudo mount -t cifs //betelgeuse-seven-unraid.lan/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`
 
 # Tailscale Oauth Client
 
@@ -20,6 +20,7 @@ Edit the secrets using `sops secrets/secrets.enc.json`
 # Bootstrapping (starting without PostgreSQL running)
 
 terraform apply -target=module.data
+terraform apply -target=module.ingress
 
 ## Restoring PostgreSQL DBs
 
```
64  2-nomad-config/renovate.nomad.hcl  Normal file
@@ -0,0 +1,64 @@
job "renovate" {
  type = "batch"

  periodic {
    cron             = "0 4 * * *" # Daily at 4am
    prohibit_overlap = true
  }

  group "renovate" {
    network {
      mode = "bridge"
    }

    # Consul Connect sidecar with upstream to Gitea (service: code-connect, port 3000)
    service {
      name = "renovate"
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "code-connect"
              local_bind_port  = 3000
            }
          }
        }
      }
    }

    task "renovate" {
      driver = "docker"

      config {
        image = "renovate/renovate:latest"
      }

      env = {
        RENOVATE_PLATFORM     = "gitea"
        RENOVATE_ENDPOINT     = "http://localhost:3000"
        RENOVATE_GIT_URL      = "endpoint"
        RENOVATE_REPOSITORIES = "othrayte/infra"
        RENOVATE_GIT_AUTHOR   = "Renovate Bot <renovate@othrayte.one>"
        LOG_LEVEL             = "debug"
      }

      # Required SOPS key:
      # renovate.gitea_token - PAT for the renovate bot account in Gitea
      # Create a dedicated 'renovate' user in Gitea with these token scopes:
      # repo (read+write), user (read), issue (read+write), organization (read)
      template {
        data        = <<EOF
RENOVATE_TOKEN={{ with nomadVar "nomad/jobs/renovate" }}{{ .gitea_token }}{{ end }}
EOF
        destination = "secrets/renovate.env"
        env         = true
      }

      resources {
        cpu        = 500
        memory     = 512
        memory_max = 1024
      }
    }
  }
}
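Because this is a periodic batch job, nothing runs until the 4am cron fires. For an immediate test run, `nomad job periodic force renovate` launches a child job on demand (assuming the default namespace); `prohibit_overlap = true` only stops a new scheduled launch from starting while an earlier one is still running.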
2-nomad-config/renovate.tf (new file, 10 lines)
@@ -0,0 +1,10 @@
resource "nomad_job" "renovate" {
  jobspec = file("renovate.nomad.hcl")
}

resource "nomad_variable" "renovate" {
  path = "nomad/jobs/renovate"
  items = {
    gitea_token = data.sops_file.secrets.data["renovate.gitea_token"]
  }
}
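For context, `data.sops_file.secrets` is presumably declared once elsewhere in this workspace (it predates this diff). A minimal sketch using the carlpett/sops provider's data source, with the path taken from the README above:

data "sops_file" "secrets" {
  source_file = "secrets/secrets.enc.json"
}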
@@ -33,6 +33,28 @@
     "immich": {
       "database_pw": "ENC[AES256_GCM,data:SUyMGqu7deZyZpVt,iv:asZehOvn/JamwFyS+Xl9Xpr4JFkKlJjHVw7LywYOxTc=,tag:plRvuv7+ievfEhxurBl7YQ==,type:str]"
     },
+    "sonarr": {
+      "database_pw": "ENC[AES256_GCM,data:TN381ZYJLeUHX9U3Jnd9+w==,iv:lKaMYHeaSGXJd0/EGxkDY2l2v62xG3xs8TVC0HwXL94=,tag:3z5rK+2RfJHJdQc7KC9KmA==,type:str]"
+    },
+    "pia": {
+      "user": "ENC[AES256_GCM,data:kniAs2gCTq4=,iv:1Oaht02fFSQwzWmWEtjsJZCJChPJsZhwRyux8dMY2CU=,tag:NqWaUhuYTSFZZK/CpSisdg==,type:str]",
+      "pass": "ENC[AES256_GCM,data:c8qWGcaI0p7MyQ==,iv:/3ehYrgdDwjzFdXyX/vKTK+zt6u7gWNRZBIdWDG1KiE=,tag:jqfIMnB1OKchBZ4U2s1o4g==,type:str]"
+    },
+    "prowlarr": {
+      "database_pw": "ENC[AES256_GCM,data:FkW5LPoyn8bh0UfWcFq3og==,iv:SFq4Xsdz3FfCDyPjIaAmz5nsC/SPdFrR03GCr3KE/nw=,tag:PVYj7hSWDnfeE7igSXGBSA==,type:str]"
+    },
+    "frigate": {
+      "rtsp_password": "ENC[AES256_GCM,data:8vq06/IkNOUgpHmf,iv:lj8buuIC0ub0YOUiOiaN6tokkIT2/+bBwFNz2QXmCd4=,tag:EMm/bIHdJSAtjYAlrNOCMw==,type:str]"
+    },
+    "openreader": {
+      "database_pw": "ENC[AES256_GCM,data:2Ey9Ypb2Ked/LP/ApJhCqhKWuzognxVK7ku60nERp7I=,iv:KdLFD+fuNpYmPEU5G96SvFcQeZB0XlnOh/6uf7OfFqI=,tag:h7DQlqx5fxhiHuWyFd7svQ==,type:str]"
+    },
+    "ntfy": {
+      "database_pw": "ENC[AES256_GCM,data:79c2KFs3tcbet1dSGnkSDlAeKLCZrh4aMYLXTROM8w==,iv:eZ4limyjl++nsvHUzPKy82hfLZEOc+XQYpO6Czo/8os=,tag:iX9SiEACQ5IM8f1jhZh5Qw==,type:str]"
+    },
+    "renovate": {
+      "gitea_token": "ENC[AES256_GCM,data:/J3CDMgWZLe20oQ+ENKBMi8fs/+jgsARV7xihMq0OLmRk8C8ae/IXg==,iv:e7WYOanSOCZ/LhN6SKrH0VrR3xLPTTppOKpGpSl+oAc=,tag:XBAilRdK3jL7WtM+92Fsmg==,type:str]"
+    },
     "sops": {
       "age": [
         {
@@ -40,8 +62,8 @@
           "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByUWM4ZDVVbGFrUGdMRHBX\nUFBmU3Nlc0RBSzhFK0tHNHpkQXUvUVdiZUZJCmpRN1lFdENpWW0rcThjVlVQNUl6\nWnlLU0RnQ3FZby81Ly8xTFBrek9nMncKLS0tIFQ4UTRNOC9CRmx4OFJWem1wckZz\nUDFTSzdWZldFK3FqcTNWTWRyNDhHQ2MKS811mR5xn7qiC/aVgPFYJ5c6Q3zxRfcr\nHcvxUvB01vNJKZpRg92vvKPkV6lQO3DXCT98OdfwiymlEOvYxg71Pg==\n-----END AGE ENCRYPTED FILE-----\n"
         }
       ],
-    "lastmodified": "2025-10-13T12:19:46Z",
-    "mac": "ENC[AES256_GCM,data:QJ1Prqf37xMZbvyMvjBVxZOiOr07CmCYrWmr+5hwDsEmG4eEC9sPF/UY+/Cy2OTzsMp+cHb6C3maAo09O171wj6nJIZucg3B9fjEW2+4AoO217G4vmauMl3FFkut2CuvVV9zt2B/fLAskRg/yeYYOhjzPkWA6lyeV31sV5ZQ6Kw=,iv:5WfkmNr5vdfTqp6+INjQN/Zmc7/iJNc/2auO9h3En08=,tag:snBgJyMzBXVAkV3zERkK8g==,type:str]",
+    "lastmodified": "2026-04-18T06:07:22Z",
+    "mac": "ENC[AES256_GCM,data:4UJMEZNS8HXtn1vIJ+qCkBJL5oh3Jp7wbaGm5hMiSNPseNq3smTYmMgh+TNK4t/K7yxfukTuhEFTUsnMfi7rIxTrbCpcTdnqJSYm/iflkdA57Tx+mHpY9iG7wtRmObow18Ea0rj6foMu+1V8pVFomzhc/ipafinTCuqodKW1a2Y=,iv:Gu/Lh8mir36ltN++qJg122ry+eJA0GKSrfijulYM7q4=,tag:5tjBzrCZcQUvc76No+E9Ow==,type:str]",
     "encrypted_regex": "^(.*)$",
     "version": "3.10.2"
 }
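Note that sops rewrites `lastmodified` and recomputes the `mac` integrity value on every edit, which is why those two lines churn alongside the added keys. With `encrypted_regex` set to `^(.*)$` every value in the file is encrypted, so new keys like `renovate.gitea_token` only ever appear in the repo as ENC[...] blobs after editing via `sops secrets/secrets.enc.json`.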
2-nomad-config/sonarr.nomad.hcl (new file, 140 lines)
@@ -0,0 +1,140 @@
job "sonarr" {
  group "sonarr" {
    network {
      mode = "bridge"
      port "http" {
        to = 8989
      }
    }
    service {
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "postgres"
              local_bind_port  = 5432
            }
            upstreams {
              destination_name = "deluge-api"
              local_bind_port  = 8112
            }
            upstreams {
              destination_name = "prowlarr-api"
              local_bind_port  = 9696
            }
          }
        }
      }
    }

    service {
      name = "sonarr"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.sonarr.middlewares=auth@file",
      ]

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name         = "sonarr-api"
      port         = "http"
      address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)

      connect {
        sidecar_service {}
      }

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "sonarr" {
      driver = "docker"

      config {
        image = "lscr.io/linuxserver/sonarr:latest"
        ports = ["http"]
      }

      env {
        PUID = 1000
        PGID = 1000
        TZ   = "Australia/Melbourne"

        # https://wiki.servarr.com/sonarr/environment-variables

        # Disable internal auth to use Traefik + Authelia
        SONARR__AUTH__REQUIRED = "Enabled"
        SONARR__AUTH__METHOD   = "External"

        SONARR__POSTGRES__USER   = "sonarr"
        SONARR__POSTGRES__HOST   = "localhost"
        SONARR__POSTGRES__PORT   = "5432"
        SONARR__POSTGRES__MAINDB = "sonarr-main"
        SONARR__POSTGRES__LOGDB  = "sonarr-log"
      }

      volume_mount {
        volume      = "unraid_appdata_sonarr"
        destination = "/config"
        read_only   = false
      }

      volume_mount {
        volume      = "unraid_media_sonarr"
        destination = "/data"
        read_only   = false
      }

      resources {
        cpu    = 150
        memory = 1024
      }

      template {
        data        = <<EOH
SONARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/sonarr" }}{{ .database_pw }}{{ end }}"
EOH
        destination = "secrets/db.env"
        env         = true # Load the file as environment variables
      }
    }

    volume "unraid_appdata_sonarr" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_sonarr"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=1000", "gid=1000"]
      }
    }

    volume "unraid_media_sonarr" {
      type            = "csi"
      read_only       = false
      source          = "unraid_media_sonarr"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["nobrl", "uid=1000", "gid=1000"]
      }
    }
  }
}
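The three Connect upstreams are what let the env block point everything at localhost: Postgres, Deluge, and Prowlarr are all reached through the Envoy sidecar on loopback. The `sonarr-api` service closes the loop in the other direction; a consumer job would declare it as an upstream in the same way (a sketch only, with the local bind port chosen arbitrarily):

upstreams {
  destination_name = "sonarr-api"
  local_bind_port  = 8989
}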
2-nomad-config/sonarr.tf (new file, 38 lines)
@@ -0,0 +1,38 @@
resource "nomad_job" "sonarr" {
  jobspec = file("sonarr.nomad.hcl")
}

resource "nomad_variable" "sonarr" {
  path = "nomad/jobs/sonarr"
  items = {
    database_pw = data.sops_file.secrets.data["sonarr.database_pw"]
  }
}

# https://wiki.servarr.com/sonarr/postgres-setup#schema-creation
resource "postgresql_role" "sonarr" {
  name     = "sonarr"
  password = data.sops_file.secrets.data["sonarr.database_pw"]
  login    = true
}

resource "postgresql_database" "sonarr_main" {
  name  = "sonarr-main"
  owner = postgresql_role.sonarr.name
}

resource "postgresql_database" "sonarr_log" {
  name  = "sonarr-log"
  owner = postgresql_role.sonarr.name
}

module "appdata_sonarr" {
  source = "./modules/appdata"
  name   = "sonarr"
}

module "unraid_smb_sonarr_media" {
  source = "./modules/unraid_smb"
  name   = "sonarr"
  share  = "media"
}
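The `unraid_smb` module itself is not included in this diff; judging by the inline resource it replaces in transfer.tf further down, it presumably wraps a CSI volume registration roughly like the sketch below (the variable names and volume-id scheme are assumptions inferred from the call sites; credentials are omitted since the call sites use different ones):

resource "nomad_csi_volume_registration" "this" {
  depends_on = [data.nomad_plugin.smb]
  plugin_id  = "smb"

  volume_id   = "unraid_${var.share}_${var.name}"
  name        = "unraid_${var.share}_${var.name}"
  external_id = "unraid_${var.share}_${var.name}"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    source = "//192.168.1.192/${var.share}"
  }
}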
@@ -1,345 +0,0 @@
job "teamsstatus" {
  group "app" {
    task "teamsstatus" {
      driver = "docker"

      config {
        image   = "python:3.11-slim"
        command = "/local/start.sh"
      }

      # Template for the startup script
      template {
        data        = <<EOF
#!/bin/bash
set -e

cd /local

echo "Starting Teams Status Updater service..."
echo "Installing required Python packages..."
pip install msal requests

echo "Running Teams Status Updater script..."
exec python teamsstatus_standalone.py 2>&1
EOF
        destination = "local/start.sh"
        perms       = "755"
      }

      # Template for the token cache
      template {
        data        = "{{ with nomadVar \"nomad/jobs/teamsstatus\" }}{{ .token_cache_json }}{{ end }}"
        destination = "local/token_cache.json"
      }

      # Template for the Python script
      template {
        data        = <<EOF
import logging
import time
from datetime import datetime, timedelta, timezone
import random
import json
import msal
import requests
import os
import atexit

# Configure logging
logging.basicConfig(level=logging.INFO)

# Embedded journey data (tab-separated)
JOURNEY_DATA = '''Day	Start Time AEST	End Time AEST	Start Dist	End Dist	Start Location	End Location
08/06/2025	08:00:00	19:10:00	0km	973km	Melbourne	Port Pirie SA
09/06/2025	07:30:00	19:35:00	973km	2119km	Port Pirie SA	Mundrabilla WA
10/06/2025	06:45:00	15:38:00	2119km	2916km	Mundrabilla WA	Kalgoorlie WA
11/06/2025	10:45:00	17:55:00	2916km	3512km	Kalgoorlie WA	Perth
13/06/2025	07:00:00	13:58:00	3512km	4083km	Perth	Kalbarri WA
15/06/2025	07:00:00	16:52:00	4083km	4862km	Kalbarri WA	Coral Bay WA
18/06/2025	06:00:00	16:52:00	4862km	5554km	Coral Bay WA	Karijini NP WA
21/06/2025	14:00:00	15:21:00	5554km	5686km	Karijini NP WA	Karijini NP WA
22/06/2025	06:00:00	16:23:00	5686km	6559km	Karijini NP WA	Broome WA
23/06/2025	06:00:00	19:10:00	6559km	7688km	Broome WA	Kununurra WA
27/06/2025	06:00:00	16:29:00	7688km	8593km	Kununurra WA	Derby WA
28/06/2025	07:00:00	16:06:00	8593km	9358km	Derby WA	Port Hedland WA
29/06/2025	07:00:00	16:31:00	9358km	10150km	Port Hedland WA	Exmouth WA
02/07/2025	07:00:00	15:13:00	10150km	10866km	Exmouth WA	Shark Bay WA
05/07/2025	07:00:00	17:12:00	10866km	11712km	Shark Bay WA	Fremantle WA
06/07/2025	07:00:00	15:27:00	11712km	12411km	Fremantle WA	Esperance WA
08/07/2025	06:00:00	18:09:00	12411km	13144km	Esperance WA	Madura WA
09/07/2025	06:45:00	16:39:00	13144km	13821km	Madura WA	Ceduna SA
11/07/2025	08:30:00	17:46:00	13821km	14599km	Ceduna SA	Adelaide
12/07/2025	08:30:00	18:52:00	14599km	15348km	Adelaide	Melbourne'''

# Global variables for journey segments
_segments = []

def setup_token_cache(cache_filename="token_cache.json"):
    """Set up and return a serializable token cache"""
    cache = msal.SerializableTokenCache()

    cache.deserialize(open(cache_filename, "r").read())

    atexit.register(
        lambda: open(cache_filename, "w").write(cache.serialize())
        if cache.has_state_changed else None
    )
    return cache

def get_msal_app(client_id, authority="https://login.microsoftonline.com/organizations", cache_filename="token_cache.json"):
    """Create and return an MSAL PublicClientApplication"""
    cache = setup_token_cache(cache_filename)
    return msal.PublicClientApplication(
        client_id,
        authority=authority,
        token_cache=cache,
    )

def acquire_token(app, scope):
    """Acquire a token using the MSAL app"""
    result = None

    # Check if user account exists in cache
    accounts = app.get_accounts(username=None)
    if accounts:
        logging.debug("Account(s) exists in cache, probably with token too. Let's try.")
        logging.debug("Account(s) already signed in:")
        for a in accounts:
            logging.debug(a["username"])
        chosen = accounts[0]  # Assuming the end user chose this one to proceed
        logging.debug(f"Automatically using first account: {chosen['username']}")
        # Try to find a token in cache for this account
        result = app.acquire_token_silent(scope, account=chosen)

    # If no suitable token was found, get a new one
    if not result:
        logging.debug("No suitable token exists in cache. Let's get a new one from AAD.")
        print("A local browser window will be open for you to sign in. CTRL+C to cancel.")
        result = app.acquire_token_interactive(scope)

    # Validate the result
    if "access_token" not in result:
        logging.error(result.get("error"))
        logging.error(result.get("error_description"))
        logging.debug(f"Correlation ID: {result.get('correlation_id')}")
        return None

    return result["access_token"]

def set_teams_status_message(access_token, user_id, status_message, expiration_date_time="2025-06-01T12:00:00", time_zone="UTC"):
    """Set the status message for a Teams user"""
    url = f"https://graph.microsoft.com/v1.0/users/{user_id}/presence/microsoft.graph.setStatusMessage"

    payload = {
        "statusMessage": {
            "message": {
                "content": status_message + "<pinnednote></pinnednote>",
                "contentType": "text",
            }
        },
        "expirationDateTime": {
            "dateTime": expiration_date_time,
            "timeZone": time_zone
        },
    }

    headers = {
        'Authorization': f'Bearer {access_token}',
        'Content-Type': 'application/json'
    }

    logging.debug(f"Setting status message for user {user_id}")

    response = requests.post(url, json=payload, headers=headers)

    if response.status_code == 200:
        logging.info(f"Teams status message set to: {status_message}")
        return True
    else:
        logging.error(f"Failed to set Teams status message: {response.status_code}")
        return False

def _load_segments():
    """Load the journey segments from embedded data into memory"""
    global _segments
    if _segments:  # Already loaded
        return

    aest = timezone(timedelta(hours=10))

    for line in JOURNEY_DATA.split('\n')[1:]:  # Skip header
        day, start_time, end_time, start_dist, end_dist, start_loc, end_loc = line.strip().split('\t')

        # Convert day and times to datetime in AEST
        day_start = datetime.strptime(f"{day} {start_time}", "%d/%m/%Y %H:%M:%S").replace(tzinfo=aest)
        day_end = datetime.strptime(f"{day} {end_time}", "%d/%m/%Y %H:%M:%S").replace(tzinfo=aest)

        # Extract the numeric distance values
        start_dist = int(start_dist.rstrip('km'))
        end_dist = int(end_dist.rstrip('km'))

        _segments.append({
            'start_time': day_start,
            'end_time': day_end,
            'start_dist': start_dist,
            'end_dist': end_dist,
            'start_location': start_loc,
            'end_location': end_loc
        })

def get_trip_info(target_datetime):
    """Determine the distance travelled and locations for the current datetime."""
    if target_datetime.tzinfo is None:
        raise ValueError("target_datetime must be timezone-aware")

    # Ensure data is loaded
    _load_segments()

    # Before journey starts
    if not _segments or target_datetime < _segments[0]['start_time']:
        start_loc = end_loc = _segments[0]['start_location']
        return (0, start_loc, end_loc)

    # During journey
    for i, segment in enumerate(_segments):
        # If target is before this segment starts
        if target_datetime < segment['start_time']:
            prev_segment = _segments[i-1]
            return (prev_segment['end_dist'], prev_segment['end_location'], prev_segment['end_location'])

        # If target is during this segment, interpolate
        if segment['start_time'] <= target_datetime <= segment['end_time']:
            # Calculate what fraction of the segment has elapsed
            total_seconds = (segment['end_time'] - segment['start_time']).total_seconds()
            elapsed_seconds = (target_datetime - segment['start_time']).total_seconds()
            fraction = elapsed_seconds / total_seconds

            # Interpolate the distance
            distance_delta = segment['end_dist'] - segment['start_dist']
            current_dist = segment['start_dist'] + int(distance_delta * fraction)
            return (current_dist, segment['start_location'], segment['end_location'])

        # Between segments
        if i < len(_segments) - 1:
            next_segment = _segments[i + 1]
            if segment['end_time'] < target_datetime < next_segment['start_time']:
                return (segment['end_dist'], segment['end_location'], segment['end_location'])

    # After journey ends
    return (_segments[-1]['end_dist'], _segments[-1]['end_location'], _segments[-1]['end_location'])

def build_message(distance, start_loc, end_loc):
    """Build the status message based on distance and locations"""
    message = "On leave"
    if distance > 13144:
        message += f", driving my EV back from WA"
    elif distance > 2118:
        message += f", driving my EV around WA"
    elif distance > 0:
        message += f", driving my EV to WA"

    if distance > 0:
        distance += random.randint(-5, 5)
        message += f", {distance}kms travelled so far"
        if start_loc != end_loc:
            message += f", next stop {end_loc}"
        else:
            message += f", near {start_loc}"

    message += ", returning July 21st. Contacts {CIM: Grant Gorfine, Inserts: Daniel Pate, DevOps: Rob Duncan, else: Andrian Zubovic}"
    return message

def main():
    test_mode = False  # Set to True to run in test mode
    time_scale = 1  # 1/600 # Set to 1/60 to run at 1 second per minute, 1 for normal speed

    # Set start time to 7:30 AM AEST (UTC+10) on June 8th, 2025
    aest = timezone(timedelta(hours=10))
    start_time = datetime.now(aest)
    date_offset = datetime(2025, 6, 8, 7, 30, 0, tzinfo=aest) - start_time

    if test_mode:
        logging.info("Running in test mode - status messages will not actually be set")

    app = get_msal_app(client_id = "e6cda941-949f-495e-88f5-10eb45ffa0e7")

    last_token_refresh = 0
    # Token refresh interval (60 minutes in seconds)
    TOKEN_REFRESH_INTERVAL = int(60 * 60)

    old_distance = -1
    while True:
        try:
            # Check if we need to refresh the token
            current_time = time.time()
            if current_time - last_token_refresh >= TOKEN_REFRESH_INTERVAL or last_token_refresh == 0:
                logging.info("Acquiring/refreshing access token...")
                access_token = acquire_token(app, scope = ["https://graph.microsoft.com/Presence.ReadWrite"])
                if not access_token:
                    logging.error("Failed to acquire token")
                    exit(1)
                last_token_refresh = current_time
                logging.info("Token successfully refreshed")

            # Set the status message
            now = datetime.now(aest)  # Get current time in AEST
            if time_scale != 1:
                # Adjust the current time based on the time scale
                now = start_time + (now - start_time) / time_scale
            now += date_offset  # Adjust to the target start time
            distance, start_loc, end_loc = get_trip_info(now)  # We only need distance for comparison
            if distance != old_distance:
                message = build_message(distance, start_loc, end_loc)
                timestamp = now.strftime("%Y-%m-%d %H:%M:%S %Z")
                if not test_mode:
                    logging.info(f"[{timestamp}] Message: {message}")
                    success = set_teams_status_message(
                        access_token = access_token,
                        user_id = "1b625872-d8a8-42f4-b237-dfa6d8062360",
                        status_message = message,
                    )
                else:
                    logging.info(f"[TEST MODE] [{timestamp}] Message: {message}")
                    success = True
            else:
                logging.debug("Status message has not changed, skipping update")
                success = True
            old_distance = distance

            if success:
                wait_time = 900 * time_scale  # Scale the 15 minute wait time
                logging.debug(f"Waiting {wait_time} seconds before updating status message again...")
                time.sleep(wait_time)
            else:
                last_token_refresh = 0  # Reset token refresh time on failure
        except KeyboardInterrupt:
            logging.info("Status update interrupted by user. Exiting...")
            break
        except Exception as e:
            logging.error(f"An error occurred: {e}")
            time.sleep(300)  # Wait 5 minutes before retrying

    return 0

if __name__ == "__main__":
    exit(main())
EOF
        destination = "local/teamsstatus_standalone.py"
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }

    restart {
      attempts = 3
      interval = "5m"
      delay    = "15s"
      mode     = "fail"
    }
  }
}
@@ -1,12 +0,0 @@
# Disabled

# resource "nomad_job" "teamsstatus" {
#   jobspec = file("${path.module}/teamsstatus.nomad.hcl")
# }

# resource "nomad_variable" "teamsstatus" {
#   path = "nomad/jobs/teamsstatus"
#   items = {
#     token_cache_json = file("${path.module}/token_cache.json")
#   }
# }
@@ -2,53 +2,14 @@ resource "nomad_job" "transfer" {
   jobspec = file("transfer.nomad.hcl")
 }
 
-resource "nomad_csi_volume_registration" "unraid_transfer" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_transfer"
-  name        = "unraid_transfer"
-  external_id = "unraid_transfer"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/transfer"
-  }
-
-  secrets = {
-    "username" = "anon"
-    "password" = ""
-  }
-}
+module "unraid_smb_transfer" {
+  source = "./modules/unraid_smb"
+  name   = "transfer"
+  id     = "unraid_transfer"
+  share  = "transfer"
+}
 
-resource "nomad_csi_volume_registration" "unraid_appdata_transferfilebrowser" {
-  #Note: Before chaning the definition of this volume you need to stop the jobs that are using it
-  depends_on = [data.nomad_plugin.smb]
-  plugin_id  = "smb"
-
-  volume_id   = "unraid_appdata_transferfilebrowser"
-  name        = "unraid_appdata_transferfilebrowser"
-  external_id = "unraid_appdata_transferfilebrowser"
-
-  capability {
-    access_mode     = "single-node-writer"
-    attachment_mode = "file-system"
-  }
-
-  context = {
-    source = "//192.168.1.192/appdata"
-    subDir = "transferfilebrowser" # Note: Needs to be manually created on the share
-  }
-
-  secrets = {
-    "username" = "nomad"
-    "password" = data.sops_file.secrets.data["unraid.nomad"]
-  }
-}
+module "appdata_transferfilebrowser" {
+  source = "./modules/appdata"
+  name   = "transferfilebrowser"
+}
2-nomad-config/unifi.nomad.hcl (new file, 50 lines)
@@ -0,0 +1,50 @@
job "unifi-network" {
  group "unifi-network" {
    count = 1

    task "unifi-controller" {
      driver = "docker"

      config {
        image = "jacobalberty/unifi:v9.5.21"

        // Fixed IP on the actual network so that devices can find it
        network_mode = "macvlan"
        ipv4_address = "192.168.1.50"
      }

      env {
        TZ                 = "Australia/Melbourne"
        SYSTEM_IP          = "192.168.1.50"
        JVM_INIT_HEAP_SIZE = "1024M"
        JVM_MAX_HEAP_SIZE  = "1024M"
        UNIFI_STDOUT       = "true"
      }

      volume_mount {
        volume      = "unraid_appdata_unifi_network"
        destination = "/unifi" # Expected root directory (contains data, log, cert subdirs)
        read_only   = false
      }

      resources {
        cpu        = 200
        memory     = 1850
        memory_max = 2500
      }
    }

    # CSI volume for UniFi Controller persistent data/logs
    volume "unraid_appdata_unifi_network" {
      type            = "csi"
      read_only       = false
      source          = "unraid_appdata_unifi_network"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"

      mount_options {
        mount_flags = ["uid=0", "gid=0"]
      }
    }
  }
}
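The `network_mode = "macvlan"` setting names a Docker network that must already exist on the host; it is typically created once out-of-band with something like `docker network create -d macvlan -o parent=<nic> --subnet 192.168.1.0/24 macvlan` (the parent interface and subnet here are assumptions about this LAN). The fixed `ipv4_address` gives UniFi devices a stable controller address to adopt against.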
2-nomad-config/unifi.tf (new file, 9 lines)
@@ -0,0 +1,9 @@
resource "nomad_job" "unifi_network" {
  jobspec = file("unifi.nomad.hcl")
}

module "appdata_unifi_network" {
  source = "./modules/appdata"
  name   = "unifi-network"
}
renovate.json (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["config:recommended"],
  "customManagers": [
    {
      "description": "Update Docker image tags in Nomad job files",
      "customType": "regex",
      "fileMatch": ["\\.nomad\\.hcl$"],
      "matchStrings": [
        "image\\s*=\\s*\"(?<depName>[^:\"]+):(?<currentValue>[^\"]+)\""
      ],
      "datasourceTemplate": "docker"
    }
  ]
}
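As a worked example, the unifi job added above contains exactly the kind of line this custom manager targets:

image = "jacobalberty/unifi:v9.5.21"

The regex captures `jacobalberty/unifi` as `depName` and `v9.5.21` as `currentValue`, and `"datasourceTemplate": "docker"` tells Renovate to look that name up as a Docker image tag, so version bumps to these Nomad jobs arrive as ordinary Renovate PRs against the Gitea repo.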