64 Commits

Author SHA1 Message Date
4acfdf4f1d Update litestream/litestream Docker tag to v0.5.11 2026-04-18 06:17:26 +00:00
a30e60b557 Add custom managers to renovate.json for Docker image updates 2026-04-18 16:15:18 +10:00
2536e855e5 Add renovate.json 2026-04-18 16:14:43 +10:00
dbe11dc8fa Add renovate and ntfy (unrelated) 2026-04-18 16:14:43 +10:00
b27f3e58ca Add openreader 2026-04-18 11:37:18 +10:00
5b3f2cf8f4 Move frigate into the cluster and enable GPU detector 2026-03-28 17:13:09 +11:00
68cf58ead5 Remove some state moved declarations 2025-11-20 23:45:56 +11:00
d3ac8a252b Make appdata and unraid smb modules to reduce duplication 2025-11-20 23:45:16 +11:00
961ec128f1 Setup prowlarr 2025-11-19 20:49:05 +11:00
d84eb73db0 Connect sonarr to deluge 2025-11-19 20:46:38 +11:00
a3eaab5a07 Add deluge bittorrent client 2025-11-10 23:27:50 +11:00
cf4daacab5 Add jellyfin
Customised the forward auth in authelia to ignore Authorization headers as authelia was failing to parse the Authorization: MediaBrowser headers that jellyfin uses.
2025-11-10 19:24:21 +11:00
2a038e59e8 Add sonarr 2025-11-09 15:51:25 +11:00
443d614a66 Use the hostname for the unraid server rather than the IP 2025-11-07 19:42:38 +11:00
2803f694e8 Add Unifi Network 2025-11-06 19:30:42 +11:00
bbff0f6692 Remove no longer used teams status job 2025-10-22 22:05:25 +11:00
bd815e96c6 Complete migration now that the changes are applied. 2025-10-22 22:04:37 +11:00
92f60a7572 Sort parts of the nomad infra into folders
This should make finding things easier
2025-10-22 22:02:25 +11:00
8869bd1cb2 Stop terraform thinking some settings are inconsistent 2025-10-21 21:47:39 +11:00
c473ef68fd Move secrets to subfolder 2025-10-21 21:47:05 +11:00
fd7fdd00f3 Add more nodes and update them all to the latest config 2025-10-21 21:41:15 +11:00
8a375c0133 Remove noauth setting that is probably wrong as it doesn't work 2025-10-21 21:35:35 +11:00
7302842add Refine install instructions 2025-10-20 20:52:15 +11:00
50cddcd033 Add new servers to consul retry join list 2025-10-20 20:51:22 +11:00
307f27a10b Use ipv4 for nomad and consul to reduce connectivity issues on my network 2025-10-20 20:25:58 +11:00
700046cfd1 Remove unused strip-magic-token middleware from Traefik configuration 2025-10-20 20:25:19 +11:00
c3f25d1bc5 Prepare for adding LattePanda IOTA based cluster hosts.
This required an option to configure the available CPU: Nomad allocates based on the base CPU frequency, but the N150 reports an 800 MHz base while boosting to 3.6 GHz (more than 4x higher), which would leave the CPU under-utilised. Instead we allocate at 1.8 GHz (x4 cores).
2025-10-15 23:38:11 +11:00
cdf2a6b72e Fixup: traefik requires the certificate secrets to be protected. Made the same change on the actual storage host. 2025-10-14 22:07:48 +11:00
f7c2752192 Explicitly enable info level logging in traefik to make it easier to find where to set it to debug level 2025-10-14 20:11:56 +11:00
bf98fb84be Make sure there is always one traefik instance up when changing the configuration so we can still access nomad via it 2025-10-14 20:11:22 +11:00
2d931f9cfa Add kopia access to traefik 2025-10-14 20:10:32 +11:00
3cc6f4827d Route to traefik directly via localhost due to inability to route via the external port 2025-10-13 21:23:42 +11:00
4a5ad8d36b Setup storage of immich resources and add tailscale access to allow uploading files too large to go through Cloudflare (100 MB upload cap). See https://github.com/immich-app/immich/issues/17729 and https://github.com/immich-app/immich/pull/22385 2025-10-13 20:48:35 +11:00
e6c6bb0c91 Resolve authelia via consul service mesh 2025-10-12 20:58:58 +11:00
e2562ce4b0 Add an immich server 2025-10-11 14:54:07 +11:00
b53bfe4075 Improve resilience by changing routing to traefik and setting up more servers.
Some changes were required to set up 3 VMs as the cluster since the NUC failed and we are waiting for new hardware to arrive.
The ingress routing from the internet was changed to use a cloudflared tunnel to traefik instead of going via a specific host.
2025-10-11 14:46:06 +11:00
7f3161b2bb Add magic token domain for hass to allow app access 2025-10-04 14:36:58 +10:00
facc3c64b2 Route frigate.othrayte.one to internal frigate instance 2025-10-04 14:18:16 +10:00
d64fec4bc0 Disable Nomad job configuration for Teams Status 2025-10-04 13:58:44 +10:00
8b234b8322 Fix bug in teams status (was fixed long ago) 2025-10-04 13:51:26 +10:00
612d322c4f Update session management settings for Authelia: extend inactivity duration, adjust expiration time, and set remember_me period. 2025-10-04 13:46:56 +10:00
bc2bd41018 Add camera locations and IP addresses to documentation 2025-10-04 13:46:29 +10:00
786b2c6670 Switch from tailscale authkeys to an oauth client to fix issues with key expiry 2025-09-06 22:17:24 +10:00
2d497e5195 Add Nomad job configuration for Teams Status Updater 2025-06-07 09:07:48 +10:00
8920be1ba0 Fix use of wrong port in traefik service definition 2025-05-28 00:05:46 +10:00
525e0eaf9f Bind consul on all interfaces to fix issues with nodes finding each other. Also expose traefik to make diagnostics of issues easier. 2025-05-27 23:23:25 +10:00
234c6d075c Improve use of postgres from terraform 2025-05-27 23:22:08 +10:00
e52c2c1fc5 Ensure that host volumes are only on the required nodes 2025-05-25 23:35:36 +10:00
8b0b7c1e73 Fix some networking issues and setup a second nomad host 2025-05-25 22:40:41 +10:00
376c278c11 Cleanup credentials and db use 2025-05-25 18:01:47 +10:00
ffbd240453 Fix pgbackup cron to only run once per day 2025-05-23 01:01:32 +10:00
8e586b3352 Move each service to its own tf file 2025-05-23 00:43:59 +10:00
c1aeb11354 Use tailscale to allow ssh access to gitea 2025-05-23 00:15:04 +10:00
3f70bc62d3 Disable tailscale on the nomad host 2025-05-22 23:25:35 +10:00
f7c4defe7d Add gitea 2025-05-19 22:45:33 +10:00
3ab392b50b Move nomad var secrets to secrets.enc.json 2025-05-18 23:44:24 +10:00
d2279028be Remove the hello world services initially added for testing 2025-05-18 21:53:44 +10:00
9cdd529633 Format terraform and nomad files 2025-05-18 21:50:04 +10:00
837cfdae68 Remove comment about terraform access to nomad
It was originally only working over tailscale, but it turned out it was just being blocked by the host firewall, which tailscale was bypassing. This was fixed back in the initial commit using nix by setting networking.firewall.allowedTCPPorts to include 4646.
2025-05-18 21:50:04 +10:00
021d22048d Cleanup terraform files
by moving core infra into its own file
2025-05-18 21:50:04 +10:00
b10df52f1c Allow login sessions to last longer than 1hr 2025-05-18 20:45:26 +10:00
08a2e458b2 Initial work on db backups 2025-05-18 20:18:48 +10:00
c6925362a6 Add initial PostgreSQL and pgAdmin services with Nomad configuration 2025-05-16 22:43:45 +10:00
805636f44c Properly persistent configs 2025-05-15 19:06:09 +10:00
65 changed files with 3210 additions and 1534 deletions

.vscode/extensions.json vendored Normal file

@@ -0,0 +1,3 @@
{
"recommendations": ["hashicorp.terraform", "fredwangwang.vscode-hcl-format"]
}

.vscode/settings.json vendored Normal file

@@ -0,0 +1,5 @@
{
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.formatOnSave": true
}

0-hardware/cameras.md Normal file

@@ -0,0 +1,5 @@
| Location | IP |
| --------- | ------------- |
| Doorbell | 192.168.1.108 |
| Side Gate | 192.168.1.109 |
| Ceiling | 192.168.1.110 |


@@ -1,10 +1,10 @@
# Server Names
From https://namingschemes.com/Hitchhikers_Guide_Planets.
Was [google sheet](https://docs.google.com/spreadsheets/d/1aLb14nme9gprYzWOEo5IadG1vKEtf8VQk0tkCBGz9KA/edit#gid=0).
| Name | Hostname | Use |
| ---------------------- | ----------------------- | ------------------------------------------------------------- |
| --------------------- | ----------------------- | ----------------------------------------------------------------------- |
| Antares | antares-win | Win11 PC |
| Barteldan | barteldan-linux | Fedora Linux PC |
| Betelgeuse Seven | betelgeuse-seven-unraid | Unraid Storage Host |
@@ -17,7 +17,14 @@ Was [google sheet](https://docs.google.com/spreadsheets/d/1aLb14nme9gprYzWOEo5Ia
| Golgafrincham | | |
| Han Wavel | | |
| Jaglan Beta | | Compute Cluster Hosts |
| - Moon 1 | jaglan-beta-m01 | NUC Cluster Host<br>(Intel \| N5105 [4/4], 16GB ram, 1TB SSD) |
| - Moon 1 | jaglan-beta-m01 | NUC Cluster Host<br>(Intel \| N5105 [4/4], 16GB, 1TB SSD) |
| - Moon 2 | jaglan-beta-m02 | LattePanda IOTA Cluster Host<br>(Intel \| N150 [4/4], 16GB, 129GB eMMC) |
| - Moon 3 | jaglan-beta-m03 | LattePanda IOTA Cluster Host<br>(Intel \| N150 [4/4], 16GB, 129GB eMMC) |
| - Moon 4 | jaglan-beta-m04 | LattePanda IOTA Cluster Host<br>(Intel \| N150 [4/4], 16GB, 129GB eMMC) |
| - Moon 5 | jaglan-beta-m05 | LattePanda IOTA Cluster Host<br>(Intel \| N150 [4/4], 16GB, 129GB eMMC) |
| - Moon 20 | jaglan-beta-m20 | VM on unraid |
| - Moon 21 | jaglan-beta-m21 | VM on unraid |
| - Moon 22 | jaglan-beta-m22 | VM on unraid |
| - Moon 42 (Test moon) | jaglan-beta-m42 | Test server (VM) |
| Krikkit | | |
| Lamuella | | |


@@ -1,8 +1,45 @@
Follow steps at https://nixos.org/manual/nixos/stable/#sec-installation-manual-summary
Determine the correct device to install to
Ensure that ssh is enabled and the hostname is set
```sh
lsblk
```
services.openssh.enable = true;
services.openssh.settings.PermitRootLogin = "yes";
networking.hostName = "jaglan-beta-m01";
Steps based on https://nixos.org/manual/nixos/stable/#sec-installation-manual-summary for UEFI
```sh
parted /dev/mmcblk0 -- mklabel gpt
# If there is an existing OS you will have to type yes to proceed
parted /dev/mmcblk0 -- mkpart root ext4 512MB -8GB
parted /dev/mmcblk0 -- mkpart swap linux-swap -8GB 100%
parted /dev/mmcblk0 -- mkpart ESP fat32 1MB 512MB
parted /dev/mmcblk0 -- set 3 esp on
mkfs.ext4 -L nixos /dev/mmcblk0p1
mkswap -L swap /dev/mmcblk0p2
swapon /dev/mmcblk0p2
mkfs.fat -F 32 -n boot /dev/mmcblk0p3
mount /dev/disk/by-label/nixos /mnt
mkdir -p /mnt/boot
mount -o umask=077 /dev/disk/by-label/boot /mnt/boot
nixos-generate-config --root /mnt
nano /mnt/etc/nixos/configuration.nix
# Set hostname networking.hostName = "jaglan-beta-mNN";
nixos-install
# Set the root password
reboot
nano /etc/nixos/configuration.nix
# Enable ssh access
# services.openssh.enable = true;
# services.openssh.settings.PermitRootLogin = "yes";
nixos-rebuild switch
```
If starting from older nixos, upgrade the OS
```sh
nix-channel --list
nix-channel --add https://channels.nixos.org/nixos-25.05 nixos
nix-channel --list
nixos-rebuild switch --upgrade
```


@@ -0,0 +1,8 @@
Avoid applying changes to multiple hosts at once as this could take down the cluster; instead apply to each host one at a time (a loop sketch follows the commands below)
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m02"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m03"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m04"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m05"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m20"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m21"]'
terraform apply -target='null_resource.deploy_nixos["jaglan-beta-m22"]'
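The same rollout as a loop is sketched below (bash assumed; the health-check commands in the comment are suggestions, not something this repo pins down):
```sh
# Sketch: roll the config out one node at a time, pausing between nodes.
for node in jaglan-beta-m02 jaglan-beta-m03 jaglan-beta-m04 jaglan-beta-m05 \
            jaglan-beta-m20 jaglan-beta-m21 jaglan-beta-m22; do
  terraform apply -target="null_resource.deploy_nixos[\"$node\"]"
  # Confirm the node rejoined (e.g. `nomad node status` / `consul members`)
  read -r -p "Node $node applied. Press Enter to continue to the next node..."
done
```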


@@ -11,7 +11,7 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "jaglan-beta-m01"; # Define your hostname.
networking.hostName = "${hostname}"; # Define your hostname.
time.timeZone = "Australia/Melbourne";
@@ -32,7 +32,6 @@
# List services that you want to enable:
services = {
tailscale.enable = true;
nomad = {
enable = true;
enableDocker = true;
@@ -41,30 +40,61 @@
datacenter = "jaglan-beta";
server = {
enabled = true;
%{if bootstrap ~}
bootstrap_expect = 1;
%{endif ~}
};
client = {
enabled = true;
preferred_address_family = "ipv4";
%{if cpu_total_compute != null ~}
cpu_total_compute = ${cpu_total_compute};
%{endif ~}
%{if node_class != null ~}
node_class = "${node_class}";
%{endif ~}
host_volume = {
traefik = {
path = "/opt/traefik";
%{ for volume in host_volumes ~}
${volume} = {
path = "/opt/${volume}";
read_only = false;
};
%{ endfor ~}
};
cni_path = "$${pkgs.cni-plugins}/bin";
};
plugin.docker.config.allow_privileged = true;
};
extraPackages = with pkgs; [
cni-plugins
consul
];
};
consul = {
enable = true;
webUi = true;
interface.bind = "tailscale0"; # Bind to the Tailscale interface
interface.advertise = "tailscale0"; # Advertise the Tailscale interface
interface.bind = "${bind_interface}";
interface.advertise = "${bind_interface}";
forceAddrFamily = "ipv4";
extraConfig = {
client_addr = "{{ GetPrivateInterfaces | exclude \"type\" \"ipv6\" | join \"address\" \" \" }} {{ GetAllInterfaces | include \"flags\" \"loopback\" | join \"address\" \" \" }}";
%{if bootstrap ~}
bootstrap_expect = 1;
%{endif ~}
server = true;
client_addr = "127.0.0.1 100.79.223.55";
retry_join = [
"jaglan-beta-m01"
"jaglan-beta-m02"
"jaglan-beta-m03"
"jaglan-beta-m04"
"jaglan-beta-m05"
"jaglan-beta-m20"
"jaglan-beta-m21"
"jaglan-beta-m22"
];
datacenter = "jaglan-beta";
connect.enabled = true;
ports.grpc = 8502;
};
};
openssh = {
@@ -76,12 +106,70 @@
systemd.tmpfiles.rules = [
# Fix issue where nomad needs alloc_mounts to be writable
"d /var/lib/alloc_mounts 0755 root root -"
# Create a directory for Traefik to store its data (tls certs, etc.)
"d /opt/traefik 0755 root root -"
%{ for volume in host_volumes ~}
# Create a directory for ${volume} to store its data
"d /opt/${volume} 0755 root root -"
%{ endfor ~}
];
# Open ports in the firewall. 464X are the default ports for Nomad.
networking.firewall.allowedTCPPorts = [ 80 443 4646 4647 4648 ];
# Open ports in the firewall. 80/443 are for HTTP/HTTPS (traefik), 8081 is the traefik API, 464X are the default ports for Nomad, 830X are the default ports for Consul.
networking.firewall.allowedTCPPorts = [ 80 443 8081 4646 4647 4648 8300 8301 8500 ];
networking.firewall.allowedUDPPorts = [ 8301 ];
# Ensure Docker daemon is available (Nomad enableDocker only configures Nomad, does not guarantee docker service)
virtualisation.docker.enable = true;
%{if node_class == "latte-panda-n150" ~}
# Enable Intel iGPU (N150 UHD Graphics) for OpenVINO / VA-API workloads running in Docker
hardware.graphics = {
enable = true;
extraPackages = with pkgs; [
intel-media-driver # VA-API (iHD)
intel-compute-runtime # OpenCL / oneAPI
];
};
%{endif ~}
# Proper systemd service definition for macvlan network creation
systemd.services.docker-macvlan-network = {
description = "Ensure macvlan Docker network exists";
after = [ "network-online.target" "docker.service" ];
wants = [ "network-online.target" "docker.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
};
# Provide required binaries in PATH
path = [ pkgs.docker pkgs.bash pkgs.coreutils pkgs.iproute2 pkgs.gnugrep ];
script = ''
set -euo pipefail
NET_NAME=macvlan
if docker network inspect "$NET_NAME" >/dev/null 2>&1; then
echo "Docker network $NET_NAME already exists"
exit 0
fi
echo "Creating Docker macvlan network $NET_NAME on interface ${bind_interface}"
# We intentionally do NOT use --ip-range here to avoid allocating the
# same reserved pool on every host (which could lead to collisions if
# multiple macvlan containers are started across nodes). Instead, we
# give critical services (like UniFi) an explicit static IP via the
# Nomad job (Docker static assignment) and rely on manual DHCP
# reservations to prevent conflicts.
#
# If you later need multiple macvlan-assigned containers per host,
# consider one of these strategies:
# 1. Per-host distinct network name + ip-range slice (macvlan-m01, ...)
# 2. Parameterize an ip-range per host in Terraform and template here
# 3. Keep a registry of allocated static IPs in Consul KV / Nomad vars
docker network create -d macvlan \
--subnet=192.168.1.0/24 \
--gateway=192.168.1.1 \
-o parent=${bind_interface} \
"$NET_NAME"
echo "Docker macvlan network $NET_NAME created"
'';
restartIfChanged = false; # Don't rerun just because comment changed
};
# Copy the NixOS configuration file and link it from the resulting system
# (/run/current-system/configuration.nix). This is useful in case you
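For reference, the host_volume template loop above expands per node; for a node whose host_volumes list is ["traefik", "postgres"], the rendered Nix would look like this sketch (the volume names here are illustrative):
```nix
host_volume = {
  traefik = {
    path = "/opt/traefik";
    read_only = false;
  };
  postgres = {
    path = "/opt/postgres";
    read_only = false;
  };
};
```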


@@ -13,21 +13,44 @@ terraform {
provider "template" {}
variable "ssh_password" {
description = "Password for SSH connection"
type = string
variable "nodes" {
description = "Map of nodes with host, password, bind interface, and host volumes"
type = map(object({
host = string
password = string
bind_interface = string
bootstrap = optional(bool, false) # Optional field for bootstrap nodes
cpu_total_compute = optional(number, null) # Optional field for CPU total compute
node_class = optional(string, null) # Optional Nomad node_class for scheduling constraints
host_volumes = list(string)
}))
}
locals {
config_files = { for k, v in var.nodes :
k => templatefile("${path.module}/configuration.nix", {
hostname = v.host
bind_interface = v.bind_interface
bootstrap = v.bootstrap
cpu_total_compute = v.cpu_total_compute
node_class = v.node_class
host_volumes = v.host_volumes
})
}
}
resource "null_resource" "deploy_nixos" {
for_each = var.nodes
connection {
type = "ssh"
host = "jaglan-beta-m01"
host = "${each.value.host}.lan"
user = "root"
password = var.ssh_password
password = each.value.password
}
provisioner "file" {
source = "configuration.nix"
content = local.config_files[each.key]
destination = "/tmp/configuration.nix"
}
@@ -39,6 +62,6 @@ resource "null_resource" "deploy_nixos" {
}
triggers = {
configuration_content = file("configuration.nix")
configuration_content = local.config_files[each.key]
}
}
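The deploy resource above is driven by var.nodes; a sketch of one entry (e.g. in a terraform.tfvars), with the password and interface name as placeholders and the N150 numbers taken from the commit notes:
```hcl
nodes = {
  "jaglan-beta-m02" = {
    host              = "jaglan-beta-m02" # ssh target becomes jaglan-beta-m02.lan
    password          = "<root password>" # placeholder
    bind_interface    = "enp1s0"          # hypothetical interface name
    node_class        = "latte-panda-n150"
    cpu_total_compute = 7200              # 1.8 GHz x 4 cores (see commit notes)
    host_volumes      = ["traefik"]
  }
}
```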

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,6 +1,59 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/carlpett/sops" {
version = "0.7.2"
constraints = "~> 0.5"
hashes = [
"h1:eetjYKFBQb6nbgxjehD/gzzAmH2ru94ha2tEzXNiNy8=",
"zh:43f218054ea3a72c9756bf989aeebb9d0f23b66fd08e9fb4ae75d4f921295e82",
"zh:57fd326388042a6b7ecd60f740f81e5ef931546c4f068f054e7df34acf65d190",
"zh:87b970db8c137f4c2fcbff7a5705419a0aea9268ae0ac94f1ec5b978e42ab0d2",
"zh:9e3b67b89ac919f01731eb0466baa08ce0721e6cf962fe6752e7cc526ac0cba0",
"zh:c028f67ef330be0d15ce4d7ac7649a2e07a98ed3003fca52e0c72338b5f481f8",
"zh:c29362e36a44480d0d9cb7d90d1efba63fe7e0e94706b2a07884bc067c46cbc7",
"zh:d5bcfa836244718a1d564aa96eb7d733b4d361b6ecb961f7c5bcd0cadb1dfd05",
]
}
provider "registry.terraform.io/cloudflare/cloudflare" {
version = "5.5.0"
constraints = "~> 5.0"
hashes = [
"h1:wZhU174xytOMZ1t6uDUQiLtR/XKpi2RH9OzMz0XqP9Q=",
"zh:178f29dee2edac39252780819f34004b1841770c61ee7fb5a625afaece6495cd",
"zh:6faf26203167ae20ca5c8ece4a8bb1c4187137505058fb7b1a4bd5095823e648",
"zh:97c91a95819336b8c41618919786ddd2dca643d28219d52af1d80b88018c6eec",
"zh:bbc53670fc2613e3fe81b5bf7b8674c5ad083a206fa8af34f0f055a8d06b2d01",
"zh:d305bcb01249ada21b80e5038e371f6ca0a60d95d7052df82456e4c4963f3bfc",
"zh:e2f9db57ead7100676b790a3e4567d88443fae0e19127e66b3505210de93e4b5",
"zh:eb8cef2e6cbf05237b8a2f229314ae12c792ed5f8f60fe180102bdf17dc30841",
"zh:f51a5bb0130d2f42772988ee56723f176aa230701184a0f5598dbb1c7b4c3906",
"zh:f809ab383cca0a5f83072981c64208cbd7fa67e986a86ee02dd2c82333221e32",
]
}
provider "registry.terraform.io/cyrilgdn/postgresql" {
version = "1.25.0"
hashes = [
"h1:4Hlupc8gYrPnFKisesXs9lypK6LXslU4L4tjBZOhmiE=",
"zh:0f9db6e1274603d642e96b58eaf6cc4223f7118f2d7ce909dc4812d332cc002a",
"zh:1819470f0304c6a60b2b51817cb43f6ff59a49e08cc9e50644b86b3a76c91601",
"zh:27bfb544983cac101a7c7c2e4cb9939a712dffcdd7ddcab83c2f8afc334e33c5",
"zh:46166f6f05771b0495df18459fdf3a63fae8b38e95a1b2754f03d006e17ea33d",
"zh:64d53afc52f26e8214990acc3e07f3b47bef628aa6b317595a8faec05b252209",
"zh:944d7ded418c022dd3ee513246677d601376fa38d76c9c4aecff2c2eefcaa35b",
"zh:9819551b61542a6d322d6a323bbb552ce02e769ce2222fd9bb1935473c7c4b3c",
"zh:c38bd73e208fe216efab48d099c85b8ad1e51ff102b3892443febc9778e7236e",
"zh:c73de133274dcc7a03e95f598550facc59315538f355e57e14b36e222b298826",
"zh:c7af02f5338bfe7f1976e01d3fcf82e05b3551893e732539a84c568d25571a84",
"zh:d1aa3d7432c7de883873f8f70e9a6207c7b536d874486d37aee0ca8c8853a890",
"zh:e17e9809fc7cc2d6f89078b8bfe6308930117b2270be8081820da40029b04828",
"zh:e1b21b7b7022e0d468d72f4534d226d57a7bfd8c96a4c7dc2c2fa0bb0b99298d",
"zh:f24b73645d8bc225f692bdf9c035411099ef57138569f45f3605ec79ac872e3b",
]
}
provider "registry.terraform.io/hashicorp/nomad" {
version = "2.5.0"
hashes = [

2-nomad-config/1-data.tf Normal file

@@ -0,0 +1,19 @@
module "data" {
source = "./1-data"
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
provider "postgresql" {
host = "jaglan-beta-m21.lan"
port = 5432
database = "postgres"
username = "postgres"
password = data.sops_file.secrets.data["postgres.postgres"]
sslmode = "disable"
connect_timeout = 15
}


@@ -0,0 +1,5 @@
resource "nomad_job" "csi-smb" {
jobspec = file("${path.module}/csi-smb.nomad.hcl")
}


@@ -0,0 +1,25 @@
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
postgresql = {
source = "cyrilgdn/postgresql"
}
}
}
provider "nomad" {
address = "http://jaglan-beta-m20.lan:4646"
}
data "sops_file" "secrets" {
source_file = "secrets/secrets.enc.json"
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}


@@ -0,0 +1,97 @@
job "pgadmin" {
group "pgadmin" {
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
network {
mode = "bridge"
port "http" {
to = 80
}
}
task "pgadmin" {
driver = "docker"
config {
image = "dpage/pgadmin4:latest"
ports = ["http"]
volumes = [
"local/servers.json:/pgadmin4/servers.json",
"secrets/.pgpass:/home/.pgpass"
]
}
env = {
PGADMIN_DEFAULT_EMAIL = "othrayte@gmail.com"
PGADMIN_DEFAULT_PASSWORD = "admin"
PGADMIN_CONFIG_WTF_CSRF_ENABLED = "False"
PGADMIN_CONFIG_WTF_CSRF_CHECK_DEFAULT = "False"
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION = "False"
PGADMIN_CONFIG_SERVER_MODE = "False"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED = "False"
}
resources {
cpu = 500
memory = 256
}
service {
name = "pgadmin"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.pgadmin.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
template {
data = <<EOF
{
"Servers": {
"1": {
"Group": "Servers",
"Name": "postgres",
"Host": "localhost",
"Port": 5432,
"MaintenanceDB": "postgres",
"Username": "postgres",
"PassFile": "/home/.pgpass"
}
}
}
EOF
destination = "local/servers.json"
}
template {
data = <<EOF
localhost:5432:*:postgres:{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "secrets/.pgpass"
perms = "0400"
uid = 5050 # pgadmin
}
}
}
}


@@ -0,0 +1,77 @@
job "pgbackup" {
type = "batch"
periodic {
# Note: To avoid issues with daylight saving time, avoid scheduling jobs at 2am +/- 1 hour
cron = "0 4 * * *" # Every day at 4am
time_zone = "Australia/Melbourne"
prohibit_overlap = true
}
group "pgbackup" {
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
task "pgbackup" {
driver = "docker"
config {
image = "postgres:latest"
command = "/bin/bash"
args = ["-c", "pg_dumpall -h localhost -U postgres > /backup/all_databases.sql"]
volumes = ["secrets/postgres_password:/run/secrets/postgres_password"]
}
user = "1000"
volume_mount {
volume = "unraid_database_dump"
destination = "/backup"
read_only = false
}
env {
PGPASSFILE = "/run/secrets/postgres_password"
}
template {
data = <<EOF
localhost:5432:*:postgres:{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "/secrets/postgres_password"
perms = "0400"
uid = 1000
}
resources {
cpu = 250
memory = 128
}
}
volume "unraid_database_dump" {
type = "csi"
read_only = false
source = "unraid_database_dump"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=0"]
}
}
network {
mode = "bridge"
}
}
}


@@ -0,0 +1,67 @@
job "postgres" {
group "postgres" {
service {
name = "postgres"
port = "db"
connect {
sidecar_service {}
}
}
task "postgres" {
driver = "docker"
config {
# Temporarily pin to v17 as v18 moved the default data directory and immich doesn't officially support it yet
# immich also needs >= 0.3.0, < 0.5.0. https://docs.immich.app/administration/postgres-standalone/#prerequisites
#image = "postgres:17"
image = "tensorchord/vchord-postgres:pg17-v0.4.3"
ports = ["db"]
volumes = [
"secrets/postgres_password:/run/secrets/postgres_password"
]
}
volume_mount {
volume = "data"
destination = "/var/lib/postgresql/data"
read_only = false
}
env {
POSTGRES_USER = "postgres"
POSTGRES_PASSWORD_FILE = "/run/secrets/postgres_password"
POSTGRES_INITDB_ARGS = "--auth-host=md5"
}
resources {
cpu = 500
memory = 1024
}
template {
# This securely sets the initial password for the postgres user; to change it later
# you need to connect to the database and change it manually
data = <<EOF
{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "secrets/postgres_password"
}
}
network {
mode = "bridge"
port "db" {
static = 5432
}
}
volume "data" {
type = "host"
read_only = false
source = "postgres"
}
}
}


@@ -0,0 +1,26 @@
resource "nomad_job" "postgres" {
jobspec = file("${path.module}/postgres.nomad.hcl")
rerun_if_dead = true
}
resource "nomad_job" "pgadmin" {
jobspec = file("${path.module}/pgadmin.nomad.hcl")
}
resource "nomad_job" "pgbackup" {
jobspec = file("${path.module}/pgbackup.nomad.hcl")
}
resource "nomad_variable" "postgres" {
path = "nomad/jobs/postgres"
items = {
postgres_password = data.sops_file.secrets.data["postgres.postgres"]
}
}
module "unraid_smb_database_dump" {
source = "../modules/unraid_smb"
name = "dump"
id = "unraid_database_dump"
share = "database-dump"
}


@@ -0,0 +1,4 @@
module "ingress" {
source = "./2-ingress"
}


@@ -1,11 +1,25 @@
job "authelia" {
group "authelia" {
network {
mode = "bridge"
port "http" {
static = 9091
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
service {
name = "auth"
port = "http"
@@ -14,6 +28,10 @@ job "authelia" {
"traefik.enable=true",
]
connect {
sidecar_service {}
}
check {
type = "http"
path = "/health"
@@ -45,6 +63,12 @@ job "authelia" {
data = <<EOF
server:
address: tcp://0.0.0.0:{{ env "NOMAD_PORT_http" }}/
endpoints:
authz:
forward-auth:
implementation: 'ForwardAuth'
authn_strategies:
- name: 'CookieSession'
theme: "auto"
identity_validation:
reset_password:
@@ -59,22 +83,26 @@ access_control:
rules:
- domain: "*.othrayte.one"
policy: one_factor
# Disable auth for authelia
#- domain: "auth.othrayte.one"
# policy: bypass
session:
name: authelia_session
secret: "{{ with nomadVar "nomad/jobs/authelia" }}{{ .session_secret }}{{ end }}"
expiration: 3600
inactivity: '2 days and 9 hours'
expiration: '1 hour'
remember_me: '90 days'
cookies:
- domain: othrayte.one
authelia_url: "https://auth.othrayte.one"
storage:
local:
path: /config/db.sqlite3
encryption_key: "{{ with nomadVar "nomad/jobs/authelia" }}{{ .encryption_key }}{{ end }}"
postgres:
address: 'tcp://127.0.0.1:5432'
database: 'authelia'
schema: 'public'
username: 'authelia'
password: '{{ with nomadVar "nomad/jobs/authelia" }}{{ .database_pw }}{{ end }}'
timeout: '5s'
notifier:
filesystem:


@@ -0,0 +1,24 @@
resource "nomad_job" "authelia" {
jobspec = file("${path.module}/authelia.nomad.hcl")
}
resource "postgresql_role" "authelia" {
name = "authelia"
password = data.sops_file.secrets.data["authelia.database_pw"]
login = true
}
resource "postgresql_database" "authelia" {
name = "authelia"
owner = postgresql_role.authelia.name
}
resource "nomad_variable" "authelia" {
path = "nomad/jobs/authelia"
items = {
session_secret = data.sops_file.secrets.data["authelia.session_secret"]
jwt_secret = data.sops_file.secrets.data["authelia.jwt_secret"]
encryption_key = data.sops_file.secrets.data["authelia.encryption_key"]
database_pw = data.sops_file.secrets.data["authelia.database_pw"]
}
}


@@ -0,0 +1,33 @@
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 5"
}
postgresql = {
source = "cyrilgdn/postgresql"
}
}
}
provider "nomad" {
address = "http://jaglan-beta-m20.lan:4646"
}
data "sops_file" "secrets" {
source_file = "secrets/secrets.enc.json"
}
provider "cloudflare" {
api_token = data.sops_file.secrets.data["cloudflare.api_token"]
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}


@@ -0,0 +1,252 @@
job "traefik" {
group "traefik" {
count = 2
network {
mode = "bridge"
port "http" {
static = 80
}
port "https" {
static = 443
}
port "api" {
static = 8081
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "auth"
local_bind_port = 9091
}
}
}
}
}
service {
name = "traefik"
port = "api"
check {
name = "alive"
type = "tcp"
port = "api"
interval = "10s"
timeout = "2s"
}
}
task "traefik" {
driver = "docker"
config {
image = "traefik:v3.3"
ports = ["http", "https", "api"]
volumes = [
"local/traefik.yml:/etc/traefik/traefik.yml",
"local/configs/:/etc/traefik/configs/"
]
}
volume_mount {
volume = "unraid_appdata_traefik"
destination = "/opt/traefik"
read_only = false
}
template {
data = <<EOF
log:
level: INFO
entryPoints:
web:
address: ":80"
http:
redirections:
entryPoint:
to: websecure
scheme: https
websecure:
address: ":443"
http:
tls:
certResolver: letsencrypt
traefik:
address: ":8081"
api:
dashboard: true
insecure: true
providers:
file:
directory: "/etc/traefik/configs/"
consulCatalog:
prefix: "traefik"
exposedByDefault: false
defaultRule: {{"Host(`{{ .Name }}.othrayte.one`)"}}
endpoint:
address: "{{ env "NOMAD_HOST_IP_http" }}:8500"
scheme: "http"
certificatesResolvers:
letsencrypt:
acme:
email: "othrayte@gmail.com"
storage: "/opt/traefik/acme.json"
httpChallenge:
entryPoint: web
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
serversTransports:
ignorecert:
insecureSkipVerify: true
middlewares:
auth:
forwardAuth:
address: "http://localhost:9091/api/authz/forward-auth"
trustForwardHeader: true
auth-allow-token:
chain:
middlewares:
- auth
inject-kopia-basic-auth:
headers:
customRequestHeaders:
Authorization: "Basic {{ with nomadVar "nomad/jobs/traefik" }}{{ .kopia_basic_auth }}{{ end }}"
routers:
fallback:
rule: "HostRegexp(`^.+$`)"
entryPoints:
- websecure
middlewares:
- auth
service: noop@internal # This router just applies middleware
priority: 1
traefik:
rule: "Host(`traefik.othrayte.one`)"
service: traefik
middlewares:
- auth
nomad-ui:
rule: "Host(`nomad.othrayte.one`)"
service: nomad-ui
middlewares:
- auth
consul-ui:
rule: "Host(`consul.othrayte.one`)"
service: consul-ui
middlewares:
- auth
unraid:
rule: "Host(`unraid.othrayte.one`)"
service: unraid
middlewares:
- auth
kopia:
rule: "Host(`kopia.othrayte.one`)"
service: kopia
middlewares:
- auth
- inject-kopia-basic-auth
hass:
rule: "Host(`hass.othrayte.one`)"
service: hass
middlewares:
- auth
hass-token:
rule: "Host(`${hass_magic_token}-hass.othrayte.one`)"
service: hass
unifi-network:
rule: "Host(`network.othrayte.one`)"
service: unifi-network
middlewares:
- auth
services:
traefik:
loadBalancer:
servers:
- url: "http://localhost:8081"
nomad-ui:
loadBalancer:
servers:
- url: "http://{{ env "NOMAD_HOST_IP_http" }}:4646"
consul-ui:
loadBalancer:
servers:
- url: "http://{{ env "NOMAD_HOST_IP_http" }}:8500"
unraid:
loadBalancer:
servers:
- url: "http://betelgeuse-seven-unraid.lan:80"
kopia:
loadBalancer:
servers:
- url: "http://betelgeuse-seven-unraid.lan:51515"
hass:
loadBalancer:
servers:
- url: "http://192.168.1.234:8123"
unifi-network:
loadBalancer:
serversTransport: ignorecert
servers:
- url: "https://192.168.1.50:8443"
EOF
destination = "local/configs/nomad.yml"
}
resources {
cpu = 100
memory = 128
}
}
volume "unraid_appdata_traefik" {
type = "csi"
read_only = false
source = "unraid_appdata_traefik"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["file_mode=0600", "uid=1000", "gid=1000"]
}
}
task "cloudflared" {
driver = "docker"
config {
image = "cloudflare/cloudflared:latest"
args = [
"tunnel", "--no-autoupdate", "run"
]
}
template {
data = <<EOH
TUNNEL_TOKEN="{{ with nomadVar "nomad/jobs/traefik" }}{{ .cf_tunnel_token }}{{ end }}"
EOH
destination = "secrets/tunnel.env"
env = true # Load the file as environment variables
}
}
}
}
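The kopia_basic_auth value injected above is an ordinary HTTP Basic credential, i.e. the base64 of user:password; a sketch of generating it (username and password hypothetical):
```sh
# base64 of "user:password", as expected by the Authorization: Basic header
echo -n 'kopia-user:s3cret' | base64
```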


@@ -0,0 +1,39 @@
resource "cloudflare_dns_record" "othrayte-one" {
comment = "othrayte.one proxy via cloudflared tunnel to traefik"
zone_id = "2616ab2a44d0645b03fbc3106c79bd99"
type = "CNAME"
name = "othrayte.one"
content = "59ca3eb1-5f0b-45e1-97ff-e373569c6689.cfargotunnel.com"
proxied = true
ttl = 1 # Auto
}
resource "cloudflare_dns_record" "star-othrayte-one" {
comment = "*.othrayte.one proxy via cloudflared tunnel to traefik"
zone_id = "2616ab2a44d0645b03fbc3106c79bd99"
type = "CNAME"
name = "*.othrayte.one"
content = "59ca3eb1-5f0b-45e1-97ff-e373569c6689.cfargotunnel.com"
proxied = true
ttl = 1 # Auto
}
resource "nomad_variable" "traefik" {
path = "nomad/jobs/traefik"
items = {
cf_tunnel_token = data.sops_file.secrets.data["traefik.cf_tunnel_token"]
kopia_basic_auth = data.sops_file.secrets.data["traefik.kopia_basic_auth"]
}
}
resource "nomad_job" "traefik" {
jobspec = templatefile("${path.module}/traefik.nomad.hcl", {
hass_magic_token = nonsensitive(data.sops_file.secrets.data["hass.magic-token"])
})
}
module "appdata_traefik" {
source = "../modules/appdata"
name = "traefik"
access_mode = "multi-node-multi-writer"
}


@@ -0,0 +1,145 @@
job "deluge" {
group "deluge" {
network {
mode = "bridge"
port "http" {
to = 8112
}
}
task "wireguard" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "thrnz/docker-wireguard-pia:latest"
privileged = true
ports = ["http"]
}
env {
LOC = "aus_melbourne"
LOCAL_NETWORK = "192.168.1.0/24"
# PORT_FORWARDING = "1" # TODO: Find a way to tell deluge the forwarded port; the wireguard container writes it to /pia-shared/port.dat
}
template {
data = <<EOH
USER="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_user }}{{ end }}"
PASS="{{ with nomadVar "nomad/jobs/deluge" }}{{ .pia_pass }}{{ end }}"
EOH
destination = "secrets/pia.env"
env = true # Load the file as environment variables
}
resources {
cpu = 50
memory = 32
}
}
# Service for Traefik (external ingress)
service {
name = "deluge"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.deluge.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
# Service for Consul Connect (internal mesh communication)
service {
name = "deluge-api"
port = "http"
address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
# tags = [
# "traefik.enable=false",
# ]
connect {
sidecar_service {
//tags = ["traefik.enable=false"]
}
}
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
task "deluge" {
driver = "docker"
config {
image = "lscr.io/linuxserver/deluge:latest"
network_mode = "container:wireguard-${NOMAD_ALLOC_ID}" # Share namespace with VPN
}
env {
PUID = "1000"
PGID = "1000"
TZ = "Australia/Melbourne"
}
volume_mount {
volume = "unraid_appdata_deluge"
destination = "/config"
read_only = false
}
volume_mount {
volume = "unraid_media_deluge"
destination = "/data/downloads"
read_only = false
}
resources {
cpu = 400
memory = 2048
memory_max = 3000
}
}
volume "unraid_appdata_deluge" {
type = "csi"
read_only = false
source = "unraid_appdata_deluge"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_media_deluge" {
type = "csi"
read_only = false
source = "unraid_media_deluge"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
}
}

2-nomad-config/deluge.tf Normal file

@@ -0,0 +1,24 @@
resource "nomad_job" "deluge" {
jobspec = file("deluge.nomad.hcl")
}
resource "nomad_variable" "deluge" {
path = "nomad/jobs/deluge"
items = {
pia_user = data.sops_file.secrets.data["pia.user"]
pia_pass = data.sops_file.secrets.data["pia.pass"]
}
}
module "appdata_deluge" {
source = "./modules/appdata"
name = "deluge"
}
module "unraid_smb_deluge_media" {
source = "./modules/unraid_smb"
name = "deluge"
share = "media"
subDir = "downloads"
}


@@ -0,0 +1,216 @@
job "frigate" {
# Pin to N150 LattePanda nodes - Intel UHD iGPU for OpenVINO-accelerated detection.
# hardware.graphics (intel-compute-runtime) is deployed to these nodes via configuration.nix.
constraint {
attribute = "${node.class}"
value = "latte-panda-n150"
}
group "frigate" {
count = 1
network {
port "http" {
to = 5000
}
}
# Prestart: restore Frigate's SQLite DB from the Litestream file replica on the CIFS share.
# Runs to completion before the frigate task starts. Safe on first boot (-if-replica-exists
# is a no-op when no replica exists yet).
task "litestream-restore" {
lifecycle {
hook = "prestart"
sidecar = false
}
driver = "docker"
config {
image = "litestream/litestream:0.5.11"
command = "restore"
args = ["-if-replica-exists", "-config", "/local/litestream.yml", "/alloc/data/frigate.db"]
}
# Litestream config: replicate to /config/frigate.db.litestream/ on the CIFS share.
# Litestream writes its own segment format - no SQLite advisory locking involved.
# Frigate must be configured with database.path: /alloc/data/frigate.db in config.yml.
template {
data = <<EOH
dbs:
- path: /alloc/data/frigate.db
replicas:
- url: file:///config/frigate.db.litestream
EOH
destination = "local/litestream.yml"
}
volume_mount {
volume = "unraid_appdata_frigate"
destination = "/config"
read_only = false
}
resources {
cpu = 100
memory = 64
memory_max = 256
}
}
# Sidecar: continuously stream WAL changes from /alloc/data/frigate.db to the CIFS replica.
# Runs alongside frigate for the lifetime of the allocation.
task "litestream-replicate" {
lifecycle {
hook = "poststart"
sidecar = true
}
driver = "docker"
config {
image = "litestream/litestream:0.5"
command = "replicate"
args = ["-config", "/local/litestream.yml"]
}
template {
data = <<EOH
dbs:
- path: /alloc/data/frigate.db
replicas:
- url: file:///config/frigate.db.litestream
EOH
destination = "local/litestream.yml"
}
volume_mount {
volume = "unraid_appdata_frigate"
destination = "/config"
read_only = false
}
resources {
cpu = 100
memory = 64
memory_max = 256
}
}
task "frigate" {
driver = "docker"
config {
image = "ghcr.io/blakeblackshear/frigate:0.17.1"
ports = ["http"]
privileged = true
# Shared memory for inter-process frame buffers (frigate forks detector processes).
shm_size = 268435456 # 256 MiB
# Large tmpfs for decoded frame cache - avoids wearing out any storage.
mounts = [
{
type = "tmpfs"
target = "/tmp/cache"
readonly = false
tmpfs_options = {
size = 1000000000 # 1 GB (10^9 bytes)
}
}
]
# Intel iGPU render node - Frigate's bundled OpenVINO runtime auto-detects
# GPU device and uses it for object detection without any extra env vars.
# Requires hardware.graphics.enable = true on the NixOS node (N150 nodes).
devices = [
{
host_path = "/dev/dri/renderD128"
container_path = "/dev/dri/renderD128"
}
]
}
# RTSP password injected from Nomad variables (sourced from sops secrets).
# Reference in config.yml as: {FRIGATE_RTSP_PASSWORD}
template {
data = <<EOH
FRIGATE_RTSP_PASSWORD="{{ with nomadVar "nomad/jobs/frigate" }}{{ .rtsp_password }}{{ end }}"
EOH
destination = "secrets/frigate.env"
env = true
}
service {
name = "frigate"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.frigate.middlewares=auth@file",
"traefik.http.routers.frigate-token.rule=Host(`n7gdph5cuh7bd1cakbq8s099rvrv3qhs-frigate.othrayte.one`)",
]
check {
name = "alive"
type = "http"
path = "/api/version"
port = "http"
interval = "10s"
timeout = "5s"
}
}
env {
TZ = "Australia/Melbourne"
}
# config.yml lives here (read from CIFS). SQLite DB is at /alloc/data/frigate.db
# (local NVMe, managed by Litestream). Requires in config.yml:
# database:
# path: /alloc/data/frigate.db
volume_mount {
volume = "unraid_appdata_frigate"
destination = "/config"
read_only = false
}
# Recordings, clips, and exports.
volume_mount {
volume = "unraid_media_frigate"
destination = "/media/frigate"
read_only = false
}
resources {
# GPU handles inference; CPU manages stream ingestion, motion detection, and recording.
cpu = 2000
memory = 2048
}
}
volume "unraid_appdata_frigate" {
type = "csi"
read_only = false
source = "unraid_appdata_frigate"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["nobrl", "uid=0", "gid=0"]
}
}
volume "unraid_media_frigate" {
type = "csi"
read_only = false
source = "unraid_media_frigate"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["nobrl", "uid=0", "gid=0"]
}
}
}
}
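As the comments in this job note, Frigate itself must be pointed at the Litestream-managed database path; a sketch of the relevant config.yml fragment on the CIFS share:
```yaml
# /config/config.yml (on the CIFS share)
database:
  path: /alloc/data/frigate.db
```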

2-nomad-config/frigate.tf Normal file

@@ -0,0 +1,23 @@
resource "nomad_job" "frigate" {
jobspec = file("frigate.nomad.hcl")
}
resource "nomad_variable" "frigate" {
path = "nomad/jobs/frigate"
items = {
rtsp_password = data.sops_file.secrets.data["frigate.rtsp_password"]
}
}
module "appdata_frigate" {
source = "./modules/appdata"
name = "frigate"
}
module "unraid_smb_frigate_media" {
source = "./modules/unraid_smb"
name = "frigate"
share = "media"
subDir = "frigate"
}


@@ -0,0 +1,154 @@
job "gitea" {
group "gitea" {
network {
mode = "bridge"
port "http" {
to = 3000
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
service {
name = "code"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.gitea.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
# Separate service for Consul Connect ingress (address_mode=alloc avoids hairpin NAT issue)
service {
name = "code-connect"
port = "http"
address_mode = "alloc"
connect {
sidecar_service {}
}
}
task "gitea" {
driver = "docker"
config {
image = "gitea/gitea:latest"
ports = ["http"]
volumes = ["local/app.ini:/data/gitea/conf/app.ini"]
}
env = {
USER_UID = "1000"
USER_GID = "1000"
}
resources {
cpu = 500
memory = 512
}
volume_mount {
volume = "unraid_appdata_gitea"
destination = "/data"
read_only = false
}
template {
data = <<EOF
# Gitea configuration file
WORK_PATH = /data/
[database]
DB_TYPE = postgres
HOST = localhost:5432
NAME = gitea
USER = gitea
PASSWD = {{ with nomadVar "nomad/jobs/gitea" }}{{ .database_pw }}{{ end }}
[repository]
ROOT = /data/git/repositories
[server]
DOMAIN = code.othrayte.one
SSH_DOMAIN = git.othrayte.one
ROOT_URL = https://code.othrayte.one/
[lfs]
PATH = /data/git/lfs
[log]
MODE = console
LEVEL = info
ROOT_PATH = /data/gitea/log
[security]
INSTALL_LOCK = true
INTERNAL_TOKEN = {{ with nomadVar "nomad/jobs/gitea" }}{{ .internal_token }}{{ end }}
PASSWORD_HASH_ALGO = pbkdf2
[oauth2]
JWT_SECRET = {{ with nomadVar "nomad/jobs/gitea" }}{{ .jwt_secret }}{{ end }}
EOF
destination = "local/app.ini"
}
}
task "tailscale" {
driver = "docker"
config {
image = "tailscale/tailscale:latest"
}
env = {
TS_HOSTNAME = "git"
TS_AUTHKEY = "${ts_oauthsecret}?ephemeral=true"
TS_EXTRA_ARGS = "--advertise-tags=tag:nomad"
}
resources {
cpu = 100
memory = 64
}
lifecycle {
hook = "prestart"
sidecar = true
}
}
volume "unraid_appdata_gitea" {
type = "csi"
read_only = false
source = "unraid_appdata_gitea"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
}
}

2-nomad-config/gitea.tf Normal file

@@ -0,0 +1,39 @@
resource "cloudflare_dns_record" "git-othrayte-one" {
comment = "git.othrayte.one maps to tailscale fqdn"
zone_id = "2616ab2a44d0645b03fbc3106c79bd99"
type = "CNAME"
name = "git.othrayte.one"
content = "git.tail15856.ts.net"
ttl = 1 # Auto
}
resource "nomad_job" "gitea" {
jobspec = templatefile("gitea.nomad.hcl", {
ts_oauthsecret = data.sops_file.secrets.data["tailscale.oauthsecret"]
})
}
resource "nomad_variable" "gitea" {
path = "nomad/jobs/gitea"
items = {
internal_token = data.sops_file.secrets.data["gitea.internal_token"]
jwt_secret = data.sops_file.secrets.data["gitea.jwt_secret"]
database_pw = data.sops_file.secrets.data["gitea.database_pw"]
}
}
resource "postgresql_role" "gitea" {
name = "gitea"
password = data.sops_file.secrets.data["gitea.database_pw"]
login = true
}
resource "postgresql_database" "gitea" {
name = "gitea"
owner = postgresql_role.gitea.name
}
module "appdata_gitea" {
source = "./modules/appdata"
name = "gitea"
}

2-nomad-config/glance.tf Normal file

@@ -0,0 +1,3 @@
resource "nomad_job" "glance" {
jobspec = file("glance.nomad.hcl")
}


@@ -1,61 +0,0 @@
job "hello-world" {
group "servers" {
network {
port "www" {
to = -1
}
}
service {
name = "hello-world"
port = "www"
tags = [
"traefik.enable=true",
"traefik.http.routers.hello-world.middlewares=auth@file",
]
check {
name = "alive"
type = "tcp"
port = "www"
interval = "10s"
timeout = "2s"
}
}
# Tasks are individual units of work that are run by Nomad.
task "web" {
# This particular task starts a simple web server within a Docker container
driver = "docker"
config {
image = "busybox:1"
command = "httpd"
args = ["-v", "-f", "-p", "${NOMAD_PORT_www}", "-h", "/local"]
ports = ["www"]
}
template {
data = <<-EOF
<h1>Hello, Nomad!</h1>
<ul>
<li>Task: {{env "NOMAD_TASK_NAME"}}</li>
<li>Group: {{env "NOMAD_GROUP_NAME"}}</li>
<li>Job: {{env "NOMAD_JOB_NAME"}}</li>
<li>Metadata value for foo: {{env "NOMAD_META_foo"}}</li>
<li>Currently running on port: {{env "NOMAD_PORT_www"}}</li>
</ul>
EOF
destination = "local/index.html"
}
# Specify the maximum resources required to run the task
resources {
cpu = 50
memory = 64
}
}
}
}


@@ -0,0 +1,209 @@
job "immich" {
group "immich" {
network {
mode = "bridge"
port "http" {
to = 2283
}
}
service {
connect {
sidecar_service {
proxy {
# TODO https://docs.immich.app/administration/postgres-standalone#prerequisites
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
task "immich-server" {
driver = "docker"
config {
image = "ghcr.io/immich-app/immich-server:release"
ports = ["http"]
}
service {
name = "immich"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.immich.middlewares=auth@file",
"traefik.http.routers.immich-token.rule=Host(`c3ll7nbevl5j4j8rcnfxnr95q48fuayz-immich.othrayte.one`)",
]
# Use the Immich server ping endpoint
# See: https://api.immich.app/endpoints/server/pingServer
check {
name = "alive"
type = "http"
path = "/api/server/ping"
method = "GET"
port = "http"
interval = "10s"
timeout = "2s"
}
}
env {
IMMICH_LOG_LEVEL = "log"
TZ = "Australia/Melbourne"
REDIS_HOSTNAME = "localhost"
DB_HOSTNAME = "localhost"
IMMICH_IGNORE_MOUNT_CHECK_ERRORS = "true" # Let immich start whilst we figure out what's wrong with the mount permissions
}
volume_mount {
volume = "unraid_appdata_immich"
destination = "/data"
read_only = false
}
volume_mount {
volume = "unraid_media_photosvideos"
destination = "/data/library"
read_only = false
}
volume_mount {
volume = "unraid_media_immich_encodedvideo"
destination = "/data/encoded-video"
read_only = false
}
volume_mount {
volume = "unraid_mediadump_photosvideos"
destination = "/data/upload"
read_only = false
}
resources {
cpu = 200
memory = 512
memory_max = 1500
}
template {
data = <<EOH
DB_USERNAME="immich"
DB_PASSWORD="{{ with nomadVar "nomad/jobs/immich" }}{{ .database_pw }}{{ end }}"
EOH
destination = "secrets/db.env"
env = true # Load the file as environment variables
}
}
volume "unraid_appdata_immich" {
type = "csi"
read_only = false
source = "unraid_appdata_immich"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_media_photosvideos" {
type = "csi"
read_only = false
source = "unraid_media_photosvideos"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_media_immich_encodedvideo" {
type = "csi"
read_only = false
source = "unraid_media_immich_encodedvideo"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_mediadump_photosvideos" {
type = "csi"
read_only = false
source = "unraid_mediadump_photosvideos"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
task "immich-machine-learning" {
driver = "docker"
config {
image = "ghcr.io/immich-app/immich-machine-learning:release"
}
resources {
cpu = 200
memory = 500
memory_max = 2000
}
}
task "redis" {
driver = "docker"
config {
image = "docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571"
}
resources {
cpu = 30
memory = 10
memory_max = 50
}
}
task "tailscale" {
driver = "docker"
config {
image = "tailscale/tailscale:latest"
}
env = {
TS_HOSTNAME = "immich"
TS_EXTRA_ARGS = "--advertise-tags=tag:nomad"
}
resources {
cpu = 100
memory = 100
memory_max = 300
}
lifecycle {
hook = "prestart"
sidecar = true
}
template {
data = <<EOH
TS_AUTHKEY="{{ with nomadVar "nomad/jobs/immich" }}{{ .tailscale_oauthsecret }}{{ end }}?ephemeral=true"
EOH
destination = "secrets/ts_oauth.env"
env = true # Load the file as environment variables
}
}
}
}

2-nomad-config/immich.tf Normal file

@@ -0,0 +1,62 @@
resource "nomad_job" "immich" {
jobspec = file("immich.nomad.hcl")
}
resource "postgresql_role" "immich" {
name = "immich"
password = data.sops_file.secrets.data["immich.database_pw"]
login = true
}
resource "postgresql_database" "immich" {
name = "immich"
owner = postgresql_role.immich.name
}
resource "postgresql_extension" "immich_vchord" {
name = "vchord"
database = postgresql_database.immich.name
create_cascade = true
}
resource "postgresql_extension" "immich_earthdistance" {
name = "earthdistance"
database = postgresql_database.immich.name
create_cascade = true
}
resource "nomad_variable" "immich" {
path = "nomad/jobs/immich"
items = {
database_pw = data.sops_file.secrets.data["immich.database_pw"]
tailscale_oauthsecret = data.sops_file.secrets.data["tailscale.oauthsecret"]
}
}
module "appdata_immich" {
source = "./modules/appdata"
name = "immich"
}
module "unraid_smb_immich_photosvideos" {
source = "./modules/unraid_smb"
name = "photosvideos"
share = "media"
subDir = "Photos and Videos"
}
module "unraid_smb_immich_encodedvideo" {
source = "./modules/unraid_smb"
name = "immich_encodedvideo"
share = "media"
subDir = "immich/encoded-video"
}
module "unraid_smb_immich_mediadump_photosvideos" {
source = "./modules/unraid_smb"
name = "photosvideos"
id = "unraid_mediadump_photosvideos"
share = "media-dump"
subDir = "Photos and Videos"
}


@@ -0,0 +1,88 @@
job "jellyfin" {
group "jellyfin" {
count = 1
network {
port "http" {
to = 8096
}
}
task "jellyfin" {
driver = "docker"
config {
image = "lscr.io/linuxserver/jellyfin:latest"
ports = ["http"]
}
service {
name = "jellyfin"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.jellyfin.middlewares=auth@file",
"traefik.http.routers.jellyfin-token.rule=Host(`c3ll7nbevl5j4j8rcnfxnr95q48fuayz-jellyfin.othrayte.one`)",
]
check {
name = "alive"
type = "tcp"
port = "http"
interval = "10s"
timeout = "2s"
}
}
env {
PUID = 1000
PGID = 1000
TZ = "Australia/Melbourne"
JELLYFIN_PublishedServerUrl = "https://jellyfin.othrayte.one"
}
volume_mount {
volume = "unraid_appdata_jellyfin"
destination = "/config"
read_only = false
}
volume_mount {
volume = "unraid_media_jellyfin"
destination = "/data"
read_only = false
}
resources {
cpu = 500
memory = 2048
}
}
volume "unraid_appdata_jellyfin" {
type = "csi"
read_only = false
source = "unraid_appdata_jellyfin"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_media_jellyfin" {
type = "csi"
read_only = false
source = "unraid_media_jellyfin"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["nobrl", "uid=1000", "gid=1000"]
}
}
}
}


@@ -0,0 +1,15 @@
resource "nomad_job" "jellyfin" {
jobspec = file("jellyfin.nomad.hcl")
}
module "appdata_jellyfin" {
source = "./modules/appdata"
name = "jellyfin"
}
module "unraid_smb_jellyfin_media" {
source = "./modules/unraid_smb"
name = "jellyfin"
share = "media"
}

2-nomad-config/main.tf Normal file

@@ -0,0 +1,44 @@
terraform {
backend "local" {
path = "./.tfstate/terraform.tfstate"
}
}
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 5"
}
postgresql = {
source = "cyrilgdn/postgresql"
}
}
}
provider "nomad" {
address = "http://jaglan-beta-m20.lan:4646"
}
data "sops_file" "secrets" {
source_file = "secrets/secrets.enc.json"
}
provider "cloudflare" {
api_token = data.sops_file.secrets.data["cloudflare.api_token"]
}
resource "nomad_scheduler_config" "config" {
memory_oversubscription_enabled = true
preemption_config = {
batch_scheduler_enabled = false
service_scheduler_enabled = false
sysbatch_scheduler_enabled = false
system_scheduler_enabled = false
}
}


@@ -0,0 +1,62 @@
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
}
}
variable "name" {
description = "Name of the application, also used as subdir on the unraid appdata share"
type = string
}
variable "id" {
description = "ID to use for the volume registration, defaults to name with - replaced by _"
type = string
default = null
}
variable "access_mode" {
description = "CSI volume access mode"
type = string
default = "single-node-writer"
validation {
condition = contains(["single-node-writer", "multi-node-multi-writer"], var.access_mode)
error_message = "access_mode must be either 'single-node-writer' or 'multi-node-multi-writer'"
}
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
data "sops_file" "secrets" {
source_file = "secrets/secrets.enc.json"
}
resource "nomad_csi_volume_registration" "this" {
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
name = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
external_id = var.id != null ? var.id : "unraid_appdata_${replace(var.name, "-", "_")}"
capability {
access_mode = var.access_mode
attachment_mode = "file-system"
}
context = {
source = "//betelgeuse-seven-unraid.lan/appdata"
subDir = var.name
}
secrets = {
"username" = "nomad"
"password" = data.sops_file.secrets.data["unraid.nomad"]
}
}
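A typical call of this module, mirroring the invocations elsewhere in this repo (the app name here is hypothetical; the source path is relative to the calling directory):
```hcl
module "appdata_myapp" {
  source = "./modules/appdata"
  name   = "myapp" # registers CSI volume unraid_appdata_myapp backed by //betelgeuse-seven-unraid.lan/appdata/myapp
}
```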


@@ -0,0 +1,62 @@
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
}
}
variable "name" {
description = "Name of the volume registration"
type = string
}
variable "id" {
description = "ID to use for the volume registration, defaults to name with - replaced by _"
type = string
default = null
}
variable "share" {
description = "Name of the SMB share on the unraid server"
type = string
}
variable "subDir" {
description = "Subdirectory within the SMB share"
type = string
default = null
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
data "sops_file" "secrets" {
source_file = "secrets/secrets.enc.json"
}
resource "nomad_csi_volume_registration" "this" {
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
name = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
external_id = var.id != null ? var.id : "unraid_${var.share}_${replace(var.name, "-", "_")}"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = merge({
source = "//betelgeuse-seven-unraid.lan/${var.share}"
}, var.subDir == null ? {} : { "subDir" = var.subDir })
secrets = {
"username" = "nomad"
"password" = data.sops_file.secrets.data["unraid.nomad"]
}
}


@@ -0,0 +1,100 @@
job "ntfy" {
group "ntfy" {
network {
mode = "bridge"
port "http" {
to = 80
}
}
# Consul Connect sidecar with upstream to postgres
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
service {
name = "ntfy"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.ntfy.middlewares=auth@file",
# Token subdomain bypasses Authelia — ntfy's own token auth is sufficient for API access
"traefik.http.routers.ntfy-token.rule=Host(`ntfy-2e30e5869ab6bfde4961012b48761a9b.othrayte.one`)",
]
check {
type = "http"
path = "/healthz"
interval = "10s"
timeout = "2s"
}
}
# Users and tokens are provisioned declaratively via auth-users / auth-tokens in server.yml.
# ntfy reads and applies them on every startup — no poststart task, no race conditions.
#
# Bcrypt password hashes are one-way, so they are hardcoded below rather than treated as secrets (safe enough to commit).
# Generate with: docker run --rm -it binwiederhier/ntfy user hash
# or: echo "mypassword" | docker run --rm -i binwiederhier/ntfy user hash
# For the diun account the plaintext is irrelevant (token-only auth); use a random password:
# openssl rand -base64 32 | docker run --rm -i binwiederhier/ntfy user hash
#
# Required SOPS keys:
# ntfy.database_pw — postgres password for the ntfy role
# diun.ntfy_token — access token for Diun (actual secret — grants write access)
# Must start with "tk_" and be exactly 32 chars total.
# Generate: tok=$(openssl rand -hex 15); echo "tk_${tok:0:29}"
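# To smoke-test a provisioned token after deploy, publish to the write-only
# "diun" topic via the token subdomain (the main hostname sits behind Authelia):
#   curl -H "Authorization: Bearer tk_..." -d "test" \
#     https://ntfy-2e30e5869ab6bfde4961012b48761a9b.othrayte.one/diun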
task "ntfy" {
driver = "docker"
config {
image = "binwiederhier/ntfy:latest"
ports = ["http"]
command = "serve"
volumes = [
"local/server.yml:/etc/ntfy/server.yml",
]
}
env = {
TZ = "Australia/Melbourne"
}
template {
data = <<EOF
base-url: "https://ntfy.othrayte.one"
listen-http: ":80"
database-url: "postgres://ntfy:{{ with nomadVar "nomad/jobs/ntfy" }}{{ .database_pw }}{{ end }}@localhost:5432/ntfy"
auth-default-access: "deny-all"
behind-proxy: true
enable-login: true
auth-users:
- "admin:$2a$10$rLp4qagJnsA8Es5hQlISH.WrlzwMrXE2MBaEgz7zdd2lkAVu30lMy:admin"
- "diun:$2y$10$4wi1VG.Vp5p3Q2OEIXaTSOmwZm.G9dpNca9BqQRMdGGnk2yQqK3Gq:user"
auth-tokens:
- "diun:{{with nomadVar "nomad/jobs/ntfy"}}{{.diun_token}}{{end}}:Diun"
auth-access:
- "diun:diun:write-only"
EOF
destination = "local/server.yml"
}
resources {
cpu = 50
memory = 64
memory_max = 128
}
}
}
}

2-nomad-config/ntfy.tf

@@ -0,0 +1,22 @@
resource "nomad_job" "ntfy" {
jobspec = file("ntfy.nomad.hcl")
}
resource "nomad_variable" "ntfy" {
path = "nomad/jobs/ntfy"
items = {
database_pw = data.sops_file.secrets.data["ntfy.database_pw"]
diun_token = data.sops_file.secrets.data["diun.ntfy_token"]
}
}
resource "postgresql_role" "ntfy" {
name = "ntfy"
password = data.sops_file.secrets.data["ntfy.database_pw"]
login = true
}
resource "postgresql_database" "ntfy" {
name = "ntfy"
owner = postgresql_role.ntfy.name
}


@@ -0,0 +1,116 @@
job "openreader" {
group "openreader" {
network {
mode = "bridge"
port "http" {
to = 3003
}
}
# Consul Connect sidecar with upstream to postgres
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
service {
name = "openreader"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.openreader.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
service {
name = "openreader-api"
port = "http"
address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
connect {
sidecar_service {}
}
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
task "openreader" {
driver = "docker"
config {
image = "ghcr.io/richardr1126/openreader:v2.1.2"
ports = ["http"]
}
env = {
TZ = "Australia/Melbourne"
# Use embedded SeaweedFS for blob storage (data lives in /app/docstore/seaweedfs).
# Port 8333 is not exposed; browser uploads/downloads fall back through the app API.
USE_EMBEDDED_WEED_MINI = "true"
S3_ENDPOINT = "http://localhost:8333"
S3_FORCE_PATH_STYLE = "true"
# Auth is intentionally disabled (no BASE_URL / AUTH_SECRET set).
# Access is controlled by the Authelia middleware on the Traefik router above.
# To enable server-side library import from an Unraid share, add a second CSI volume
# mount for the share (e.g. unraid_media_books → /app/docstore/library:ro) and set:
# IMPORT_LIBRARY_DIR = "/app/docstore/library"
}
template {
data = <<EOF
POSTGRES_URL=postgresql://openreader:{{ with nomadVar "nomad/jobs/openreader" }}{{ .database_pw }}{{ end }}@localhost:5432/openreader
EOF
destination = "secrets/openreader.env"
env = true
}
volume_mount {
volume = "unraid_appdata_openreader"
destination = "/app/docstore"
read_only = false
}
resources {
cpu = 200
memory = 750
memory_max = 1024
}
}
volume "unraid_appdata_openreader" {
type = "csi"
read_only = false
source = "unraid_appdata_openreader"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
}
}


@@ -0,0 +1,26 @@
resource "nomad_job" "openreader" {
jobspec = file("openreader.nomad.hcl")
}
resource "nomad_variable" "openreader" {
path = "nomad/jobs/openreader"
items = {
database_pw = data.sops_file.secrets.data["openreader.database_pw"]
}
}
resource "postgresql_role" "openreader" {
name = "openreader"
password = data.sops_file.secrets.data["openreader.database_pw"]
login = true
}
resource "postgresql_database" "openreader" {
name = "openreader"
owner = postgresql_role.openreader.name
}
module "appdata_openreader" {
source = "./modules/appdata"
name = "openreader"
}


@@ -0,0 +1,119 @@
job "prowlarr" {
group "prowlarr" {
network {
mode = "bridge"
port "http" {
to = 9696
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
upstreams {
destination_name = "sonarr-api"
local_bind_port = 8989
}
}
}
}
}
service {
name = "prowlarr"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.prowlarr.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
service {
name = "prowlarr-api"
port = "http"
address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
connect {
sidecar_service {}
}
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
task "prowlarr" {
driver = "docker"
config {
image = "lscr.io/linuxserver/prowlarr:latest"
ports = ["http"]
}
env {
PUID = 1000
PGID = 1000
TZ = "Australia/Melbourne"
# https://wiki.servarr.com/prowlarr/postgres-setup
# Require auth but delegate it to Traefik + Authelia (the External method disables the built-in login form)
PROWLARR__AUTH__REQUIRED = "Enabled"
PROWLARR__AUTH__METHOD = "External"
PROWLARR__POSTGRES__USER = "prowlarr"
PROWLARR__POSTGRES__HOST = "localhost"
PROWLARR__POSTGRES__PORT = "5432"
PROWLARR__POSTGRES__MAINDB = "prowlarr-main"
PROWLARR__POSTGRES__LOGDB = "prowlarr-log"
}
volume_mount {
volume = "unraid_appdata_prowlarr"
destination = "/config"
read_only = false
}
resources {
cpu = 150
memory = 512
}
template {
data = <<EOH
PROWLARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/prowlarr" }}{{ .database_pw }}{{ end }}"
EOH
destination = "secrets/db.env"
env = true # Load the file as environment variables
}
}
volume "unraid_appdata_prowlarr" {
type = "csi"
read_only = false
source = "unraid_appdata_prowlarr"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
}
}


@@ -0,0 +1,32 @@
resource "nomad_job" "prowlarr" {
jobspec = file("prowlarr.nomad.hcl")
}
resource "nomad_variable" "prowlarr" {
path = "nomad/jobs/prowlarr"
items = {
database_pw = data.sops_file.secrets.data["prowlarr.database_pw"]
}
}
# https://wiki.servarr.com/prowlarr/postgres-setup
resource "postgresql_role" "prowlarr" {
name = "prowlarr"
password = data.sops_file.secrets.data["prowlarr.database_pw"]
login = true
}
resource "postgresql_database" "prowlarr_main" {
name = "prowlarr-main"
owner = postgresql_role.prowlarr.name
}
resource "postgresql_database" "prowlarr_log" {
name = "prowlarr-log"
owner = postgresql_role.prowlarr.name
}
module "appdata_prowlarr" {
source = "./modules/appdata"
name = "prowlarr"
}

2-nomad-config/readme.md

@@ -0,0 +1,32 @@
# Terraform State
The Terraform state lives on the fileshare; mount it at `2-nomad-config/.tfstate/`:
`sudo mount -t cifs //betelgeuse-seven-unraid.lan/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`
# Tailscale OAuth Client
We use a Tailscale OAuth client secret to let our containers join Tailscale. We created an OAuth client named `nomad` with the `auth_keys` (write) scope for the tag `nomad` and stored the secret in our secrets file.
# Secrets
The secrets file is encrypted with sops and is decrypted automatically by the Terraform sops provider.
Put the age keys in `/home/<user>/.config/sops/age/keys.txt`.
## Adding Secrets
Edit the secrets using `sops secrets/secrets.enc.json`
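Terraform reads keys by their dotted path, e.g. `data.sops_file.secrets.data["myapp.database_pw"]` for a hypothetical `{"myapp": {"database_pw": ...}}` entry.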
# Bootstrapping (starting without PostgreSQL running)
The postgresql provider can't connect until the database is up, so apply the targeted modules first:
`terraform apply -target=module.data`
`terraform apply -target=module.ingress`
## Restoring PostgreSQL DBs
`psql -h jaglan-beta-m21 -p 5432 -U postgres -f ~/Downloads/all_databases.sql postgres`
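A dump in that format can be produced against the same host with, e.g., `pg_dumpall -h jaglan-beta-m21 -p 5432 -U postgres -f all_databases.sql`.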
# Deploying and testing changes
Sometimes a Nomad job fails because of a problem in another job; once that other job is fixed, tell Nomad to retry the unchanged one.
`nomad job eval -force-reschedule glance`


@@ -0,0 +1,64 @@
job "renovate" {
type = "batch"
periodic {
cron = "0 4 * * *" # Daily at 4am
prohibit_overlap = true
}
group "renovate" {
network {
mode = "bridge"
}
# Consul Connect sidecar with upstream to Gitea (service: code-connect, port 3000)
service {
name = "renovate"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "code-connect"
local_bind_port = 3000
}
}
}
}
}
task "renovate" {
driver = "docker"
config {
image = "renovate/renovate:latest"
}
env = {
RENOVATE_PLATFORM = "gitea"
RENOVATE_ENDPOINT = "http://localhost:3000"
RENOVATE_GIT_URL = "endpoint"
RENOVATE_REPOSITORIES = "othrayte/infra"
RENOVATE_GIT_AUTHOR = "Renovate Bot <renovate@othrayte.one>"
LOG_LEVEL = "debug"
}
# Required SOPS key:
# renovate.gitea_token — PAT for the renovate bot account in Gitea
# Create a dedicated 'renovate' user in Gitea with these token scopes:
# repo (read+write), user (read), issue (read+write), organization (read)
template {
data = <<EOF
RENOVATE_TOKEN={{ with nomadVar "nomad/jobs/renovate" }}{{ .gitea_token }}{{ end }}
EOF
destination = "secrets/renovate.env"
env = true
}
resources {
cpu = 500
memory = 512
memory_max = 1024
}
}
}
}


@@ -0,0 +1,10 @@
resource "nomad_job" "renovate" {
jobspec = file("renovate.nomad.hcl")
}
resource "nomad_variable" "renovate" {
path = "nomad/jobs/renovate"
items = {
gitea_token = data.sops_file.secrets.data["renovate.gitea_token"]
}
}


@@ -0,0 +1,13 @@
export AGE_VERSION=v1.2.1
# Download the archive
wget https://github.com/FiloSottile/age/releases/download/$AGE_VERSION/age-$AGE_VERSION-linux-amd64.tar.gz
# Extract the contents of the archive
tar -xvf age-$AGE_VERSION-linux-amd64.tar.gz
# Move the binaries to a directory in our PATH
sudo mv age/age* /usr/local/bin/
# Make the binaries executable
sudo chmod +x /usr/local/bin/age*
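# Optional: generate a key for sops if you don't have one yet
# (age-keygen ships in the same tarball and was moved alongside age above)
mkdir -p ~/.config/sops/age
age-keygen -o ~/.config/sops/age/keys.txt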


@@ -0,0 +1,70 @@
{
"unraid": {
"nomad": "ENC[AES256_GCM,data:FCGEs+XCSuunLxVPyzE=,iv:j8Ey+l8iJiPY7CbE5IoT0ZgNklnv+4odSZkorJQ/nr8=,tag:7PoizENid+vgWC/eb5MOaQ==,type:str]"
},
"cloudflare": {
"api_token": "ENC[AES256_GCM,data:445wM+3yHRnMfiAHuBg3dWzLA3jB0dpNBaHrxl1bb036sFZnzN+gOg==,iv:g8tMdxY8XFTPA2W8/RtMtDhnyCzNLY6dJDWWC2ZeIZQ=,tag:04uf/y3DWY3HIXOJ2HenJw==,type:str]",
"direct_ip6": "ENC[AES256_GCM,data:E/V1pFjBp7c0PRhUa4cxqAVl8xZKsZzn,iv:Gw0qz2x1pMaieZaCcp4dD9sEVtQfcuEqRP3UpA2Bj/0=,tag:LpsPH3cJAlPCFX6EPabWnQ==,type:str]"
},
"tailscale": {
"oauthsecret": "ENC[AES256_GCM,data:c2GtA+FaDcAKqUtQquP35W650lo1soivNCJc7KzCoQws0hTkt3zICFomOArhIfpHQMnCG4SpNvnXalarKKKxVw==,iv:Pnf8+9wBGNooPl4sKX5aGXITQt7/qfpn+mWyKk8YLXo=,tag:mXL+bz0gESj18qjpdksldA==,type:str]"
},
"authelia": {
"session_secret": "ENC[AES256_GCM,data:eSpAwX/KPzed/Y0oi6QvBwB7Gv5Kiml4FJS5RyuJ7A0plAd8acNThNXi3H4=,iv:RmH0wB3smlSF+CYs4x1w2V9ixdxgdav4dAQntjO0S5g=,tag:Vo5eHiU+1/dep/IUryN/XQ==,type:str]",
"jwt_secret": "ENC[AES256_GCM,data:XGDV2+SbMPYxhzv8S/6SjfA0MZeelRNjgIR10+qcTFYs2IW+IZjkCExLpQ==,iv:hv1b2Dddm21vObwQBUb3LZFfYjAkIm2/ZE1Syt3//YI=,tag:TojRWFctm1H72oPfq62Y2g==,type:str]",
"encryption_key": "ENC[AES256_GCM,data:D5F7eScWxCQ8G7pU8khi8aj8/p8ZKSErROhrqKS569fYUQpsHt6+3QQfeH7/naMvJ45r/5oVGCGeeFcEqlY0lEnbFLJEZ/tSOcm4RcIigPcx4a+8H7s=,iv:sf+TdLzacFaDgYjYhw4RKExLu6XfpewKiklt/q7VVzw=,tag:Zu3kCJfCZ7ae7HneXF6jVA==,type:str]",
"database_pw": "ENC[AES256_GCM,data:w5TmJwjeFa8tgTXDBI7doNfbBnDBUoWyZ0Qetp4M5JpwyRv06kAj2sAKOCY=,iv:rJubsGeyxSXkOxyTjzTo1GJRgLNWbAIMy1sS74MiuHc=,tag:Sbi4gVZgRcJLriTxm2ebeQ==,type:str]"
},
"postgres": {
"postgres": "ENC[AES256_GCM,data:lKuLcVTuUbfrlVhRdCs=,iv:TsbtAbXYTysxuiCi08F0hJsgoolzzgE2EPdFdPMQ+NQ=,tag:9oNua06hHdeCzE7nB22c0g==,type:str]"
},
"gitea": {
"internal_token": "ENC[AES256_GCM,data:teIsV+6nUPWO9/amas3FmK6uv44YEZNpV780ncTwUkQDygDvQRr7A3KEbk/rYFcTjfxK6Kw8nmqi0rBrcBNX1bSVNg8jwfYHhY2TxFMgCo4tkQxLf3eSBUhlPGsfpsskACKIPnZ1RQ2m,iv:NAKPw0YVNtLlyEp7wld9ml4zQlVxo/takiOid6YQlfA=,tag:QIk+USh8MLZDzJkQsglJ+w==,type:str]",
"jwt_secret": "ENC[AES256_GCM,data:/dPDqJdn4Af3Wo005V7lU9b8RbN/wyF0Tx66827cdyaZfi4QPOSj23wNqw==,iv:yJW2PiAGGr97q0DoBr64X88eFNpuVPZX0SPyNDp5QjQ=,tag:p27XTUbMC0WDMTNJCscmGQ==,type:str]",
"database_pw": "ENC[AES256_GCM,data:EzGPKdsX3Ib2zWrz09kUdegIxGNwg1j4msbOKUmvCGy6R9/EG1nvOC9Z5Oo=,iv:msek112FxmVAwFume6b7RnSICL/sw5CK3XzgCq9Sp1s=,tag:UcxUi2hySv54liN+Ddodpw==,type:str]"
},
"hass": {
"magic-token": "ENC[AES256_GCM,data:3mKbPFgvtX+hWYEZ0q4jBjnR8KM+E/1DqmkVzoV6ROY=,iv:9L748apqK1TcsW0Y0HvU9QHVD/eSh56c/uN/K4KNct4=,tag:ZmXiaPz7MEvaQ0yu3byiKQ==,type:str]"
},
"traefik": {
"cf_tunnel_token": "ENC[AES256_GCM,data:IgrmKwdeipix1dIXNuXnTWN5rCZjClbKZQJfgr5c2IP/n8bcc/nG5Wb42WL2C4hTeVqhG5p62ZXoz0j4dNAjxvuzcW/P0XeSYaiDRXMNWKhNIcK7jOexgswio0sUC+F7f3fa6HH4C02Mx8dWoFZChYtM5EhGdcEwVwspyBlMhTSHTz+/w5T9OqH18o132ZTM6kMQY85sgH36azWoSw73N+aC4ANhgybuok06z6R5D2jMdDX47Bo5bg==,iv:yOcUDTYHh58iejbl0wxNJO1hcDypcBq6KlHKyqnMSVk=,tag:CMyHKgahkIGdXItMJ1/hOg==,type:str]",
"kopia_basic_auth": "ENC[AES256_GCM,data:GKJKTtFqW2f8L6VYsBIuNsssUk8vBn74A4TIFw==,iv:rjV0o+CKUtZi8nVsVv0m17OPkYW5ymje9QoWvlRHa7g=,tag:CqEf6n5xgc2RWddbZoNqBQ==,type:str]"
},
"immich": {
"database_pw": "ENC[AES256_GCM,data:SUyMGqu7deZyZpVt,iv:asZehOvn/JamwFyS+Xl9Xpr4JFkKlJjHVw7LywYOxTc=,tag:plRvuv7+ievfEhxurBl7YQ==,type:str]"
},
"sonarr": {
"database_pw": "ENC[AES256_GCM,data:TN381ZYJLeUHX9U3Jnd9+w==,iv:lKaMYHeaSGXJd0/EGxkDY2l2v62xG3xs8TVC0HwXL94=,tag:3z5rK+2RfJHJdQc7KC9KmA==,type:str]"
},
"pia": {
"user": "ENC[AES256_GCM,data:kniAs2gCTq4=,iv:1Oaht02fFSQwzWmWEtjsJZCJChPJsZhwRyux8dMY2CU=,tag:NqWaUhuYTSFZZK/CpSisdg==,type:str]",
"pass": "ENC[AES256_GCM,data:c8qWGcaI0p7MyQ==,iv:/3ehYrgdDwjzFdXyX/vKTK+zt6u7gWNRZBIdWDG1KiE=,tag:jqfIMnB1OKchBZ4U2s1o4g==,type:str]"
},
"prowlarr": {
"database_pw": "ENC[AES256_GCM,data:FkW5LPoyn8bh0UfWcFq3og==,iv:SFq4Xsdz3FfCDyPjIaAmz5nsC/SPdFrR03GCr3KE/nw=,tag:PVYj7hSWDnfeE7igSXGBSA==,type:str]"
},
"frigate": {
"rtsp_password": "ENC[AES256_GCM,data:8vq06/IkNOUgpHmf,iv:lj8buuIC0ub0YOUiOiaN6tokkIT2/+bBwFNz2QXmCd4=,tag:EMm/bIHdJSAtjYAlrNOCMw==,type:str]"
},
"openreader": {
"database_pw": "ENC[AES256_GCM,data:2Ey9Ypb2Ked/LP/ApJhCqhKWuzognxVK7ku60nERp7I=,iv:KdLFD+fuNpYmPEU5G96SvFcQeZB0XlnOh/6uf7OfFqI=,tag:h7DQlqx5fxhiHuWyFd7svQ==,type:str]"
},
"ntfy": {
"database_pw": "ENC[AES256_GCM,data:79c2KFs3tcbet1dSGnkSDlAeKLCZrh4aMYLXTROM8w==,iv:eZ4limyjl++nsvHUzPKy82hfLZEOc+XQYpO6Czo/8os=,tag:iX9SiEACQ5IM8f1jhZh5Qw==,type:str]"
},
"renovate": {
"gitea_token": "ENC[AES256_GCM,data:/J3CDMgWZLe20oQ+ENKBMi8fs/+jgsARV7xihMq0OLmRk8C8ae/IXg==,iv:e7WYOanSOCZ/LhN6SKrH0VrR3xLPTTppOKpGpSl+oAc=,tag:XBAilRdK3jL7WtM+92Fsmg==,type:str]"
},
"sops": {
"age": [
{
"recipient": "age1zuj9ssd0kkfeefjmyz82t9h3dfjq8degqm2l5eszhu5zhazpgsys54rq2n",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByUWM4ZDVVbGFrUGdMRHBX\nUFBmU3Nlc0RBSzhFK0tHNHpkQXUvUVdiZUZJCmpRN1lFdENpWW0rcThjVlVQNUl6\nWnlLU0RnQ3FZby81Ly8xTFBrek9nMncKLS0tIFQ4UTRNOC9CRmx4OFJWem1wckZz\nUDFTSzdWZldFK3FqcTNWTWRyNDhHQ2MKS811mR5xn7qiC/aVgPFYJ5c6Q3zxRfcr\nHcvxUvB01vNJKZpRg92vvKPkV6lQO3DXCT98OdfwiymlEOvYxg71Pg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2026-04-18T06:07:22Z",
"mac": "ENC[AES256_GCM,data:4UJMEZNS8HXtn1vIJ+qCkBJL5oh3Jp7wbaGm5hMiSNPseNq3smTYmMgh+TNK4t/K7yxfukTuhEFTUsnMfi7rIxTrbCpcTdnqJSYm/iflkdA57Tx+mHpY9iG7wtRmObow18Ea0rj6foMu+1V8pVFomzhc/ipafinTCuqodKW1a2Y=,iv:Gu/Lh8mir36ltN++qJg122ry+eJA0GKSrfijulYM7q4=,tag:5tjBzrCZcQUvc76No+E9Ow==,type:str]",
"encrypted_regex": "^(.*)$",
"version": "3.10.2"
}
}


@@ -0,0 +1,10 @@
export SOPS_VERSION=v3.10.2
# Download the binary
curl -LO https://github.com/getsops/sops/releases/download/$SOPS_VERSION/sops-$SOPS_VERSION.linux.amd64
# Move the binary into your PATH
sudo mv sops-$SOPS_VERSION.linux.amd64 /usr/local/bin/sops
# Make the binary executable
sudo chmod +x /usr/local/bin/sops
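# Verify the install
sops --version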


@@ -1,67 +0,0 @@
provider "nomad" {
# For some reason nomad is binding to the tailscale IP but not the (local) IP that we get for the same hostname
address = "http://jaglan-beta-m01:4646"
}
resource "nomad_job" "glance" {
jobspec = file("glance.nomad.hcl")
}
resource "nomad_job" "hello_world" {
jobspec = file("hello-world.nomad.hcl")
}
resource "nomad_job" "traefik" {
jobspec = file("traefik.nomad.hcl")
}
resource "nomad_job" "authelia" {
jobspec = file("authelia.nomad.hcl")
}
resource "nomad_job" "webapp" {
jobspec = file("webapp.nomad.hcl")
}
resource "nomad_job" "csi-smb" {
jobspec = file("csi-smb.nomad.hcl")
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
resource "nomad_csi_volume_registration" "unraid_transfer" {
# Note: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_transfer"
name = "unraid_transfer"
external_id = "unraid_transfer"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
source = "//192.168.1.192/transfer"
}
parameters = {
"csi.storage.k8s.io/node-stage-secret-name" = "smbcreds"
"csi.storage.k8s.io/node-stage-secret-namespace" = "default"
}
secrets = {
"username" = "anon"
"password" = ""
}
}
resource "nomad_job" "transfer" {
jobspec = file("transfer.nomad.hcl")
}


@@ -0,0 +1,140 @@
job "sonarr" {
group "sonarr" {
network {
mode = "bridge"
port "http" {
to = 8989
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
upstreams {
destination_name = "deluge-api"
local_bind_port = 8112
}
upstreams {
destination_name = "prowlarr-api"
local_bind_port = 9696
}
}
}
}
}
service {
name = "sonarr"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.sonarr.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
service {
name = "sonarr-api"
port = "http"
address_mode = "alloc" # Use allocation IP for Connect as the sidecar can't access the host's published port (hairpin/loopback NAT issue)
connect {
sidecar_service {}
}
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
task "sonarr" {
driver = "docker"
config {
image = "lscr.io/linuxserver/sonarr:latest"
ports = ["http"]
}
env {
PUID = 1000
PGID = 1000
TZ = "Australia/Melbourne"
# https://wiki.servarr.com/sonarr/environment-variables
# Require auth but delegate it to Traefik + Authelia (the External method disables the built-in login form)
SONARR__AUTH__REQUIRED = "Enabled"
SONARR__AUTH__METHOD = "External"
SONARR__POSTGRES__USER = "sonarr"
SONARR__POSTGRES__HOST = "localhost"
SONARR__POSTGRES__PORT = "5432"
SONARR__POSTGRES__MAINDB = "sonarr-main"
SONARR__POSTGRES__LOGDB = "sonarr-log"
}
volume_mount {
volume = "unraid_appdata_sonarr"
destination = "/config"
read_only = false
}
volume_mount {
volume = "unraid_media_sonarr"
destination = "/data"
read_only = false
}
resources {
cpu = 150
memory = 1024
}
template {
data = <<EOH
SONARR__POSTGRES__PASSWORD="{{ with nomadVar "nomad/jobs/sonarr" }}{{ .database_pw }}{{ end }}"
EOH
destination = "secrets/db.env"
env = true # Load the file as environment variables
}
}
volume "unraid_appdata_sonarr" {
type = "csi"
read_only = false
source = "unraid_appdata_sonarr"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
volume "unraid_media_sonarr" {
type = "csi"
read_only = false
source = "unraid_media_sonarr"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["nobrl", "uid=1000", "gid=1000"]
}
}
}
}

2-nomad-config/sonarr.tf

@@ -0,0 +1,38 @@
resource "nomad_job" "sonarr" {
jobspec = file("sonarr.nomad.hcl")
}
resource "nomad_variable" "sonarr" {
path = "nomad/jobs/sonarr"
items = {
database_pw = data.sops_file.secrets.data["sonarr.database_pw"]
}
}
# https://wiki.servarr.com/sonarr/postgres-setup#schema-creation
resource "postgresql_role" "sonarr" {
name = "sonarr"
password = data.sops_file.secrets.data["sonarr.database_pw"]
login = true
}
resource "postgresql_database" "sonarr_main" {
name = "sonarr-main"
owner = postgresql_role.sonarr.name
}
resource "postgresql_database" "sonarr_log" {
name = "sonarr-log"
owner = postgresql_role.sonarr.name
}
module "appdata_sonarr" {
source = "./modules/appdata"
name = "sonarr"
}
module "unraid_smb_sonarr_media" {
source = "./modules/unraid_smb"
name = "sonarr"
share = "media"
}


@@ -1,519 +0,0 @@
{
"version": 4,
"terraform_version": "1.11.4",
"serial": 595,
"lineage": "15e0900c-88bc-9754-4600-e3977d018ba0",
"outputs": {},
"resources": [
{
"mode": "data",
"type": "nomad_plugin",
"name": "smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"id": "smb",
"nodes": [
{
"healthy": true,
"healthy_description": "healthy",
"name": "0db77253-0579-e8b0-42cd-d619af9d8e73"
}
],
"nodes_expected": 1,
"nodes_healthy": 1,
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"wait_for_healthy": true,
"wait_for_registration": false
},
"sensitive_attributes": []
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer",
"id": "unraid_transfer",
"mount_options": [],
"name": "unraid_transfer",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "",
"username": "anon"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "authelia",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "authelia",
"jobspec": "job \"authelia\" {\n group \"authelia\" {\n network {\n port \"http\" {\n static = 9091\n }\n }\n\n service {\n name = \"auth\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n ]\n\n check {\n type = \"http\"\n path = \"/health\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n task \"authelia\" {\n driver = \"docker\"\n\n config {\n image = \"authelia/authelia:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config:/config\",\n \"local/data:/data\"\n ]\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n\n template {\n data = \u003c\u003cEOF\nserver:\n address: tcp://0.0.0.0:{{ env \"NOMAD_PORT_http\" }}/\ntheme: \"auto\"\nidentity_validation:\n reset_password:\n jwt_secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}\"\n\nauthentication_backend:\n file:\n path: /config/users_database.yml\n\naccess_control:\n default_policy: deny\n rules:\n - domain: \"*.othrayte.one\"\n policy: one_factor\n # Disable auth for authelia\n #- domain: \"auth.othrayte.one\"\n # policy: bypass\n\nsession:\n name: authelia_session\n secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}\"\n expiration: 3600\n cookies:\n - domain: othrayte.one\n authelia_url: \"https://auth.othrayte.one\"\n\nstorage:\n local:\n path: /config/db.sqlite3\n encryption_key: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .encryption_key }}{{ end }}\"\n\nnotifier:\n filesystem:\n filename: /config/notification.txt\nEOF\n\n destination = \"local/config/configuration.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\n# Users database for Authelia\nusers:\n othrayte:\n password: \"$2y$10$FeemMJevZXq6y1pc6FNOXeIlthGWiGHRmMfpV33BNcpChA5ozLUmK\"\n displayname: \"Adrian\"\n email: \"othrayte@gmail.com\"\nEOF\n\n destination = \"local/config/users_database.yml\"\n }\n }\n }\n}\n",
"json": null,
"modify_index": "17976",
"name": "authelia",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "authelia",
"task": [
{
"driver": "docker",
"meta": {},
"name": "authelia",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "csi-smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "csi-smb",
"jobspec": "job \"csi-smb\" {\n type = \"system\"\n\n group \"smb\" {\n task \"plugin\" {\n driver = \"docker\"\n\n config {\n image = \"mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0\"\n args = [\n \"--v=5\",\n \"--nodeid=${attr.unique.hostname}\",\n \"--endpoint=unix:///csi/csi.sock\",\n \"--drivername=smb.csi.k8s.io\"\n ]\n privileged = true\n }\n\n csi_plugin {\n id = \"smb\"\n type = \"node\"\n mount_dir = \"/csi\"\n }\n\n resources {\n cpu = 100\n memory = 50\n }\n }\n }\n}",
"json": null,
"modify_index": "11526",
"name": "csi-smb",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "smb",
"task": [
{
"driver": "docker",
"meta": {},
"name": "plugin",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "glance",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "glance",
"jobspec": "job \"glance\" {\n group \"glance\" {\n count = 1\n\n network {\n port \"http\" {\n to = 8080\n }\n }\n\n task \"glance\" {\n driver = \"docker\"\n\n config {\n image = \"glanceapp/glance:latest\"\n ports = [\"http\"]\n volumes = [\n \"local/glance.yml:/app/config/glance.yml\",\n ]\n }\n\n service {\n name = \"home\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.home.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 50\n memory = 128\n }\n\n\n template {\n data = \u003c\u003cEOF\npages:\n - name: Home\n # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look\n # hide-desktop-navigation: true\n columns:\n - size: small\n widgets:\n - type: calendar\n first-day-of-week: monday\n\n - type: rss\n limit: 10\n collapse-after: 3\n cache: 12h\n feeds:\n - url: https://selfh.st/rss/\n title: selfh.st\n limit: 4\n - url: https://ciechanow.ski/atom.xml\n - url: https://www.joshwcomeau.com/rss.xml\n title: Josh Comeau\n - url: https://samwho.dev/rss.xml\n - url: https://ishadeed.com/feed.xml\n title: Ahmad Shadeed\n\n - type: twitch-channels\n channels:\n - theprimeagen\n - j_blow\n - piratesoftware\n - cohhcarnage\n - christitustech\n - EJ_SA\n\n - size: full\n widgets:\n - type: group\n widgets:\n - type: hacker-news\n - type: lobsters\n\n - type: videos\n channels:\n - UCXuqSBlHAE6Xw-yeJA0Tunw # Linus Tech Tips\n - UCR-DXc1voovS8nhAvccRZhg # Jeff Geerling\n - UCsBjURrPoezykLs9EqgamOA # Fireship\n - UCBJycsmduvYEL83R_U4JriQ # Marques Brownlee\n - UCHnyfMqiRRG1u-2MsSQLbXA # Veritasium\n\n - type: bookmarks\n groups:\n - links:\n - title: Gmail\n url: https://mail.google.com/mail/u/0/\n - title: Amazon\n url: https://www.amazon.com/\n - title: Github\n url: https://github.com/\n - title: Wikipedia\n url: https://en.wikipedia.org/\n - title: Infra\n color: 10 70 50\n links:\n - title: Nomad\n url: https://nomad.othrayte.one/\n - title: Consul\n url: https://consul.othrayte.one/\n - title: Traefik\n url: https://traefik.othrayte.one/\n - title: Social\n color: 200 50 50\n links:\n - title: Reddit\n url: https://www.reddit.com/\n - title: Twitter\n url: https://twitter.com/\n - title: Instagram\n url: https://www.instagram.com/\n\n - size: small\n widgets:\n - type: weather\n location: Melbourne, Australia\n units: metric\n hour-format: 12h\n # Optionally hide the location from being displayed in the widget\n # hide-location: true\n\n - type: releases\n cache: 1d\n # Without authentication the Github API allows for up to 60 requests per hour. You can create a\n # read-only token from your Github account settings and use it here to increase the limit.\n # token: ...\n repositories:\n - glanceapp/glance\n - go-gitea/gitea\n - immich-app/immich\n - syncthing/syncthing\n\n # Add more pages here:\n # - name: Your page name\n # columns:\n # - size: small\n # widgets:\n # # Add widgets here\n\n # - size: full\n # widgets:\n # # Add widgets here\n\n # - size: small\n # widgets:\n # # Add widgets here\nEOF\n\n destination = \"local/glance.yml\"\n }\n\n }\n }\n}",
"json": null,
"modify_index": "17710",
"name": "glance",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "glance",
"task": [
{
"driver": "docker",
"meta": {},
"name": "glance",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "hello_world",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "hello-world",
"jobspec": "job \"hello-world\" {\n group \"servers\" {\n network {\n port \"www\" {\n to = -1\n }\n }\n\n service {\n name = \"hello-world\"\n port = \"www\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.hello-world.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"www\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n \n\n # Tasks are individual units of work that are run by Nomad.\n task \"web\" {\n # This particular task starts a simple web server within a Docker container\n driver = \"docker\"\n\n config {\n image = \"busybox:1\"\n command = \"httpd\"\n args = [\"-v\", \"-f\", \"-p\", \"${NOMAD_PORT_www}\", \"-h\", \"/local\"]\n ports = [\"www\"]\n }\n\n template {\n data = \u003c\u003c-EOF\n \u003ch1\u003eHello, Nomad!\u003c/h1\u003e\n \u003cul\u003e\n \u003cli\u003eTask: {{env \"NOMAD_TASK_NAME\"}}\u003c/li\u003e\n \u003cli\u003eGroup: {{env \"NOMAD_GROUP_NAME\"}}\u003c/li\u003e\n \u003cli\u003eJob: {{env \"NOMAD_JOB_NAME\"}}\u003c/li\u003e\n \u003cli\u003eMetadata value for foo: {{env \"NOMAD_META_foo\"}}\u003c/li\u003e\n \u003cli\u003eCurrently running on port: {{env \"NOMAD_PORT_www\"}}\u003c/li\u003e\n \u003c/ul\u003e\n EOF\n destination = \"local/index.html\"\n }\n\n # Specify the maximum resources required to run the task\n resources {\n cpu = 50\n memory = 64\n }\n }\n }\n}",
"json": null,
"modify_index": "17709",
"name": "hello-world",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "servers",
"task": [
{
"driver": "docker",
"meta": {},
"name": "web",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "traefik",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "traefik",
"jobspec": "job \"traefik\" {\n group \"traefik\" {\n network {\n port \"http\" {\n static = 80\n }\n\n port \"https\" {\n static = 443\n }\n\n port \"api\" {\n static = 8081\n }\n }\n\n service {\n name = \"traefik\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.traefik.rule=Host(`traefik.othrayte.one`)\",\n \"traefik.http.routers.traefik.service=traefik\",\n \"traefik.http.routers.traefik.middlewares=auth@file\",\n \"traefik.http.services.traefik.loadbalancer.server.port=8081\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"traefik\" {\n type = \"host\"\n read_only = false\n source = \"traefik\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:v3.3\"\n network_mode = \"host\"\n\n volumes = [\n \"local/traefik.yml:/etc/traefik/traefik.yml\",\n \"local/configs/:/etc/traefik/configs/\"\n ]\n }\n\n volume_mount {\n volume = \"traefik\"\n destination = \"/opt/traefik\"\n read_only = false\n }\n\n template {\n data = \u003c\u003cEOF\nentryPoints:\n web:\n address: \":80\"\n http:\n redirections:\n entryPoint:\n to: websecure\n scheme: https\n websecure:\n address: \":443\"\n http:\n tls:\n certResolver: letsencrypt\n traefik:\n address: \":8081\"\n\napi:\n dashboard: true\n insecure: true\n\nproviders:\n file:\n directory: \"/etc/traefik/configs/\"\n\n consulCatalog:\n prefix: \"traefik\"\n exposedByDefault: false\n defaultRule: {{\"Host(`{{ .Name }}.othrayte.one`)\"}}\n endpoint:\n address: \"127.0.0.1:8500\"\n scheme: \"http\"\n\ncertificatesResolvers:\n letsencrypt:\n acme:\n email: \"othrayte@gmail.com\"\n storage: \"/opt/traefik/acme.json\"\n httpChallenge:\n entryPoint: web\nEOF\n\n destination = \"local/traefik.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\nhttp:\n middlewares:\n auth:\n forwardAuth:\n address: \"http://192.168.1.235:9091/api/authz/forward-auth\"\n trustForwardHeader: true\n routers:\n fallback:\n rule: \"HostRegexp(`^.+$`)\"\n entryPoints:\n - websecure\n middlewares:\n - auth\n service: noop@internal # This router just applies middleware\n priority: 1\n nomad-ui:\n rule: \"Host(`nomad.othrayte.one`)\"\n service: nomad-ui\n middlewares:\n - auth\n consul-ui:\n rule: \"Host(`consul.othrayte.one`)\"\n service: consul-ui\n middlewares:\n - auth\n unraid:\n rule: \"Host(`unraid.othrayte.one`)\"\n service: unraid\n middlewares:\n - auth\n\n services:\n nomad-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:4646\"\n consul-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:8500\"\n unraid:\n loadBalancer:\n servers:\n - url: \"http://192.168.1.192:80\"\nEOF\n\n destination = \"local/configs/nomad.yml\"\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n }\n }\n}\n",
"json": null,
"modify_index": "18000",
"name": "traefik",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "traefik",
"task": [
{
"driver": "docker",
"meta": {},
"name": "traefik",
"volume_mounts": [
{
"destination": "/opt/traefik",
"read_only": false,
"volume": "traefik"
}
]
}
],
"volumes": [
{
"name": "traefik",
"read_only": false,
"source": "traefik",
"type": "host"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "transfer",
"jobspec": "job \"transfer\" {\n group \"transfer\" {\n network {\n port \"http\" {\n to = 80\n }\n }\n\n service {\n name = \"transfer\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.volume-test.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"unraid_transfer\" {\n type = \"csi\"\n read_only = false\n source = \"unraid_transfer\"\n access_mode = \"single-node-writer\"\n attachment_mode = \"file-system\"\n\n mount_options {\n mount_flags = [\"uid=911\",\"gid=1000\"] # linuxserver.io container services run as uid 911\n }\n }\n\n task \"filebrowser\" {\n driver = \"docker\"\n\n config {\n # Use the s6 tag for the linuxserver.io based image\n image = \"filebrowser/filebrowser:s6\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config/settings.json:/config/settings.json\",\n ]\n }\n\n volume_mount {\n volume = \"unraid_transfer\"\n\t destination = \"/srv\"\n read_only = false\n }\n\n resources {\n cpu = 500\n memory = 256\n }\n\n template {\n data = \u003c\u003cEOF\n{\n \"port\": 80,\n \"baseURL\": \"\",\n \"address\": \"\",\n \"log\": \"stdout\",\n \"database\": \"/database/filebrowser.db\",\n \"root\": \"/srv\",\n \"auth\": {\n \"method\": \"noauth\"\n }\n}\nEOF\n\n destination = \"local/config/settings.json\"\n }\n }\n }\n}",
"json": null,
"modify_index": "21245",
"name": "transfer",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "transfer",
"task": [
{
"driver": "docker",
"meta": {},
"name": "filebrowser",
"volume_mounts": [
{
"destination": "/srv",
"read_only": false,
"volume": "unraid_transfer"
}
]
}
],
"volumes": [
{
"name": "unraid_transfer",
"read_only": false,
"source": "unraid_transfer",
"type": "csi"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "webapp",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "demo-webapp",
"jobspec": "job \"demo-webapp\" {\n group \"demo\" {\n count = 3\n\n network {\n port \"http\"{\n to = -1\n }\n }\n\n service {\n name = \"demo-webapp\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.demo-webapp.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"2s\"\n timeout = \"2s\"\n }\n }\n\n task \"server\" {\n env {\n PORT = \"${NOMAD_PORT_http}\"\n NODE_IP = \"${NOMAD_IP_http}\"\n }\n\n driver = \"docker\"\n\n config {\n image = \"hashicorp/demo-webapp-lb-guide\"\n ports = [\"http\"]\n }\n }\n }\n}",
"json": null,
"modify_index": "17707",
"name": "demo-webapp",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 3,
"meta": {},
"name": "demo",
"task": [
{
"driver": "docker",
"meta": {},
"name": "server",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
}
],
"check_results": null
}


@@ -1,584 +0,0 @@
{
"version": 4,
"terraform_version": "1.11.4",
"serial": 593,
"lineage": "15e0900c-88bc-9754-4600-e3977d018ba0",
"outputs": {},
"resources": [
{
"mode": "data",
"type": "nomad_plugin",
"name": "smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"id": "smb",
"nodes": [
{
"healthy": true,
"healthy_description": "healthy",
"name": "0db77253-0579-e8b0-42cd-d619af9d8e73"
}
],
"nodes_expected": 1,
"nodes_healthy": 1,
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"wait_for_healthy": true,
"wait_for_registration": false
},
"sensitive_attributes": []
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer",
"id": "unraid_transfer",
"mount_options": [],
"name": "unraid_transfer",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "",
"username": "anon"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer_subdir",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer",
"subDir": "subdir"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer_subdir",
"id": "unraid_transfer_subdir",
"mount_options": [],
"name": "unraid_transfer_subdir",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "$lUPyJw1Yc\u0026B997i",
"username": "othrayte"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer_subdir"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "authelia",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "authelia",
"jobspec": "job \"authelia\" {\n group \"authelia\" {\n network {\n port \"http\" {\n static = 9091\n }\n }\n\n service {\n name = \"auth\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n ]\n\n check {\n type = \"http\"\n path = \"/health\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n task \"authelia\" {\n driver = \"docker\"\n\n config {\n image = \"authelia/authelia:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config:/config\",\n \"local/data:/data\"\n ]\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n\n template {\n data = \u003c\u003cEOF\nserver:\n address: tcp://0.0.0.0:{{ env \"NOMAD_PORT_http\" }}/\ntheme: \"auto\"\nidentity_validation:\n reset_password:\n jwt_secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}\"\n\nauthentication_backend:\n file:\n path: /config/users_database.yml\n\naccess_control:\n default_policy: deny\n rules:\n - domain: \"*.othrayte.one\"\n policy: one_factor\n # Disable auth for authelia\n #- domain: \"auth.othrayte.one\"\n # policy: bypass\n\nsession:\n name: authelia_session\n secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}\"\n expiration: 3600\n cookies:\n - domain: othrayte.one\n authelia_url: \"https://auth.othrayte.one\"\n\nstorage:\n local:\n path: /config/db.sqlite3\n encryption_key: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .encryption_key }}{{ end }}\"\n\nnotifier:\n filesystem:\n filename: /config/notification.txt\nEOF\n\n destination = \"local/config/configuration.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\n# Users database for Authelia\nusers:\n othrayte:\n password: \"$2y$10$FeemMJevZXq6y1pc6FNOXeIlthGWiGHRmMfpV33BNcpChA5ozLUmK\"\n displayname: \"Adrian\"\n email: \"othrayte@gmail.com\"\nEOF\n\n destination = \"local/config/users_database.yml\"\n }\n }\n }\n}\n",
"json": null,
"modify_index": "17976",
"name": "authelia",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "authelia",
"task": [
{
"driver": "docker",
"meta": {},
"name": "authelia",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "csi-smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "csi-smb",
"jobspec": "job \"csi-smb\" {\n type = \"system\"\n\n group \"smb\" {\n task \"plugin\" {\n driver = \"docker\"\n\n config {\n image = \"mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0\"\n args = [\n \"--v=5\",\n \"--nodeid=${attr.unique.hostname}\",\n \"--endpoint=unix:///csi/csi.sock\",\n \"--drivername=smb.csi.k8s.io\"\n ]\n privileged = true\n }\n\n csi_plugin {\n id = \"smb\"\n type = \"node\"\n mount_dir = \"/csi\"\n }\n\n resources {\n cpu = 100\n memory = 50\n }\n }\n }\n}",
"json": null,
"modify_index": "11526",
"name": "csi-smb",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "smb",
"task": [
{
"driver": "docker",
"meta": {},
"name": "plugin",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "glance",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "glance",
"jobspec": "job \"glance\" {\n group \"glance\" {\n count = 1\n\n network {\n port \"http\" {\n to = 8080\n }\n }\n\n task \"glance\" {\n driver = \"docker\"\n\n config {\n image = \"glanceapp/glance:latest\"\n ports = [\"http\"]\n volumes = [\n \"local/glance.yml:/app/config/glance.yml\",\n ]\n }\n\n service {\n name = \"home\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.home.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 50\n memory = 128\n }\n\n\n template {\n data = \u003c\u003cEOF\npages:\n - name: Home\n # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look\n # hide-desktop-navigation: true\n columns:\n - size: small\n widgets:\n - type: calendar\n first-day-of-week: monday\n\n - type: rss\n limit: 10\n collapse-after: 3\n cache: 12h\n feeds:\n - url: https://selfh.st/rss/\n title: selfh.st\n limit: 4\n - url: https://ciechanow.ski/atom.xml\n - url: https://www.joshwcomeau.com/rss.xml\n title: Josh Comeau\n - url: https://samwho.dev/rss.xml\n - url: https://ishadeed.com/feed.xml\n title: Ahmad Shadeed\n\n - type: twitch-channels\n channels:\n - theprimeagen\n - j_blow\n - piratesoftware\n - cohhcarnage\n - christitustech\n - EJ_SA\n\n - size: full\n widgets:\n - type: group\n widgets:\n - type: hacker-news\n - type: lobsters\n\n - type: videos\n channels:\n - UCXuqSBlHAE6Xw-yeJA0Tunw # Linus Tech Tips\n - UCR-DXc1voovS8nhAvccRZhg # Jeff Geerling\n - UCsBjURrPoezykLs9EqgamOA # Fireship\n - UCBJycsmduvYEL83R_U4JriQ # Marques Brownlee\n - UCHnyfMqiRRG1u-2MsSQLbXA # Veritasium\n\n - type: bookmarks\n groups:\n - links:\n - title: Gmail\n url: https://mail.google.com/mail/u/0/\n - title: Amazon\n url: https://www.amazon.com/\n - title: Github\n url: https://github.com/\n - title: Wikipedia\n url: https://en.wikipedia.org/\n - title: Infra\n color: 10 70 50\n links:\n - title: Nomad\n url: https://nomad.othrayte.one/\n - title: Consul\n url: https://consul.othrayte.one/\n - title: Traefik\n url: https://traefik.othrayte.one/\n - title: Social\n color: 200 50 50\n links:\n - title: Reddit\n url: https://www.reddit.com/\n - title: Twitter\n url: https://twitter.com/\n - title: Instagram\n url: https://www.instagram.com/\n\n - size: small\n widgets:\n - type: weather\n location: Melbourne, Australia\n units: metric\n hour-format: 12h\n # Optionally hide the location from being displayed in the widget\n # hide-location: true\n\n - type: releases\n cache: 1d\n # Without authentication the Github API allows for up to 60 requests per hour. You can create a\n # read-only token from your Github account settings and use it here to increase the limit.\n # token: ...\n repositories:\n - glanceapp/glance\n - go-gitea/gitea\n - immich-app/immich\n - syncthing/syncthing\n\n # Add more pages here:\n # - name: Your page name\n # columns:\n # - size: small\n # widgets:\n # # Add widgets here\n\n # - size: full\n # widgets:\n # # Add widgets here\n\n # - size: small\n # widgets:\n # # Add widgets here\nEOF\n\n destination = \"local/glance.yml\"\n }\n\n }\n }\n}",
"json": null,
"modify_index": "17710",
"name": "glance",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "glance",
"task": [
{
"driver": "docker",
"meta": {},
"name": "glance",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "hello_world",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "hello-world",
"jobspec": "job \"hello-world\" {\n group \"servers\" {\n network {\n port \"www\" {\n to = -1\n }\n }\n\n service {\n name = \"hello-world\"\n port = \"www\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.hello-world.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"www\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n \n\n # Tasks are individual units of work that are run by Nomad.\n task \"web\" {\n # This particular task starts a simple web server within a Docker container\n driver = \"docker\"\n\n config {\n image = \"busybox:1\"\n command = \"httpd\"\n args = [\"-v\", \"-f\", \"-p\", \"${NOMAD_PORT_www}\", \"-h\", \"/local\"]\n ports = [\"www\"]\n }\n\n template {\n data = \u003c\u003c-EOF\n \u003ch1\u003eHello, Nomad!\u003c/h1\u003e\n \u003cul\u003e\n \u003cli\u003eTask: {{env \"NOMAD_TASK_NAME\"}}\u003c/li\u003e\n \u003cli\u003eGroup: {{env \"NOMAD_GROUP_NAME\"}}\u003c/li\u003e\n \u003cli\u003eJob: {{env \"NOMAD_JOB_NAME\"}}\u003c/li\u003e\n \u003cli\u003eMetadata value for foo: {{env \"NOMAD_META_foo\"}}\u003c/li\u003e\n \u003cli\u003eCurrently running on port: {{env \"NOMAD_PORT_www\"}}\u003c/li\u003e\n \u003c/ul\u003e\n EOF\n destination = \"local/index.html\"\n }\n\n # Specify the maximum resources required to run the task\n resources {\n cpu = 50\n memory = 64\n }\n }\n }\n}",
"json": null,
"modify_index": "17709",
"name": "hello-world",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "servers",
"task": [
{
"driver": "docker",
"meta": {},
"name": "web",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "traefik",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "traefik",
"jobspec": "job \"traefik\" {\n group \"traefik\" {\n network {\n port \"http\" {\n static = 80\n }\n\n port \"https\" {\n static = 443\n }\n\n port \"api\" {\n static = 8081\n }\n }\n\n service {\n name = \"traefik\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.traefik.rule=Host(`traefik.othrayte.one`)\",\n \"traefik.http.routers.traefik.service=traefik\",\n \"traefik.http.routers.traefik.middlewares=auth@file\",\n \"traefik.http.services.traefik.loadbalancer.server.port=8081\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"traefik\" {\n type = \"host\"\n read_only = false\n source = \"traefik\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:v3.3\"\n network_mode = \"host\"\n\n volumes = [\n \"local/traefik.yml:/etc/traefik/traefik.yml\",\n \"local/configs/:/etc/traefik/configs/\"\n ]\n }\n\n volume_mount {\n volume = \"traefik\"\n destination = \"/opt/traefik\"\n read_only = false\n }\n\n template {\n data = \u003c\u003cEOF\nentryPoints:\n web:\n address: \":80\"\n http:\n redirections:\n entryPoint:\n to: websecure\n scheme: https\n websecure:\n address: \":443\"\n http:\n tls:\n certResolver: letsencrypt\n traefik:\n address: \":8081\"\n\napi:\n dashboard: true\n insecure: true\n\nproviders:\n file:\n directory: \"/etc/traefik/configs/\"\n\n consulCatalog:\n prefix: \"traefik\"\n exposedByDefault: false\n defaultRule: {{\"Host(`{{ .Name }}.othrayte.one`)\"}}\n endpoint:\n address: \"127.0.0.1:8500\"\n scheme: \"http\"\n\ncertificatesResolvers:\n letsencrypt:\n acme:\n email: \"othrayte@gmail.com\"\n storage: \"/opt/traefik/acme.json\"\n httpChallenge:\n entryPoint: web\nEOF\n\n destination = \"local/traefik.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\nhttp:\n middlewares:\n auth:\n forwardAuth:\n address: \"http://192.168.1.235:9091/api/authz/forward-auth\"\n trustForwardHeader: true\n routers:\n fallback:\n rule: \"HostRegexp(`^.+$`)\"\n entryPoints:\n - websecure\n middlewares:\n - auth\n service: noop@internal # This router just applies middleware\n priority: 1\n nomad-ui:\n rule: \"Host(`nomad.othrayte.one`)\"\n service: nomad-ui\n middlewares:\n - auth\n consul-ui:\n rule: \"Host(`consul.othrayte.one`)\"\n service: consul-ui\n middlewares:\n - auth\n unraid:\n rule: \"Host(`unraid.othrayte.one`)\"\n service: unraid\n middlewares:\n - auth\n\n services:\n nomad-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:4646\"\n consul-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:8500\"\n unraid:\n loadBalancer:\n servers:\n - url: \"http://192.168.1.192:80\"\nEOF\n\n destination = \"local/configs/nomad.yml\"\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n }\n }\n}\n",
"json": null,
"modify_index": "18000",
"name": "traefik",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "traefik",
"task": [
{
"driver": "docker",
"meta": {},
"name": "traefik",
"volume_mounts": [
{
"destination": "/opt/traefik",
"read_only": false,
"volume": "traefik"
}
]
}
],
"volumes": [
{
"name": "traefik",
"read_only": false,
"source": "traefik",
"type": "host"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "transfer",
"jobspec": "job \"transfer\" {\n group \"transfer\" {\n network {\n port \"http\" {\n to = 80\n }\n }\n\n service {\n name = \"transfer\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.volume-test.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"unraid_transfer\" {\n type = \"csi\"\n read_only = false\n source = \"unraid_transfer\"\n access_mode = \"single-node-writer\"\n attachment_mode = \"file-system\"\n\n mount_options {\n mount_flags = [\"uid=911\",\"gid=1000\"] # linuxserver.io container services run as uid 911\n }\n }\n\n task \"filebrowser\" {\n driver = \"docker\"\n\n config {\n # Use the s6 tag for the linuxserver.io based image\n image = \"filebrowser/filebrowser:s6\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config/settings.json:/config/settings.json\",\n ]\n }\n\n volume_mount {\n volume = \"unraid_transfer\"\n\t destination = \"/srv\"\n read_only = false\n }\n\n resources {\n cpu = 500\n memory = 256\n }\n\n template {\n data = \u003c\u003cEOF\n{\n \"port\": 80,\n \"baseURL\": \"\",\n \"address\": \"\",\n \"log\": \"stdout\",\n \"database\": \"/database/filebrowser.db\",\n \"root\": \"/srv\",\n \"auth\": {\n \"method\": \"noauth\"\n }\n}\nEOF\n\n destination = \"local/config/settings.json\"\n }\n }\n }\n}",
"json": null,
"modify_index": "21193",
"name": "transfer",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "transfer",
"task": [
{
"driver": "docker",
"meta": {},
"name": "filebrowser",
"volume_mounts": [
{
"destination": "/srv",
"read_only": false,
"volume": "unraid_transfer"
}
]
}
],
"volumes": [
{
"name": "unraid_transfer",
"read_only": false,
"source": "unraid_transfer",
"type": "csi"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "webapp",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "demo-webapp",
"jobspec": "job \"demo-webapp\" {\n group \"demo\" {\n count = 3\n\n network {\n port \"http\"{\n to = -1\n }\n }\n\n service {\n name = \"demo-webapp\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.demo-webapp.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"2s\"\n timeout = \"2s\"\n }\n }\n\n task \"server\" {\n env {\n PORT = \"${NOMAD_PORT_http}\"\n NODE_IP = \"${NOMAD_IP_http}\"\n }\n\n driver = \"docker\"\n\n config {\n image = \"hashicorp/demo-webapp-lb-guide\"\n ports = [\"http\"]\n }\n }\n }\n}",
"json": null,
"modify_index": "17707",
"name": "demo-webapp",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 3,
"meta": {},
"name": "demo",
"task": [
{
"driver": "docker",
"meta": {},
"name": "server",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
}
],
"check_results": null
}
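Each nomad_job entry in this state is managed by a short Terraform resource that reads its jobspec from a local file, as the transfer and unifi resources further below show. A minimal sketch for the glance entry, assuming its jobspec lives in a file named glance.nomad.hcl (the filename itself is not recorded in the state):

resource "nomad_job" "glance" {
  # Register the job with Nomad; Terraform re-submits it when the file changes
  jobspec = file("glance.nomad.hcl")
}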


@@ -1,165 +0,0 @@
job "traefik" {
group "traefik" {
network {
port "http" {
static = 80
}
port "https" {
static = 443
}
port "api" {
static = 8081
}
}
service {
name = "traefik"
tags = [
"traefik.enable=true",
"traefik.http.routers.traefik.rule=Host(`traefik.othrayte.one`)",
"traefik.http.routers.traefik.service=traefik",
"traefik.http.routers.traefik.middlewares=auth@file",
"traefik.http.services.traefik.loadbalancer.server.port=8081",
]
check {
name = "alive"
type = "tcp"
port = "http"
interval = "10s"
timeout = "2s"
}
}
volume "traefik" {
type = "host"
read_only = false
source = "traefik"
}
task "traefik" {
driver = "docker"
config {
image = "traefik:v3.3"
network_mode = "host"
volumes = [
"local/traefik.yml:/etc/traefik/traefik.yml",
"local/configs/:/etc/traefik/configs/"
]
}
volume_mount {
volume = "traefik"
destination = "/opt/traefik"
read_only = false
}
template {
data = <<EOF
entryPoints:
web:
address: ":80"
http:
redirections:
entryPoint:
to: websecure
scheme: https
websecure:
address: ":443"
http:
tls:
certResolver: letsencrypt
traefik:
address: ":8081"
api:
dashboard: true
insecure: true
providers:
file:
directory: "/etc/traefik/configs/"
consulCatalog:
prefix: "traefik"
exposedByDefault: false
defaultRule: {{"Host(`{{ .Name }}.othrayte.one`)"}}
endpoint:
address: "127.0.0.1:8500"
scheme: "http"
certificatesResolvers:
letsencrypt:
acme:
email: "othrayte@gmail.com"
storage: "/opt/traefik/acme.json"
httpChallenge:
entryPoint: web
EOF
destination = "local/traefik.yml"
}
template {
data = <<EOF
http:
middlewares:
auth:
forwardAuth:
address: "http://192.168.1.235:9091/api/authz/forward-auth"
trustForwardHeader: true
routers:
fallback:
rule: "HostRegexp(`^.+$`)"
entryPoints:
- websecure
middlewares:
- auth
service: noop@internal # This router just applies middleware
priority: 1
nomad-ui:
rule: "Host(`nomad.othrayte.one`)"
service: nomad-ui
middlewares:
- auth
consul-ui:
rule: "Host(`consul.othrayte.one`)"
service: consul-ui
middlewares:
- auth
unraid:
rule: "Host(`unraid.othrayte.one`)"
service: unraid
middlewares:
- auth
services:
nomad-ui:
loadBalancer:
servers:
- url: "http://127.0.0.1:4646"
consul-ui:
loadBalancer:
servers:
- url: "http://127.0.0.1:8500"
unraid:
loadBalancer:
servers:
- url: "http://192.168.1.192:80"
EOF
destination = "local/configs/nomad.yml"
}
resources {
cpu = 100
memory = 128
}
}
}
}
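The consulCatalog defaultRule above is why most jobs never declare a host rule of their own: any Consul service tagged traefik.enable=true is routed at <service-name>.othrayte.one, and the fallback router catches everything else behind the forward-auth middleware. For example, the glance job in the state above exposes itself with nothing more than a service name and two tags:

service {
  name = "home"
  port = "http"

  tags = [
    "traefik.enable=true",
    # Attach the forward-auth middleware defined in the file provider
    "traefik.http.routers.home.middlewares=auth@file",
  ]
}

Traefik derives Host(`home.othrayte.one`) from the service name; only routers that need a non-default rule, such as the dashboard above, set traefik.http.routers.<name>.rule explicitly.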


@@ -35,6 +35,18 @@ job "transfer" {
       }
     }
 
+    volume "appdata" {
+      type = "csi"
+      read_only = false
+      source = "unraid_appdata_transferfilebrowser"
+      access_mode = "single-node-writer"
+      attachment_mode = "file-system"
+
+      mount_options {
+        mount_flags = ["uid=911", "gid=1000"] # linuxserver.io container services run as uid 911
+      }
+    }
+
     task "filebrowser" {
       driver = "docker"
@@ -49,6 +61,12 @@ job "transfer" {
         ]
       }
 
+      volume_mount {
+        volume = "appdata"
+        destination = "/database"
+        read_only = false
+      }
+
       volume_mount {
         volume = "unraid_transfer"
         destination = "/srv"
@@ -68,10 +86,7 @@ job "transfer" {
"address": "",
"log": "stdout",
"database": "/database/filebrowser.db",
"root": "/srv",
"auth": {
"method": "noauth"
}
"root": "/srv"
}
EOF


@@ -0,0 +1,15 @@
resource "nomad_job" "transfer" {
jobspec = file("transfer.nomad.hcl")
}
module "unraid_smb_transfer" {
source = "./modules/unraid_smb"
name = "transfer"
id = "unraid_transfer"
share = "transfer"
}
module "appdata_transferfilebrowser" {
source = "./modules/appdata"
name = "transferfilebrowser"
}
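Both module calls lean on a naming convention: the appdata module invoked with name = "transferfilebrowser" registers the CSI volume unraid_appdata_transferfilebrowser, which is exactly the source string the volume "appdata" block in the job above mounts. The module's internals are not part of this diff; a minimal sketch of how that id could be derived, assuming the module folds hyphens to underscores (the unifi-network usage below follows the same pattern) and using illustrative names only:

# Hypothetical sketch of modules/appdata; only its usage appears in this diff.
variable "name" {
  type = string
}

locals {
  # e.g. "transferfilebrowser" -> "unraid_appdata_transferfilebrowser"
  volume_id = "unraid_appdata_${replace(var.name, "-", "_")}"
}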


@@ -0,0 +1,50 @@
job "unifi-network" {
group "unifi-network" {
count = 1
task "unifi-controller" {
driver = "docker"
config {
image = "jacobalberty/unifi:v9.5.21"
// Fixed IP on the actual network so that devices can find it
network_mode = "macvlan"
ipv4_address = "192.168.1.50"
}
env {
TZ = "Australia/Melbourne"
SYSTEM_IP = "192.168.1.50"
JVM_INIT_HEAP_SIZE = "1024M"
JVM_MAX_HEAP_SIZE = "1024M"
UNIFI_STDOUT = "true"
}
volume_mount {
volume = "unraid_appdata_unifi_network"
destination = "/unifi" # Expected root directory (contains data, log, cert subdirs)
read_only = false
}
resources {
cpu = 200
memory = 1850
memory_max = 2500
}
}
# CSI volume for UniFi Controller persistent data/logs
volume "unraid_appdata_unifi_network" {
type = "csi"
read_only = false
source = "unraid_appdata_unifi_network"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=0", "gid=0"]
}
}
}
}

2-nomad-config/unifi.tf Normal file

@@ -0,0 +1,9 @@
resource "nomad_job" "unifi_network" {
jobspec = file("unifi.nomad.hcl")
}
module "appdata_unifi_network" {
source = "./modules/appdata"
name = "unifi-network"
}

View File

@@ -1,42 +0,0 @@
job "demo-webapp" {
  group "demo" {
    count = 3

    network {
      port "http"{
        to = -1
      }
    }

    service {
      name = "demo-webapp"
      port = "http"

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.demo-webapp.middlewares=auth@file",
      ]

      check {
        type = "http"
        path = "/"
        interval = "2s"
        timeout = "2s"
      }
    }

    task "server" {
      env {
        PORT = "${NOMAD_PORT_http}"
        NODE_IP = "${NOMAD_IP_http}"
      }

      driver = "docker"

      config {
        image = "hashicorp/demo-webapp-lb-guide"
        ports = ["http"]
      }
    }
  }
}

renovate.json Normal file

@@ -0,0 +1,15 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["config:recommended"],
  "customManagers": [
    {
      "description": "Update Docker image tags in Nomad job files",
      "customType": "regex",
      "fileMatch": ["\\.nomad\\.hcl$"],
      "matchStrings": [
        "image\\s*=\\s*\"(?<depName>[^:\"]+):(?<currentValue>[^\"]+)\""
      ],
      "datasourceTemplate": "docker"
    }
  ]
}
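The custom manager keys off the image assignments already used in every job file: (?<depName>[^:"]+) captures everything before the first colon as the package name and (?<currentValue>[^"]+) captures the tag. Against this line from unifi.nomad.hcl above:

image = "jacobalberty/unifi:v9.5.21"

Renovate extracts depName = jacobalberty/unifi and currentValue = v9.5.21, then checks the Docker datasource for newer tags. Two caveats worth knowing: depName stops at the first colon, so an image pulled from a registry host with a port would be mis-captured, and an untagged image like hashicorp/demo-webapp-lb-guide (removed above) has no colon and is skipped entirely; neither affects the current job files.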