Compare commits

10 Commits

Author SHA1 Message Date
f7c4defe7d Add gitea 2025-05-19 22:45:33 +10:00
3ab392b50b Move nomad var secrets to secrets.enc.json 2025-05-18 23:44:24 +10:00
d2279028be Remove the hello world services initially added for testing 2025-05-18 21:53:44 +10:00
9cdd529633 Format terraform and nomad files 2025-05-18 21:50:04 +10:00
837cfdae68 Remove comment about terraform access to nomad
it was originally only working over tailscale, but it turned out it was just being blocked by the host firewall, that tailscale was bypassing. This was fixed back in the initial commit using nix by setting networking.firewall.allowedTCPPorts to include 4646.
2025-05-18 21:50:04 +10:00
021d22048d Cleanup terraform files
by moving core infra into its own file
2025-05-18 21:50:04 +10:00
b10df52f1c Allow login sessions to last longer than 1hr 2025-05-18 20:45:26 +10:00
08a2e458b2 Initial work on db backups 2025-05-18 20:18:48 +10:00
c6925362a6 Add initial PostgreSQL and pgAdmin services with Nomad configuration 2025-05-16 22:43:45 +10:00
805636f44c Properly persistent configs 2025-05-15 19:06:09 +10:00
25 changed files with 745 additions and 1297 deletions

3
.vscode/extensions.json vendored Normal file
View File

@@ -0,0 +1,3 @@
{
"recommendations": ["hashicorp.terraform", "fredwangwang.vscode-hcl-format"]
}

5
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,5 @@
{
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.formatOnSave": true
}

View File

@@ -50,10 +50,19 @@
path = "/opt/traefik"; path = "/opt/traefik";
read_only = false; read_only = false;
}; };
postgres = {
path = "/opt/postgres";
read_only = false;
}; };
}; };
cni_path = "${pkgs.cni-plugins}/bin";
};
plugin.docker.config.allow_privileged = true; plugin.docker.config.allow_privileged = true;
}; };
extraPackages = with pkgs; [
cni-plugins
consul
];
}; };
consul = { consul = {
enable = true; enable = true;
@@ -65,6 +74,8 @@
server = true; server = true;
client_addr = "127.0.0.1 100.79.223.55"; client_addr = "127.0.0.1 100.79.223.55";
datacenter = "jaglan-beta"; datacenter = "jaglan-beta";
connect.enabled = true;
ports.grpc = 8502;
}; };
}; };
openssh = { openssh = {
@@ -78,6 +89,8 @@
"d /var/lib/alloc_mounts 0755 root root -" "d /var/lib/alloc_mounts 0755 root root -"
# Create a directory for Traefik to store its data (tls certs, etc.) # Create a directory for Traefik to store its data (tls certs, etc.)
"d /opt/traefik 0755 root root -" "d /opt/traefik 0755 root root -"
# Create a directory for Postgres to store its data
"d /opt/postgres 0755 root root -"
]; ];
# Open ports in the firewall. 464X are the default ports for Nomad. # Open ports in the firewall. 464X are the default ports for Nomad.

View File

@@ -1,7 +1,7 @@
{ {
"version": 4, "version": 4,
"terraform_version": "1.11.4", "terraform_version": "1.11.4",
"serial": 73, "serial": 127,
"lineage": "db7dcf21-a255-0ec4-c8b8-d4a7559b3768", "lineage": "db7dcf21-a255-0ec4-c8b8-d4a7559b3768",
"outputs": {}, "outputs": {},
"resources": [ "resources": [
@@ -14,9 +14,9 @@
{ {
"schema_version": 0, "schema_version": 0,
"attributes": { "attributes": {
"id": "4311733097030196208", "id": "6875817390663867831",
"triggers": { "triggers": {
"configuration_content": "{ config, lib, pkgs, ... }:\n{\n imports =\n [ # Include the results of the hardware scan.\n ./hardware-configuration.nix\n ];\n\n nixpkgs.config.allowUnfree = true;\n\n # Use the systemd-boot EFI boot loader.\n boot.loader.systemd-boot.enable = true;\n boot.loader.efi.canTouchEfiVariables = true;\n\n networking.hostName = \"jaglan-beta-m01\"; # Define your hostname.\n\n time.timeZone = \"Australia/Melbourne\";\n\n # List packages installed in system profile. To search, run:\n # $ nix search wget\n # environment.systemPackages = with pkgs; [\n # vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.\n # wget\n # ];\n\n # Some programs need SUID wrappers, can be configured further or are\n # started in user sessions.\n # programs.mtr.enable = true;\n # programs.gnupg.agent = {\n # enable = true;\n # enableSSHSupport = true;\n # };\n\n # List services that you want to enable:\n services = {\n tailscale.enable = true;\n nomad = {\n enable = true;\n enableDocker = true;\n dropPrivileges = false;\n settings = {\n datacenter = \"jaglan-beta\";\n server = {\n enabled = true;\n bootstrap_expect = 1;\n };\n client = {\n enabled = true;\n host_volume = {\n traefik = {\n path = \"/opt/traefik\";\n read_only = false;\n };\n };\n };\n plugin.docker.config.allow_privileged = true;\n };\n };\n consul = {\n enable = true;\n webUi = true;\n interface.bind = \"tailscale0\"; # Bind to the Tailscale interface\n interface.advertise = \"tailscale0\"; # Advertise the Tailscale interface\n extraConfig = {\n bootstrap_expect = 1;\n server = true;\n client_addr = \"127.0.0.1 100.79.223.55\";\n datacenter = \"jaglan-beta\";\n };\n };\n openssh = {\n enable = true;\n settings.PermitRootLogin = \"yes\";\n };\n };\n\n systemd.tmpfiles.rules = [\n # Fix issue where nomad needs alloc_mounts to be writable\n \"d /var/lib/alloc_mounts 0755 root root -\"\n # Create a directory for Traefik to store its data (tls certs, 
etc.)\n \"d /opt/traefik 0755 root root -\"\n ];\n\n # Open ports in the firewall. 464X are the default ports for Nomad.\n networking.firewall.allowedTCPPorts = [ 80 443 4646 4647 4648 ];\n\n # Copy the NixOS configuration file and link it from the resulting system\n # (/run/current-system/configuration.nix). This is useful in case you\n # accidentally delete configuration.nix.\n system.copySystemConfiguration = true;\n\n # Defines the initial NixOS version for compatibility with older application data.\n # Do NOT change this value after installation without careful consideration.\n system.stateVersion = \"24.11\"; # Did you read the comment?\n}\n" "configuration_content": "{ config, lib, pkgs, ... }:\n{\n imports =\n [ # Include the results of the hardware scan.\n ./hardware-configuration.nix\n ];\n\n nixpkgs.config.allowUnfree = true;\n\n # Use the systemd-boot EFI boot loader.\n boot.loader.systemd-boot.enable = true;\n boot.loader.efi.canTouchEfiVariables = true;\n\n networking.hostName = \"jaglan-beta-m01\"; # Define your hostname.\n\n time.timeZone = \"Australia/Melbourne\";\n\n # List packages installed in system profile. To search, run:\n # $ nix search wget\n # environment.systemPackages = with pkgs; [\n # vim # Do not forget to add an editor to edit configuration.nix! 
The Nano editor is also installed by default.\n # wget\n # ];\n\n\n\n # Some programs need SUID wrappers, can be configured further or are\n # started in user sessions.\n # programs.mtr.enable = true;\n # programs.gnupg.agent = {\n # enable = true;\n # enableSSHSupport = true;\n # };\n\n # List services that you want to enable:\n services = {\n tailscale.enable = true;\n nomad = {\n enable = true;\n enableDocker = true;\n dropPrivileges = false;\n settings = {\n datacenter = \"jaglan-beta\";\n server = {\n enabled = true;\n bootstrap_expect = 1;\n };\n client = {\n enabled = true;\n host_volume = {\n traefik = {\n path = \"/opt/traefik\";\n read_only = false;\n };\n postgres = {\n path = \"/opt/postgres\";\n read_only = false;\n };\n };\n cni_path = \"${pkgs.cni-plugins}/bin\";\n };\n plugin.docker.config.allow_privileged = true;\n };\n extraPackages = with pkgs; [\n cni-plugins\n consul\n ];\n };\n consul = {\n enable = true;\n webUi = true;\n interface.bind = \"tailscale0\"; # Bind to the Tailscale interface\n interface.advertise = \"tailscale0\"; # Advertise the Tailscale interface\n extraConfig = {\n bootstrap_expect = 1;\n server = true;\n client_addr = \"127.0.0.1 100.79.223.55\";\n datacenter = \"jaglan-beta\";\n connect.enabled = true;\n ports.grpc = 8502;\n };\n };\n openssh = {\n enable = true;\n settings.PermitRootLogin = \"yes\";\n };\n };\n\n systemd.tmpfiles.rules = [\n # Fix issue where nomad needs alloc_mounts to be writable\n \"d /var/lib/alloc_mounts 0755 root root -\"\n # Create a directory for Traefik to store its data (tls certs, etc.)\n \"d /opt/traefik 0755 root root -\"\n # Create a directory for Postgres to store its data\n \"d /opt/postgres 0755 root root -\"\n ];\n\n # Open ports in the firewall. 464X are the default ports for Nomad.\n networking.firewall.allowedTCPPorts = [ 80 443 4646 4647 4648 ];\n\n # Copy the NixOS configuration file and link it from the resulting system\n # (/run/current-system/configuration.nix). 
This is useful in case you\n # accidentally delete configuration.nix.\n system.copySystemConfiguration = true;\n\n # Defines the initial NixOS version for compatibility with older application data.\n # Do NOT change this value after installation without careful consideration.\n system.stateVersion = \"24.11\"; # Did you read the comment?\n}\n"
} }
}, },
"sensitive_attributes": [] "sensitive_attributes": []

View File

@@ -1,7 +1,7 @@
{ {
"version": 4, "version": 4,
"terraform_version": "1.11.4", "terraform_version": "1.11.4",
"serial": 70, "serial": 124,
"lineage": "db7dcf21-a255-0ec4-c8b8-d4a7559b3768", "lineage": "db7dcf21-a255-0ec4-c8b8-d4a7559b3768",
"outputs": {}, "outputs": {},
"resources": [ "resources": [
@@ -12,12 +12,11 @@
"provider": "provider[\"registry.terraform.io/hashicorp/null\"]", "provider": "provider[\"registry.terraform.io/hashicorp/null\"]",
"instances": [ "instances": [
{ {
"status": "tainted",
"schema_version": 0, "schema_version": 0,
"attributes": { "attributes": {
"id": "6299339239344216968", "id": "1416630882184424678",
"triggers": { "triggers": {
"configuration_content": "{ config, lib, pkgs, ... }:\n{\n imports =\n [ # Include the results of the hardware scan.\n ./hardware-configuration.nix\n ];\n\n nixpkgs.config.allowUnfree = true;\n\n # Use the systemd-boot EFI boot loader.\n boot.loader.systemd-boot.enable = true;\n boot.loader.efi.canTouchEfiVariables = true;\n\n networking.hostName = \"jaglan-beta-m01\"; # Define your hostname.\n\n time.timeZone = \"Australia/Melbourne\";\n\n # List packages installed in system profile. To search, run:\n # $ nix search wget\n # environment.systemPackages = with pkgs; [\n # vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.\n # wget\n # ];\n\n # Some programs need SUID wrappers, can be configured further or are\n # started in user sessions.\n # programs.mtr.enable = true;\n # programs.gnupg.agent = {\n # enable = true;\n # enableSSHSupport = true;\n # };\n\n # List services that you want to enable:\n services = {\n tailscale.enable = true;\n nomad = {\n enable = true;\n enableDocker = true;\n dropPrivileges = false;\n settings = {\n datacenter = \"jaglan-beta\";\n server = {\n enabled = true;\n bootstrap_expect = 1;\n };\n client = {\n enabled = true;\n host_volume = {\n traefik = {\n path = \"/opt/traefik\";\n read_only = false;\n };\n };\n plugin.docker.config.allow_privileged = true;\n };\n };\n };\n consul = {\n enable = true;\n webUi = true;\n interface.bind = \"tailscale0\"; # Bind to the Tailscale interface\n interface.advertise = \"tailscale0\"; # Advertise the Tailscale interface\n extraConfig = {\n bootstrap_expect = 1;\n server = true;\n client_addr = \"127.0.0.1 100.79.223.55\";\n datacenter = \"jaglan-beta\";\n };\n };\n openssh = {\n enable = true;\n settings.PermitRootLogin = \"yes\";\n };\n };\n\n systemd.tmpfiles.rules = [\n # Fix issue where nomad needs alloc_mounts to be writable\n \"d /var/lib/alloc_mounts 0755 root root -\"\n # Create a directory for Traefik to store its data (tls certs, 
etc.)\n \"d /opt/traefik 0755 root root -\"\n ];\n\n # Open ports in the firewall. 464X are the default ports for Nomad.\n networking.firewall.allowedTCPPorts = [ 80 443 4646 4647 4648 ];\n\n # Copy the NixOS configuration file and link it from the resulting system\n # (/run/current-system/configuration.nix). This is useful in case you\n # accidentally delete configuration.nix.\n system.copySystemConfiguration = true;\n\n # Defines the initial NixOS version for compatibility with older application data.\n # Do NOT change this value after installation without careful consideration.\n system.stateVersion = \"24.11\"; # Did you read the comment?\n}\n" "configuration_content": "{ config, lib, pkgs, ... }:\n{\n imports =\n [ # Include the results of the hardware scan.\n ./hardware-configuration.nix\n ];\n\n nixpkgs.config.allowUnfree = true;\n\n # Use the systemd-boot EFI boot loader.\n boot.loader.systemd-boot.enable = true;\n boot.loader.efi.canTouchEfiVariables = true;\n\n networking.hostName = \"jaglan-beta-m01\"; # Define your hostname.\n\n time.timeZone = \"Australia/Melbourne\";\n\n # List packages installed in system profile. To search, run:\n # $ nix search wget\n # environment.systemPackages = with pkgs; [\n # vim # Do not forget to add an editor to edit configuration.nix! 
The Nano editor is also installed by default.\n # wget\n # ];\n\n\n\n # Some programs need SUID wrappers, can be configured further or are\n # started in user sessions.\n # programs.mtr.enable = true;\n # programs.gnupg.agent = {\n # enable = true;\n # enableSSHSupport = true;\n # };\n\n # List services that you want to enable:\n services = {\n tailscale.enable = true;\n nomad = {\n enable = true;\n enableDocker = true;\n dropPrivileges = false;\n settings = {\n datacenter = \"jaglan-beta\";\n server = {\n enabled = true;\n bootstrap_expect = 1;\n };\n client = {\n enabled = true;\n host_volume = {\n traefik = {\n path = \"/opt/traefik\";\n read_only = false;\n };\n postgres = {\n path = \"/opt/postgres\";\n read_only = false;\n };\n };\n cni_path = \"${pkgs.cni-plugins}/bin\";\n };\n plugin.docker.config.allow_privileged = true;\n };\n extraPackages = with pkgs; [\n cni-plugins\n consul\n ];\n };\n consul = {\n enable = true;\n webUi = true;\n interface.bind = \"tailscale0\"; # Bind to the Tailscale interface\n interface.advertise = \"tailscale0\"; # Advertise the Tailscale interface\n extraConfig = {\n bootstrap_expect = 1;\n server = true;\n client_addr = \"127.0.0.1 100.79.223.55\";\n datacenter = \"jaglan-beta\";\n connect.enabled = true;\n ports.grpc = 8502;\n };\n };\n openssh = {\n enable = true;\n settings.PermitRootLogin = \"yes\";\n };\n };\n\n systemd.tmpfiles.rules = [\n # Fix issue where nomad needs alloc_mounts to be writable\n \"d /var/lib/alloc_mounts 0755 root root -\"\n # Create a directory for Traefik to store its data (tls certs, etc.)\n \"d /opt/traefik 0755 root root -\"\n # Create a directory for Postgres to store its data\n \"d /opt/postgres 0755 root root -\"\n ];\n\n # Open ports in the firewall. 464X are the default ports for Nomad.\n networking.firewall.allowedTCPPorts = [ 80 443 4646 4647 4648 5432 ];\n\n # Copy the NixOS configuration file and link it from the resulting system\n # (/run/current-system/configuration.nix). 
This is useful in case you\n # accidentally delete configuration.nix.\n system.copySystemConfiguration = true;\n\n # Defines the initial NixOS version for compatibility with older application data.\n # Do NOT change this value after installation without careful consideration.\n system.stateVersion = \"24.11\"; # Did you read the comment?\n}\n"
} }
}, },
"sensitive_attributes": [] "sensitive_attributes": []

View File

@@ -1,6 +1,42 @@
# This file is maintained automatically by "terraform init". # This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates. # Manual edits may be lost in future updates.
provider "registry.terraform.io/carlpett/sops" {
version = "0.7.2"
constraints = "~> 0.5"
hashes = [
"h1:eetjYKFBQb6nbgxjehD/gzzAmH2ru94ha2tEzXNiNy8=",
"zh:43f218054ea3a72c9756bf989aeebb9d0f23b66fd08e9fb4ae75d4f921295e82",
"zh:57fd326388042a6b7ecd60f740f81e5ef931546c4f068f054e7df34acf65d190",
"zh:87b970db8c137f4c2fcbff7a5705419a0aea9268ae0ac94f1ec5b978e42ab0d2",
"zh:9e3b67b89ac919f01731eb0466baa08ce0721e6cf962fe6752e7cc526ac0cba0",
"zh:c028f67ef330be0d15ce4d7ac7649a2e07a98ed3003fca52e0c72338b5f481f8",
"zh:c29362e36a44480d0d9cb7d90d1efba63fe7e0e94706b2a07884bc067c46cbc7",
"zh:d5bcfa836244718a1d564aa96eb7d733b4d361b6ecb961f7c5bcd0cadb1dfd05",
]
}
provider "registry.terraform.io/cyrilgdn/postgresql" {
version = "1.25.0"
hashes = [
"h1:4Hlupc8gYrPnFKisesXs9lypK6LXslU4L4tjBZOhmiE=",
"zh:0f9db6e1274603d642e96b58eaf6cc4223f7118f2d7ce909dc4812d332cc002a",
"zh:1819470f0304c6a60b2b51817cb43f6ff59a49e08cc9e50644b86b3a76c91601",
"zh:27bfb544983cac101a7c7c2e4cb9939a712dffcdd7ddcab83c2f8afc334e33c5",
"zh:46166f6f05771b0495df18459fdf3a63fae8b38e95a1b2754f03d006e17ea33d",
"zh:64d53afc52f26e8214990acc3e07f3b47bef628aa6b317595a8faec05b252209",
"zh:944d7ded418c022dd3ee513246677d601376fa38d76c9c4aecff2c2eefcaa35b",
"zh:9819551b61542a6d322d6a323bbb552ce02e769ce2222fd9bb1935473c7c4b3c",
"zh:c38bd73e208fe216efab48d099c85b8ad1e51ff102b3892443febc9778e7236e",
"zh:c73de133274dcc7a03e95f598550facc59315538f355e57e14b36e222b298826",
"zh:c7af02f5338bfe7f1976e01d3fcf82e05b3551893e732539a84c568d25571a84",
"zh:d1aa3d7432c7de883873f8f70e9a6207c7b536d874486d37aee0ca8c8853a890",
"zh:e17e9809fc7cc2d6f89078b8bfe6308930117b2270be8081820da40029b04828",
"zh:e1b21b7b7022e0d468d72f4534d226d57a7bfd8c96a4c7dc2c2fa0bb0b99298d",
"zh:f24b73645d8bc225f692bdf9c035411099ef57138569f45f3605ec79ac872e3b",
]
}
provider "registry.terraform.io/hashicorp/nomad" { provider "registry.terraform.io/hashicorp/nomad" {
version = "2.5.0" version = "2.5.0"
hashes = [ hashes = [

111
2-nomad-config/1-infra.tf Normal file
View File

@@ -0,0 +1,111 @@
terraform {
backend "local" {
path = "./.tfstate/terraform.tfstate"
}
}
terraform {
required_providers {
sops = {
source = "carlpett/sops"
version = "~> 0.5"
}
postgresql = {
source = "cyrilgdn/postgresql"
}
}
}
provider "nomad" {
address = "http://jaglan-beta-m01:4646"
}
data "sops_file" "secrets" {
source_file = "secrets.enc.json"
}
// Networking
resource "nomad_job" "traefik" {
jobspec = file("traefik.nomad.hcl")
}
// Authentication
resource "nomad_job" "authelia" {
jobspec = file("authelia.nomad.hcl")
}
resource "nomad_variable" "authelia" {
path = "nomad/jobs/authelia"
items = {
session_secret = data.sops_file.secrets.data["authelia.session_secret"]
jwt_secret = data.sops_file.secrets.data["authelia.jwt_secret"]
encryption_key = data.sops_file.secrets.data["authelia.encryption_key"]
}
}
// Data
resource "nomad_job" "csi-smb" {
jobspec = file("csi-smb.nomad.hcl")
}
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
resource "nomad_job" "postgres" {
jobspec = file("postgres.nomad.hcl")
}
resource "nomad_job" "pgadmin" {
jobspec = file("pgadmin.nomad.hcl")
}
resource "nomad_job" "pgbackup" {
jobspec = file("pgbackup.nomad.hcl")
}
resource "nomad_variable" "postgres" {
path = "nomad/jobs/postgres"
items = {
postgres_password = data.sops_file.secrets.data["postgres.postgres"]
}
}
provider "postgresql" {
host = "jaglan-beta-m01"
port = 5432
database = "postgres"
username = "postgres"
password = data.sops_file.secrets.data["postgres.postgres"]
sslmode = "disable"
connect_timeout = 15
}
resource "nomad_csi_volume_registration" "unraid_database_dump" {
  #Note: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_database_dump"
name = "unraid_database_dump"
external_id = "unraid_database_dump"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
source = "//192.168.1.192/database-dump"
}
secrets = {
"username" = "nomad"
"password" = data.sops_file.secrets.data["unraid.nomad"]
}
}

View File

@@ -0,0 +1,110 @@
resource "nomad_job" "glance" {
jobspec = file("glance.nomad.hcl")
}
resource "nomad_job" "transfer" {
jobspec = file("transfer.nomad.hcl")
}
resource "nomad_csi_volume_registration" "unraid_transfer" {
  #Note: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_transfer"
name = "unraid_transfer"
external_id = "unraid_transfer"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
source = "//192.168.1.192/transfer"
}
secrets = {
"username" = "anon"
"password" = ""
}
}
resource "nomad_csi_volume_registration" "unraid_appdata_transferfilebrowser" {
  #Note: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_appdata_transferfilebrowser"
name = "unraid_appdata_transferfilebrowser"
external_id = "unraid_appdata_transferfilebrowser"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
source = "//192.168.1.192/appdata"
subDir = "transferfilebrowser" # Note: Needs to be manually created on the share
}
secrets = {
"username" = "nomad"
"password" = data.sops_file.secrets.data["unraid.nomad"]
}
}
resource "nomad_job" "gitea" {
jobspec = file("gitea.nomad.hcl")
}
resource "nomad_variable" "gitea" {
path = "nomad/jobs/gitea"
items = {
internal_token = data.sops_file.secrets.data["gitea.internal_token"]
jwt_secret = data.sops_file.secrets.data["gitea.jwt_secret"]
}
}
resource "postgresql_role" "gitea" {
name = "gitea"
password = "gitea"
login = true
}
resource "postgresql_database" "gitea" {
name = "gitea"
owner = postgresql_role.gitea.name
}
resource "nomad_csi_volume_registration" "unraid_appdata_gitea" {
  #Note: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_appdata_gitea"
name = "unraid_appdata_gitea"
external_id = "unraid_appdata_gitea"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
source = "//192.168.1.192/appdata"
subDir = "gitea" # Note: Needs to be manually created on the share
}
secrets = {
"username" = "nomad"
"password" = data.sops_file.secrets.data["unraid.nomad"]
}
}

View File

@@ -0,0 +1,13 @@
export AGE_VERSION=v1.2.1
# Download the archive
wget https://github.com/FiloSottile/age/releases/download/$AGE_VERSION/age-$AGE_VERSION-linux-amd64.tar.gz
# Extract the contents of the archive
tar -xvf age-$AGE_VERSION-linux-amd64.tar.gz
# Move the binaries to a directory in our PATH
sudo mv age/age* /usr/local/bin/
# Make the binaries executable
sudo chmod +x /usr/local/bin/age*

View File

@@ -66,7 +66,7 @@ access_control:
session: session:
name: authelia_session name: authelia_session
secret: "{{ with nomadVar "nomad/jobs/authelia" }}{{ .session_secret }}{{ end }}" secret: "{{ with nomadVar "nomad/jobs/authelia" }}{{ .session_secret }}{{ end }}"
expiration: 3600 expiration: "1 day and 9 hours"
cookies: cookies:
- domain: othrayte.one - domain: othrayte.one
authelia_url: "https://auth.othrayte.one" authelia_url: "https://auth.othrayte.one"

View File

@@ -0,0 +1,122 @@
# TODOs
# - Map /data/ to unraid appdata
# - Move database config to /data/gitea/conf/app.ini (where it would be copied on first run)
job "gitea" {
group "gitea" {
network {
mode = "bridge"
port "http" {
to = 3000
}
}
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
service {
name = "code"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.gitea.middlewares=auth@file",
]
# check {
# type = "http"
# path = "/"
# interval = "10s"
# timeout = "2s"
# }
}
task "gitea" {
driver = "docker"
config {
image = "gitea/gitea:latest"
ports = ["http"]
volumes = ["local/app.ini:/data/gitea/conf/app.ini"]
}
env = {
USER_UID = "1000"
USER_GID = "1000"
}
resources {
cpu = 500
memory = 256
}
volume_mount {
volume = "unraid_appdata_gitea"
destination = "/data"
read_only = false
}
template {
data = <<EOF
# Gitea configuration file
WORK_PATH = /data/
[database]
DB_TYPE = postgres
HOST = localhost:5432
NAME = gitea
USER = gitea
PASSWD = gitea
[repository]
ROOT = /data/git/repositories
[server]
DOMAIN = code.othrayte.one
ROOT_URL = https://code.othrayte.one/
[lfs]
PATH = /data/git/lfs
[log]
MODE = console
LEVEL = info
ROOT_PATH = /data/gitea/log
[security]
INSTALL_LOCK = true
INTERNAL_TOKEN = {{ with nomadVar "nomad/jobs/gitea" }}{{ .internal_token }}{{ end }}
PASSWORD_HASH_ALGO = pbkdf2
[oauth2]
JWT_SECRET = {{ with nomadVar "nomad/jobs/gitea" }}{{ .jwt_secret }}{{ end }}
EOF
destination = "local/app.ini"
}
}
volume "unraid_appdata_gitea" {
type = "csi"
read_only = false
source = "unraid_appdata_gitea"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=1000"]
}
}
}
}

View File

@@ -1,61 +0,0 @@
job "hello-world" {
group "servers" {
network {
port "www" {
to = -1
}
}
service {
name = "hello-world"
port = "www"
tags = [
"traefik.enable=true",
"traefik.http.routers.hello-world.middlewares=auth@file",
]
check {
name = "alive"
type = "tcp"
port = "www"
interval = "10s"
timeout = "2s"
}
}
# Tasks are individual units of work that are run by Nomad.
task "web" {
# This particular task starts a simple web server within a Docker container
driver = "docker"
config {
image = "busybox:1"
command = "httpd"
args = ["-v", "-f", "-p", "${NOMAD_PORT_www}", "-h", "/local"]
ports = ["www"]
}
template {
data = <<-EOF
<h1>Hello, Nomad!</h1>
<ul>
<li>Task: {{env "NOMAD_TASK_NAME"}}</li>
<li>Group: {{env "NOMAD_GROUP_NAME"}}</li>
<li>Job: {{env "NOMAD_JOB_NAME"}}</li>
<li>Metadata value for foo: {{env "NOMAD_META_foo"}}</li>
<li>Currently running on port: {{env "NOMAD_PORT_www"}}</li>
</ul>
EOF
destination = "local/index.html"
}
# Specify the maximum resources required to run the task
resources {
cpu = 50
memory = 64
}
}
}
}

View File

@@ -0,0 +1,97 @@
job "pgadmin" {
group "pgadmin" {
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
network {
mode = "bridge"
port "http" {
to = 80
}
}
task "pgadmin" {
driver = "docker"
config {
image = "dpage/pgadmin4:latest"
ports = ["http"]
volumes = [
"local/servers.json:/pgadmin4/servers.json",
"secrets/.pgpass:/home/.pgpass"
]
}
env = {
PGADMIN_DEFAULT_EMAIL = "othrayte@gmail.com"
PGADMIN_DEFAULT_PASSWORD = "admin"
PGADMIN_CONFIG_WTF_CSRF_ENABLED = "False"
PGADMIN_CONFIG_WTF_CSRF_CHECK_DEFAULT = "False"
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION = "False"
PGADMIN_CONFIG_SERVER_MODE = "False"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED = "False"
}
resources {
cpu = 500
memory = 256
}
service {
name = "pgadmin"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.pgadmin.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
template {
data = <<EOF
{
"Servers": {
"1": {
"Group": "Servers",
"Name": "postgres",
"Host": "localhost",
"Port": 5432,
"MaintenanceDB": "postgres",
"Username": "postgres",
"PassFile": "/home/.pgpass"
}
}
}
EOF
destination = "local/servers.json"
}
template {
data = <<EOF
localhost:5432:*:postgres:{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "secrets/.pgpass"
perms = "0400"
uid = 5050 # pgadmin
}
}
}
}

View File

@@ -0,0 +1,77 @@
job "pgbackup" {
type = "batch"
periodic {
# Note: To avoid issues with daylight savings, avoid scheduling jobs at 2am +/- 1 hour
    cron = "* 04 * * *" # Every minute of the 4am hour (NOTE: "0 4 * * *" would run once daily at 4am — confirm intent)
time_zone = "Australia/Melbourne"
prohibit_overlap = true
}
group "pgbackup" {
service {
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "postgres"
local_bind_port = 5432
}
}
}
}
}
task "pgbackup" {
driver = "docker"
config {
image = "postgres:latest"
command = "/bin/bash"
args = ["-c", "pg_dumpall -h localhost -U postgres > /backup/all_databases.sql"]
volumes = ["secrets/postgres_password:/run/secrets/postgres_password"]
}
user = "1000"
volume_mount {
volume = "unraid_database_dump"
destination = "/backup"
read_only = false
}
env {
PGPASSFILE = "/run/secrets/postgres_password"
}
template {
data = <<EOF
localhost:5432:*:postgres:{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "/secrets/postgres_password"
perms = "0400"
uid = 1000
}
resources {
cpu = 250
memory = 128
}
}
volume "unraid_database_dump" {
type = "csi"
read_only = false
source = "unraid_database_dump"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=1000", "gid=0"]
}
}
network {
mode = "bridge"
}
}
}

View File

@@ -0,0 +1,64 @@
job "postgres" {
group "postgres" {
service {
name = "postgres"
port = "db"
connect {
sidecar_service {}
}
}
task "postgres" {
driver = "docker"
config {
image = "postgres:latest"
ports = ["db"]
volumes = [
"secrets/postgres_password:/run/secrets/postgres_password"
]
}
volume_mount {
volume = "data"
destination = "/var/lib/postgresql/data"
read_only = false
}
env {
POSTGRES_USER = "postgres"
POSTGRES_PASSWORD_FILE = "/run/secrets/postgres_password"
POSTGRES_INITDB_ARGS = "--auth-host=md5"
}
resources {
cpu = 500
memory = 512
}
template {
# This securely sets the initial password for the postgres user, to change it later
# you need to connect to the database and change it manually
data = <<EOF
{{ with nomadVar "nomad/jobs/postgres" }}{{ .postgres_password }}{{ end }}
EOF
destination = "secrets/postgres_password"
}
}
network {
mode = "bridge"
port "db" {
static = 5432
}
}
volume "data" {
type = "host"
read_only = false
source = "postgres"
}
}
}

14
2-nomad-config/readme.md Normal file
View File

@@ -0,0 +1,14 @@
# Terraform State
Mount the state on the fileshare to 2-nomad-config/.tfstate/
`sudo mount -t cifs //192.168.1.192/appdata/terraform /home/othrayte/Code/infra/2-nomad-config/.tfstate/ -o rw,username=othrayte,password=<pw>,uid=$(id -u),gid=$(id -g)`
# Secrets
The secrets file is encrypted using sops and will be automatically decrypted in the terraform provider.
Put the age keys in /home/<user>/.config/sops/age/keys.txt
## Adding Secrets
Edit the secrets using `sops secrets.enc.json`

View File

@@ -0,0 +1,29 @@
{
"unraid": {
"nomad": "ENC[AES256_GCM,data:FCGEs+XCSuunLxVPyzE=,iv:j8Ey+l8iJiPY7CbE5IoT0ZgNklnv+4odSZkorJQ/nr8=,tag:7PoizENid+vgWC/eb5MOaQ==,type:str]"
},
"authelia": {
"session_secret": "ENC[AES256_GCM,data:gPVSGzU00EjuW/NDD9bpsc+4DQ==,iv:IRzSKqfv2Quaj1bzrFaK0glCKEPrle+uI8fq/1HFi60=,tag:loiTEpEBGBwQETRWpOffNg==,type:str]",
"jwt_secret": "ENC[AES256_GCM,data:7Q/0M5IY0vLsgCE0z78L,iv:f6GymDrq2/NlKJuMNnDDmG2GUAzhonNa8LXlr0x1elw=,tag:1ITT9WmD3UOP30AjYEkLJQ==,type:str]",
"encryption_key": "ENC[AES256_GCM,data:wT7aYD2DIu4VQa3GTmlkBFBvtoPvlgUF/fYJo9+wQhRcywY=,iv:29pIf46S9+OVWgSNyuwOaOXD2bWTmdcLzMLQ06VywZQ=,tag:n9JkIbHCB2xFfJ7MHcUKvg==,type:str]"
},
"postgres": {
"postgres": "ENC[AES256_GCM,data:lKuLcVTuUbfrlVhRdCs=,iv:TsbtAbXYTysxuiCi08F0hJsgoolzzgE2EPdFdPMQ+NQ=,tag:9oNua06hHdeCzE7nB22c0g==,type:str]"
},
"gitea": {
"internal_token": "ENC[AES256_GCM,data:teIsV+6nUPWO9/amas3FmK6uv44YEZNpV780ncTwUkQDygDvQRr7A3KEbk/rYFcTjfxK6Kw8nmqi0rBrcBNX1bSVNg8jwfYHhY2TxFMgCo4tkQxLf3eSBUhlPGsfpsskACKIPnZ1RQ2m,iv:NAKPw0YVNtLlyEp7wld9ml4zQlVxo/takiOid6YQlfA=,tag:QIk+USh8MLZDzJkQsglJ+w==,type:str]",
"jwt_secret": "ENC[AES256_GCM,data:/dPDqJdn4Af3Wo005V7lU9b8RbN/wyF0Tx66827cdyaZfi4QPOSj23wNqw==,iv:yJW2PiAGGr97q0DoBr64X88eFNpuVPZX0SPyNDp5QjQ=,tag:p27XTUbMC0WDMTNJCscmGQ==,type:str]"
},
"sops": {
"age": [
{
"recipient": "age1zuj9ssd0kkfeefjmyz82t9h3dfjq8degqm2l5eszhu5zhazpgsys54rq2n",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByUWM4ZDVVbGFrUGdMRHBX\nUFBmU3Nlc0RBSzhFK0tHNHpkQXUvUVdiZUZJCmpRN1lFdENpWW0rcThjVlVQNUl6\nWnlLU0RnQ3FZby81Ly8xTFBrek9nMncKLS0tIFQ4UTRNOC9CRmx4OFJWem1wckZz\nUDFTSzdWZldFK3FqcTNWTWRyNDhHQ2MKS811mR5xn7qiC/aVgPFYJ5c6Q3zxRfcr\nHcvxUvB01vNJKZpRg92vvKPkV6lQO3DXCT98OdfwiymlEOvYxg71Pg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-19T12:11:30Z",
"mac": "ENC[AES256_GCM,data:BjUuMWp3bE0iHLZZ9lHh/sSKSDF3sBgRr4CmKKqjXaY1CJ6k9wESgZmxjT2FOTfzJ5tZaBXdm4WKwagE6frke0eNfYDIWC+FQfX/4geUe8OyQFW/22i7I60uS4bVv9PAO/JJKTNCZxOdtLsK7fZ8rS4Jve9mAdhEbKfPmQHIiy4=,iv:cqi4rpbJLxLr8zjKrx80mKJBbSuU1D0XjUbBg1CYDRk=,tag:+8Hl9WQ2ZUY6BPMw/GMtpQ==,type:str]",
"encrypted_regex": "^(.*)$",
"version": "3.10.2"
}
}

View File

@@ -1,67 +0,0 @@
provider "nomad" {
# Nomad API endpoint, reached by hostname on the default API port (4646).
# NOTE(review): an earlier note here blamed tailscale-only binding; more
# likely the host firewall simply needed 4646 opened — confirm before
# changing the address.
address = "http://jaglan-beta-m01:4646"
}
# Nomad job deployments managed by Terraform. A bare relative path passed to
# file() is resolved against the current working directory, so each jobspec
# is anchored to the module directory to keep plans/applies working no matter
# where terraform is invoked from.

resource "nomad_job" "glance" {
  jobspec = file("${path.module}/glance.nomad.hcl")
}

resource "nomad_job" "hello_world" {
  jobspec = file("${path.module}/hello-world.nomad.hcl")
}

resource "nomad_job" "traefik" {
  jobspec = file("${path.module}/traefik.nomad.hcl")
}

resource "nomad_job" "authelia" {
  jobspec = file("${path.module}/authelia.nomad.hcl")
}

resource "nomad_job" "webapp" {
  jobspec = file("${path.module}/webapp.nomad.hcl")
}

# Deploys the SMB CSI plugin as a Nomad system job; the "smb" plugin id is
# what data.nomad_plugin.smb waits on before volumes are registered.
resource "nomad_job" "csi-smb" {
  jobspec = file("${path.module}/csi-smb.nomad.hcl")
}
# Gate on the SMB CSI plugin (deployed by the csi-smb job) reporting healthy
# before any CSI volume registrations that depend on it are attempted.
data "nomad_plugin" "smb" {
plugin_id = "smb"
wait_for_healthy = true
}
# Registers an existing SMB share as a CSI volume so Nomad jobs can mount it
# through the "smb" plugin. Registration only records the volume; the share
# itself must already exist on the host below.
resource "nomad_csi_volume_registration" "unraid_transfer" {
# NOTE: Before changing the definition of this volume you need to stop the jobs that are using it
depends_on = [data.nomad_plugin.smb]
plugin_id = "smb"
volume_id = "unraid_transfer"
name = "unraid_transfer"
external_id = "unraid_transfer"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
# The SMB share exported by the NAS (presumably the Unraid box — confirm).
context = {
source = "//192.168.1.192/transfer"
}
parameters = {
"csi.storage.k8s.io/node-stage-secret-name" = "smbcreds"
"csi.storage.k8s.io/node-stage-secret-namespace" = "default"
}
# Anonymous/guest credentials; the empty password appears intentional,
# assuming the share permits guest access — TODO confirm.
secrets = {
"username" = "anon"
"password" = ""
}
}
# File-transfer job (filebrowser over the unraid_transfer CSI volume).
# Anchor the jobspec to the module directory: a bare relative path given to
# file() resolves against the current working directory, which breaks when
# terraform is invoked from elsewhere.
resource "nomad_job" "transfer" {
  jobspec = file("${path.module}/transfer.nomad.hcl")
}

View File

@@ -0,0 +1,10 @@
# Pin the sops release to install.
SOPS_VERSION="v3.10.2"
export SOPS_VERSION

# Fetch the release binary from GitHub, following redirects and keeping the
# remote file name.
curl -LO "https://github.com/getsops/sops/releases/download/${SOPS_VERSION}/sops-${SOPS_VERSION}.linux.amd64"

# Place the binary on the PATH under the name 'sops'.
sudo mv "sops-${SOPS_VERSION}.linux.amd64" /usr/local/bin/sops

# Mark it executable.
sudo chmod +x /usr/local/bin/sops

View File

@@ -1,519 +0,0 @@
{
"version": 4,
"terraform_version": "1.11.4",
"serial": 595,
"lineage": "15e0900c-88bc-9754-4600-e3977d018ba0",
"outputs": {},
"resources": [
{
"mode": "data",
"type": "nomad_plugin",
"name": "smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"id": "smb",
"nodes": [
{
"healthy": true,
"healthy_description": "healthy",
"name": "0db77253-0579-e8b0-42cd-d619af9d8e73"
}
],
"nodes_expected": 1,
"nodes_healthy": 1,
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"wait_for_healthy": true,
"wait_for_registration": false
},
"sensitive_attributes": []
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer",
"id": "unraid_transfer",
"mount_options": [],
"name": "unraid_transfer",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "",
"username": "anon"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "authelia",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "authelia",
"jobspec": "job \"authelia\" {\n group \"authelia\" {\n network {\n port \"http\" {\n static = 9091\n }\n }\n\n service {\n name = \"auth\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n ]\n\n check {\n type = \"http\"\n path = \"/health\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n task \"authelia\" {\n driver = \"docker\"\n\n config {\n image = \"authelia/authelia:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config:/config\",\n \"local/data:/data\"\n ]\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n\n template {\n data = \u003c\u003cEOF\nserver:\n address: tcp://0.0.0.0:{{ env \"NOMAD_PORT_http\" }}/\ntheme: \"auto\"\nidentity_validation:\n reset_password:\n jwt_secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}\"\n\nauthentication_backend:\n file:\n path: /config/users_database.yml\n\naccess_control:\n default_policy: deny\n rules:\n - domain: \"*.othrayte.one\"\n policy: one_factor\n # Disable auth for authelia\n #- domain: \"auth.othrayte.one\"\n # policy: bypass\n\nsession:\n name: authelia_session\n secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}\"\n expiration: 3600\n cookies:\n - domain: othrayte.one\n authelia_url: \"https://auth.othrayte.one\"\n\nstorage:\n local:\n path: /config/db.sqlite3\n encryption_key: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .encryption_key }}{{ end }}\"\n\nnotifier:\n filesystem:\n filename: /config/notification.txt\nEOF\n\n destination = \"local/config/configuration.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\n# Users database for Authelia\nusers:\n othrayte:\n password: \"$2y$10$FeemMJevZXq6y1pc6FNOXeIlthGWiGHRmMfpV33BNcpChA5ozLUmK\"\n displayname: \"Adrian\"\n email: \"othrayte@gmail.com\"\nEOF\n\n destination = \"local/config/users_database.yml\"\n }\n }\n }\n}\n",
"json": null,
"modify_index": "17976",
"name": "authelia",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "authelia",
"task": [
{
"driver": "docker",
"meta": {},
"name": "authelia",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "csi-smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "csi-smb",
"jobspec": "job \"csi-smb\" {\n type = \"system\"\n\n group \"smb\" {\n task \"plugin\" {\n driver = \"docker\"\n\n config {\n image = \"mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0\"\n args = [\n \"--v=5\",\n \"--nodeid=${attr.unique.hostname}\",\n \"--endpoint=unix:///csi/csi.sock\",\n \"--drivername=smb.csi.k8s.io\"\n ]\n privileged = true\n }\n\n csi_plugin {\n id = \"smb\"\n type = \"node\"\n mount_dir = \"/csi\"\n }\n\n resources {\n cpu = 100\n memory = 50\n }\n }\n }\n}",
"json": null,
"modify_index": "11526",
"name": "csi-smb",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "smb",
"task": [
{
"driver": "docker",
"meta": {},
"name": "plugin",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "glance",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "glance",
"jobspec": "job \"glance\" {\n group \"glance\" {\n count = 1\n\n network {\n port \"http\" {\n to = 8080\n }\n }\n\n task \"glance\" {\n driver = \"docker\"\n\n config {\n image = \"glanceapp/glance:latest\"\n ports = [\"http\"]\n volumes = [\n \"local/glance.yml:/app/config/glance.yml\",\n ]\n }\n\n service {\n name = \"home\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.home.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 50\n memory = 128\n }\n\n\n template {\n data = \u003c\u003cEOF\npages:\n - name: Home\n # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look\n # hide-desktop-navigation: true\n columns:\n - size: small\n widgets:\n - type: calendar\n first-day-of-week: monday\n\n - type: rss\n limit: 10\n collapse-after: 3\n cache: 12h\n feeds:\n - url: https://selfh.st/rss/\n title: selfh.st\n limit: 4\n - url: https://ciechanow.ski/atom.xml\n - url: https://www.joshwcomeau.com/rss.xml\n title: Josh Comeau\n - url: https://samwho.dev/rss.xml\n - url: https://ishadeed.com/feed.xml\n title: Ahmad Shadeed\n\n - type: twitch-channels\n channels:\n - theprimeagen\n - j_blow\n - piratesoftware\n - cohhcarnage\n - christitustech\n - EJ_SA\n\n - size: full\n widgets:\n - type: group\n widgets:\n - type: hacker-news\n - type: lobsters\n\n - type: videos\n channels:\n - UCXuqSBlHAE6Xw-yeJA0Tunw # Linus Tech Tips\n - UCR-DXc1voovS8nhAvccRZhg # Jeff Geerling\n - UCsBjURrPoezykLs9EqgamOA # Fireship\n - UCBJycsmduvYEL83R_U4JriQ # Marques Brownlee\n - UCHnyfMqiRRG1u-2MsSQLbXA # Veritasium\n\n - type: bookmarks\n groups:\n - links:\n - title: Gmail\n url: https://mail.google.com/mail/u/0/\n - title: Amazon\n url: https://www.amazon.com/\n - title: Github\n url: https://github.com/\n - title: Wikipedia\n url: https://en.wikipedia.org/\n - title: Infra\n color: 10 70 50\n 
links:\n - title: Nomad\n url: https://nomad.othrayte.one/\n - title: Consul\n url: https://consul.othrayte.one/\n - title: Traefik\n url: https://traefik.othrayte.one/\n - title: Social\n color: 200 50 50\n links:\n - title: Reddit\n url: https://www.reddit.com/\n - title: Twitter\n url: https://twitter.com/\n - title: Instagram\n url: https://www.instagram.com/\n\n - size: small\n widgets:\n - type: weather\n location: Melbourne, Australia\n units: metric\n hour-format: 12h\n # Optionally hide the location from being displayed in the widget\n # hide-location: true\n\n - type: releases\n cache: 1d\n # Without authentication the Github API allows for up to 60 requests per hour. You can create a\n # read-only token from your Github account settings and use it here to increase the limit.\n # token: ...\n repositories:\n - glanceapp/glance\n - go-gitea/gitea\n - immich-app/immich\n - syncthing/syncthing\n\n # Add more pages here:\n # - name: Your page name\n # columns:\n # - size: small\n # widgets:\n # # Add widgets here\n\n # - size: full\n # widgets:\n # # Add widgets here\n\n # - size: small\n # widgets:\n # # Add widgets here\nEOF\n\n destination = \"local/glance.yml\"\n }\n\n }\n }\n}",
"json": null,
"modify_index": "17710",
"name": "glance",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "glance",
"task": [
{
"driver": "docker",
"meta": {},
"name": "glance",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "hello_world",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "hello-world",
"jobspec": "job \"hello-world\" {\n group \"servers\" {\n network {\n port \"www\" {\n to = -1\n }\n }\n\n service {\n name = \"hello-world\"\n port = \"www\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.hello-world.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"www\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n \n\n # Tasks are individual units of work that are run by Nomad.\n task \"web\" {\n # This particular task starts a simple web server within a Docker container\n driver = \"docker\"\n\n config {\n image = \"busybox:1\"\n command = \"httpd\"\n args = [\"-v\", \"-f\", \"-p\", \"${NOMAD_PORT_www}\", \"-h\", \"/local\"]\n ports = [\"www\"]\n }\n\n template {\n data = \u003c\u003c-EOF\n \u003ch1\u003eHello, Nomad!\u003c/h1\u003e\n \u003cul\u003e\n \u003cli\u003eTask: {{env \"NOMAD_TASK_NAME\"}}\u003c/li\u003e\n \u003cli\u003eGroup: {{env \"NOMAD_GROUP_NAME\"}}\u003c/li\u003e\n \u003cli\u003eJob: {{env \"NOMAD_JOB_NAME\"}}\u003c/li\u003e\n \u003cli\u003eMetadata value for foo: {{env \"NOMAD_META_foo\"}}\u003c/li\u003e\n \u003cli\u003eCurrently running on port: {{env \"NOMAD_PORT_www\"}}\u003c/li\u003e\n \u003c/ul\u003e\n EOF\n destination = \"local/index.html\"\n }\n\n # Specify the maximum resources required to run the task\n resources {\n cpu = 50\n memory = 64\n }\n }\n }\n}",
"json": null,
"modify_index": "17709",
"name": "hello-world",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "servers",
"task": [
{
"driver": "docker",
"meta": {},
"name": "web",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "traefik",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "traefik",
"jobspec": "job \"traefik\" {\n group \"traefik\" {\n network {\n port \"http\" {\n static = 80\n }\n\n port \"https\" {\n static = 443\n }\n\n port \"api\" {\n static = 8081\n }\n }\n\n service {\n name = \"traefik\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.traefik.rule=Host(`traefik.othrayte.one`)\",\n \"traefik.http.routers.traefik.service=traefik\",\n \"traefik.http.routers.traefik.middlewares=auth@file\",\n \"traefik.http.services.traefik.loadbalancer.server.port=8081\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"traefik\" {\n type = \"host\"\n read_only = false\n source = \"traefik\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:v3.3\"\n network_mode = \"host\"\n\n volumes = [\n \"local/traefik.yml:/etc/traefik/traefik.yml\",\n \"local/configs/:/etc/traefik/configs/\"\n ]\n }\n\n volume_mount {\n volume = \"traefik\"\n destination = \"/opt/traefik\"\n read_only = false\n }\n\n template {\n data = \u003c\u003cEOF\nentryPoints:\n web:\n address: \":80\"\n http:\n redirections:\n entryPoint:\n to: websecure\n scheme: https\n websecure:\n address: \":443\"\n http:\n tls:\n certResolver: letsencrypt\n traefik:\n address: \":8081\"\n\napi:\n dashboard: true\n insecure: true\n\nproviders:\n file:\n directory: \"/etc/traefik/configs/\"\n\n consulCatalog:\n prefix: \"traefik\"\n exposedByDefault: false\n defaultRule: {{\"Host(`{{ .Name }}.othrayte.one`)\"}}\n endpoint:\n address: \"127.0.0.1:8500\"\n scheme: \"http\"\n\ncertificatesResolvers:\n letsencrypt:\n acme:\n email: \"othrayte@gmail.com\"\n storage: \"/opt/traefik/acme.json\"\n httpChallenge:\n entryPoint: web\nEOF\n\n destination = \"local/traefik.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\nhttp:\n middlewares:\n auth:\n forwardAuth:\n address: \"http://192.168.1.235:9091/api/authz/forward-auth\"\n trustForwardHeader: true\n routers:\n fallback:\n rule: 
\"HostRegexp(`^.+$`)\"\n entryPoints:\n - websecure\n middlewares:\n - auth\n service: noop@internal # This router just applies middleware\n priority: 1\n nomad-ui:\n rule: \"Host(`nomad.othrayte.one`)\"\n service: nomad-ui\n middlewares:\n - auth\n consul-ui:\n rule: \"Host(`consul.othrayte.one`)\"\n service: consul-ui\n middlewares:\n - auth\n unraid:\n rule: \"Host(`unraid.othrayte.one`)\"\n service: unraid\n middlewares:\n - auth\n\n services:\n nomad-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:4646\"\n consul-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:8500\"\n unraid:\n loadBalancer:\n servers:\n - url: \"http://192.168.1.192:80\"\nEOF\n\n destination = \"local/configs/nomad.yml\"\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n }\n }\n}\n",
"json": null,
"modify_index": "18000",
"name": "traefik",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "traefik",
"task": [
{
"driver": "docker",
"meta": {},
"name": "traefik",
"volume_mounts": [
{
"destination": "/opt/traefik",
"read_only": false,
"volume": "traefik"
}
]
}
],
"volumes": [
{
"name": "traefik",
"read_only": false,
"source": "traefik",
"type": "host"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "transfer",
"jobspec": "job \"transfer\" {\n group \"transfer\" {\n network {\n port \"http\" {\n to = 80\n }\n }\n\n service {\n name = \"transfer\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.volume-test.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"unraid_transfer\" {\n type = \"csi\"\n read_only = false\n source = \"unraid_transfer\"\n access_mode = \"single-node-writer\"\n attachment_mode = \"file-system\"\n\n mount_options {\n mount_flags = [\"uid=911\",\"gid=1000\"] # linuxserver.io container services run as uid 911\n }\n }\n\n task \"filebrowser\" {\n driver = \"docker\"\n\n config {\n # Use the s6 tag for the linuxserver.io based image\n image = \"filebrowser/filebrowser:s6\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config/settings.json:/config/settings.json\",\n ]\n }\n\n volume_mount {\n volume = \"unraid_transfer\"\n\t destination = \"/srv\"\n read_only = false\n }\n\n resources {\n cpu = 500\n memory = 256\n }\n\n template {\n data = \u003c\u003cEOF\n{\n \"port\": 80,\n \"baseURL\": \"\",\n \"address\": \"\",\n \"log\": \"stdout\",\n \"database\": \"/database/filebrowser.db\",\n \"root\": \"/srv\",\n \"auth\": {\n \"method\": \"noauth\"\n }\n}\nEOF\n\n destination = \"local/config/settings.json\"\n }\n }\n }\n}",
"json": null,
"modify_index": "21245",
"name": "transfer",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "transfer",
"task": [
{
"driver": "docker",
"meta": {},
"name": "filebrowser",
"volume_mounts": [
{
"destination": "/srv",
"read_only": false,
"volume": "unraid_transfer"
}
]
}
],
"volumes": [
{
"name": "unraid_transfer",
"read_only": false,
"source": "unraid_transfer",
"type": "csi"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "webapp",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "demo-webapp",
"jobspec": "job \"demo-webapp\" {\n group \"demo\" {\n count = 3\n\n network {\n port \"http\"{\n to = -1\n }\n }\n\n service {\n name = \"demo-webapp\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.demo-webapp.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"2s\"\n timeout = \"2s\"\n }\n }\n\n task \"server\" {\n env {\n PORT = \"${NOMAD_PORT_http}\"\n NODE_IP = \"${NOMAD_IP_http}\"\n }\n\n driver = \"docker\"\n\n config {\n image = \"hashicorp/demo-webapp-lb-guide\"\n ports = [\"http\"]\n }\n }\n }\n}",
"json": null,
"modify_index": "17707",
"name": "demo-webapp",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 3,
"meta": {},
"name": "demo",
"task": [
{
"driver": "docker",
"meta": {},
"name": "server",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
}
],
"check_results": null
}

View File

@@ -1,584 +0,0 @@
{
"version": 4,
"terraform_version": "1.11.4",
"serial": 593,
"lineage": "15e0900c-88bc-9754-4600-e3977d018ba0",
"outputs": {},
"resources": [
{
"mode": "data",
"type": "nomad_plugin",
"name": "smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"id": "smb",
"nodes": [
{
"healthy": true,
"healthy_description": "healthy",
"name": "0db77253-0579-e8b0-42cd-d619af9d8e73"
}
],
"nodes_expected": 1,
"nodes_healthy": 1,
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"wait_for_healthy": true,
"wait_for_registration": false
},
"sensitive_attributes": []
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer",
"id": "unraid_transfer",
"mount_options": [],
"name": "unraid_transfer",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "",
"username": "anon"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_csi_volume_registration",
"name": "unraid_transfer_subdir",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"capability": [
{
"access_mode": "single-node-writer",
"attachment_mode": "file-system"
}
],
"capacity": 0,
"capacity_max": null,
"capacity_max_bytes": 0,
"capacity_min": null,
"capacity_min_bytes": 0,
"context": {
"source": "//192.168.1.192/transfer",
"subDir": "subdir"
},
"controller_required": false,
"controllers_expected": 0,
"controllers_healthy": 0,
"deregister_on_destroy": true,
"external_id": "unraid_transfer_subdir",
"id": "unraid_transfer_subdir",
"mount_options": [],
"name": "unraid_transfer_subdir",
"namespace": "default",
"nodes_expected": 1,
"nodes_healthy": 1,
"parameters": {
"csi.storage.k8s.io/node-stage-secret-name": "smbcreds",
"csi.storage.k8s.io/node-stage-secret-namespace": "default"
},
"plugin_id": "smb",
"plugin_provider": "smb.csi.k8s.io",
"plugin_provider_version": "v1.7.0",
"schedulable": true,
"secrets": {
"password": "$lUPyJw1Yc\u0026B997i",
"username": "othrayte"
},
"timeouts": null,
"topologies": [],
"topology_request": [],
"volume_id": "unraid_transfer_subdir"
},
"sensitive_attributes": [
[
{
"type": "get_attr",
"value": "secrets"
}
]
],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfX0=",
"dependencies": [
"data.nomad_plugin.smb"
]
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "authelia",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "authelia",
"jobspec": "job \"authelia\" {\n group \"authelia\" {\n network {\n port \"http\" {\n static = 9091\n }\n }\n\n service {\n name = \"auth\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n ]\n\n check {\n type = \"http\"\n path = \"/health\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n task \"authelia\" {\n driver = \"docker\"\n\n config {\n image = \"authelia/authelia:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config:/config\",\n \"local/data:/data\"\n ]\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n\n template {\n data = \u003c\u003cEOF\nserver:\n address: tcp://0.0.0.0:{{ env \"NOMAD_PORT_http\" }}/\ntheme: \"auto\"\nidentity_validation:\n reset_password:\n jwt_secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .jwt_secret }}{{ end }}\"\n\nauthentication_backend:\n file:\n path: /config/users_database.yml\n\naccess_control:\n default_policy: deny\n rules:\n - domain: \"*.othrayte.one\"\n policy: one_factor\n # Disable auth for authelia\n #- domain: \"auth.othrayte.one\"\n # policy: bypass\n\nsession:\n name: authelia_session\n secret: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .session_secret }}{{ end }}\"\n expiration: 3600\n cookies:\n - domain: othrayte.one\n authelia_url: \"https://auth.othrayte.one\"\n\nstorage:\n local:\n path: /config/db.sqlite3\n encryption_key: \"{{ with nomadVar \"nomad/jobs/authelia\" }}{{ .encryption_key }}{{ end }}\"\n\nnotifier:\n filesystem:\n filename: /config/notification.txt\nEOF\n\n destination = \"local/config/configuration.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\n# Users database for Authelia\nusers:\n othrayte:\n password: \"$2y$10$FeemMJevZXq6y1pc6FNOXeIlthGWiGHRmMfpV33BNcpChA5ozLUmK\"\n displayname: \"Adrian\"\n email: \"othrayte@gmail.com\"\nEOF\n\n destination = \"local/config/users_database.yml\"\n }\n }\n }\n}\n",
"json": null,
"modify_index": "17976",
"name": "authelia",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "authelia",
"task": [
{
"driver": "docker",
"meta": {},
"name": "authelia",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "csi-smb",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "csi-smb",
"jobspec": "job \"csi-smb\" {\n type = \"system\"\n\n group \"smb\" {\n task \"plugin\" {\n driver = \"docker\"\n\n config {\n image = \"mcr.microsoft.com/k8s/csi/smb-csi:v1.7.0\"\n args = [\n \"--v=5\",\n \"--nodeid=${attr.unique.hostname}\",\n \"--endpoint=unix:///csi/csi.sock\",\n \"--drivername=smb.csi.k8s.io\"\n ]\n privileged = true\n }\n\n csi_plugin {\n id = \"smb\"\n type = \"node\"\n mount_dir = \"/csi\"\n }\n\n resources {\n cpu = 100\n memory = 50\n }\n }\n }\n}",
"json": null,
"modify_index": "11526",
"name": "csi-smb",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "smb",
"task": [
{
"driver": "docker",
"meta": {},
"name": "plugin",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "glance",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "glance",
"jobspec": "job \"glance\" {\n group \"glance\" {\n count = 1\n\n network {\n port \"http\" {\n to = 8080\n }\n }\n\n task \"glance\" {\n driver = \"docker\"\n\n config {\n image = \"glanceapp/glance:latest\"\n ports = [\"http\"]\n volumes = [\n \"local/glance.yml:/app/config/glance.yml\",\n ]\n }\n\n service {\n name = \"home\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.home.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 50\n memory = 128\n }\n\n\n template {\n data = \u003c\u003cEOF\npages:\n - name: Home\n # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look\n # hide-desktop-navigation: true\n columns:\n - size: small\n widgets:\n - type: calendar\n first-day-of-week: monday\n\n - type: rss\n limit: 10\n collapse-after: 3\n cache: 12h\n feeds:\n - url: https://selfh.st/rss/\n title: selfh.st\n limit: 4\n - url: https://ciechanow.ski/atom.xml\n - url: https://www.joshwcomeau.com/rss.xml\n title: Josh Comeau\n - url: https://samwho.dev/rss.xml\n - url: https://ishadeed.com/feed.xml\n title: Ahmad Shadeed\n\n - type: twitch-channels\n channels:\n - theprimeagen\n - j_blow\n - piratesoftware\n - cohhcarnage\n - christitustech\n - EJ_SA\n\n - size: full\n widgets:\n - type: group\n widgets:\n - type: hacker-news\n - type: lobsters\n\n - type: videos\n channels:\n - UCXuqSBlHAE6Xw-yeJA0Tunw # Linus Tech Tips\n - UCR-DXc1voovS8nhAvccRZhg # Jeff Geerling\n - UCsBjURrPoezykLs9EqgamOA # Fireship\n - UCBJycsmduvYEL83R_U4JriQ # Marques Brownlee\n - UCHnyfMqiRRG1u-2MsSQLbXA # Veritasium\n\n - type: bookmarks\n groups:\n - links:\n - title: Gmail\n url: https://mail.google.com/mail/u/0/\n - title: Amazon\n url: https://www.amazon.com/\n - title: Github\n url: https://github.com/\n - title: Wikipedia\n url: https://en.wikipedia.org/\n - title: Infra\n color: 10 70 50\n 
links:\n - title: Nomad\n url: https://nomad.othrayte.one/\n - title: Consul\n url: https://consul.othrayte.one/\n - title: Traefik\n url: https://traefik.othrayte.one/\n - title: Social\n color: 200 50 50\n links:\n - title: Reddit\n url: https://www.reddit.com/\n - title: Twitter\n url: https://twitter.com/\n - title: Instagram\n url: https://www.instagram.com/\n\n - size: small\n widgets:\n - type: weather\n location: Melbourne, Australia\n units: metric\n hour-format: 12h\n # Optionally hide the location from being displayed in the widget\n # hide-location: true\n\n - type: releases\n cache: 1d\n # Without authentication the Github API allows for up to 60 requests per hour. You can create a\n # read-only token from your Github account settings and use it here to increase the limit.\n # token: ...\n repositories:\n - glanceapp/glance\n - go-gitea/gitea\n - immich-app/immich\n - syncthing/syncthing\n\n # Add more pages here:\n # - name: Your page name\n # columns:\n # - size: small\n # widgets:\n # # Add widgets here\n\n # - size: full\n # widgets:\n # # Add widgets here\n\n # - size: small\n # widgets:\n # # Add widgets here\nEOF\n\n destination = \"local/glance.yml\"\n }\n\n }\n }\n}",
"json": null,
"modify_index": "17710",
"name": "glance",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "glance",
"task": [
{
"driver": "docker",
"meta": {},
"name": "glance",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "hello_world",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "hello-world",
"jobspec": "job \"hello-world\" {\n group \"servers\" {\n network {\n port \"www\" {\n to = -1\n }\n }\n\n service {\n name = \"hello-world\"\n port = \"www\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.hello-world.middlewares=auth@file\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"www\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n \n\n # Tasks are individual units of work that are run by Nomad.\n task \"web\" {\n # This particular task starts a simple web server within a Docker container\n driver = \"docker\"\n\n config {\n image = \"busybox:1\"\n command = \"httpd\"\n args = [\"-v\", \"-f\", \"-p\", \"${NOMAD_PORT_www}\", \"-h\", \"/local\"]\n ports = [\"www\"]\n }\n\n template {\n data = \u003c\u003c-EOF\n \u003ch1\u003eHello, Nomad!\u003c/h1\u003e\n \u003cul\u003e\n \u003cli\u003eTask: {{env \"NOMAD_TASK_NAME\"}}\u003c/li\u003e\n \u003cli\u003eGroup: {{env \"NOMAD_GROUP_NAME\"}}\u003c/li\u003e\n \u003cli\u003eJob: {{env \"NOMAD_JOB_NAME\"}}\u003c/li\u003e\n \u003cli\u003eMetadata value for foo: {{env \"NOMAD_META_foo\"}}\u003c/li\u003e\n \u003cli\u003eCurrently running on port: {{env \"NOMAD_PORT_www\"}}\u003c/li\u003e\n \u003c/ul\u003e\n EOF\n destination = \"local/index.html\"\n }\n\n # Specify the maximum resources required to run the task\n resources {\n cpu = 50\n memory = 64\n }\n }\n }\n}",
"json": null,
"modify_index": "17709",
"name": "hello-world",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "servers",
"task": [
{
"driver": "docker",
"meta": {},
"name": "web",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "traefik",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "traefik",
"jobspec": "job \"traefik\" {\n group \"traefik\" {\n network {\n port \"http\" {\n static = 80\n }\n\n port \"https\" {\n static = 443\n }\n\n port \"api\" {\n static = 8081\n }\n }\n\n service {\n name = \"traefik\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.traefik.rule=Host(`traefik.othrayte.one`)\",\n \"traefik.http.routers.traefik.service=traefik\",\n \"traefik.http.routers.traefik.middlewares=auth@file\",\n \"traefik.http.services.traefik.loadbalancer.server.port=8081\",\n ]\n\n check {\n name = \"alive\"\n type = \"tcp\"\n port = \"http\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"traefik\" {\n type = \"host\"\n read_only = false\n source = \"traefik\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:v3.3\"\n network_mode = \"host\"\n\n volumes = [\n \"local/traefik.yml:/etc/traefik/traefik.yml\",\n \"local/configs/:/etc/traefik/configs/\"\n ]\n }\n\n volume_mount {\n volume = \"traefik\"\n destination = \"/opt/traefik\"\n read_only = false\n }\n\n template {\n data = \u003c\u003cEOF\nentryPoints:\n web:\n address: \":80\"\n http:\n redirections:\n entryPoint:\n to: websecure\n scheme: https\n websecure:\n address: \":443\"\n http:\n tls:\n certResolver: letsencrypt\n traefik:\n address: \":8081\"\n\napi:\n dashboard: true\n insecure: true\n\nproviders:\n file:\n directory: \"/etc/traefik/configs/\"\n\n consulCatalog:\n prefix: \"traefik\"\n exposedByDefault: false\n defaultRule: {{\"Host(`{{ .Name }}.othrayte.one`)\"}}\n endpoint:\n address: \"127.0.0.1:8500\"\n scheme: \"http\"\n\ncertificatesResolvers:\n letsencrypt:\n acme:\n email: \"othrayte@gmail.com\"\n storage: \"/opt/traefik/acme.json\"\n httpChallenge:\n entryPoint: web\nEOF\n\n destination = \"local/traefik.yml\"\n }\n\n template {\n data = \u003c\u003cEOF\nhttp:\n middlewares:\n auth:\n forwardAuth:\n address: \"http://192.168.1.235:9091/api/authz/forward-auth\"\n trustForwardHeader: true\n routers:\n fallback:\n rule: 
\"HostRegexp(`^.+$`)\"\n entryPoints:\n - websecure\n middlewares:\n - auth\n service: noop@internal # This router just applies middleware\n priority: 1\n nomad-ui:\n rule: \"Host(`nomad.othrayte.one`)\"\n service: nomad-ui\n middlewares:\n - auth\n consul-ui:\n rule: \"Host(`consul.othrayte.one`)\"\n service: consul-ui\n middlewares:\n - auth\n unraid:\n rule: \"Host(`unraid.othrayte.one`)\"\n service: unraid\n middlewares:\n - auth\n\n services:\n nomad-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:4646\"\n consul-ui:\n loadBalancer:\n servers:\n - url: \"http://127.0.0.1:8500\"\n unraid:\n loadBalancer:\n servers:\n - url: \"http://192.168.1.192:80\"\nEOF\n\n destination = \"local/configs/nomad.yml\"\n }\n\n resources {\n cpu = 100\n memory = 128\n }\n }\n }\n}\n",
"json": null,
"modify_index": "18000",
"name": "traefik",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "traefik",
"task": [
{
"driver": "docker",
"meta": {},
"name": "traefik",
"volume_mounts": [
{
"destination": "/opt/traefik",
"read_only": false,
"volume": "traefik"
}
]
}
],
"volumes": [
{
"name": "traefik",
"read_only": false,
"source": "traefik",
"type": "host"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "transfer",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "transfer",
"jobspec": "job \"transfer\" {\n group \"transfer\" {\n network {\n port \"http\" {\n to = 80\n }\n }\n\n service {\n name = \"transfer\"\n port = \"http\"\n \n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.volume-test.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n volume \"unraid_transfer\" {\n type = \"csi\"\n read_only = false\n source = \"unraid_transfer\"\n access_mode = \"single-node-writer\"\n attachment_mode = \"file-system\"\n\n mount_options {\n mount_flags = [\"uid=911\",\"gid=1000\"] # linuxserver.io container services run as uid 911\n }\n }\n\n task \"filebrowser\" {\n driver = \"docker\"\n\n config {\n # Use the s6 tag for the linuxserver.io based image\n image = \"filebrowser/filebrowser:s6\"\n\n ports = [\"http\"]\n\n volumes = [\n \"local/config/settings.json:/config/settings.json\",\n ]\n }\n\n volume_mount {\n volume = \"unraid_transfer\"\n\t destination = \"/srv\"\n read_only = false\n }\n\n resources {\n cpu = 500\n memory = 256\n }\n\n template {\n data = \u003c\u003cEOF\n{\n \"port\": 80,\n \"baseURL\": \"\",\n \"address\": \"\",\n \"log\": \"stdout\",\n \"database\": \"/database/filebrowser.db\",\n \"root\": \"/srv\",\n \"auth\": {\n \"method\": \"noauth\"\n }\n}\nEOF\n\n destination = \"local/config/settings.json\"\n }\n }\n }\n}",
"json": null,
"modify_index": "21193",
"name": "transfer",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "transfer",
"task": [
{
"driver": "docker",
"meta": {},
"name": "filebrowser",
"volume_mounts": [
{
"destination": "/srv",
"read_only": false,
"volume": "unraid_transfer"
}
]
}
],
"volumes": [
{
"name": "unraid_transfer",
"read_only": false,
"source": "unraid_transfer",
"type": "csi"
}
]
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
},
{
"mode": "managed",
"type": "nomad_job",
"name": "webapp",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"*"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "demo-webapp",
"jobspec": "job \"demo-webapp\" {\n group \"demo\" {\n count = 3\n\n network {\n port \"http\"{\n to = -1\n }\n }\n\n service {\n name = \"demo-webapp\"\n port = \"http\"\n\n tags = [\n \"traefik.enable=true\",\n \"traefik.http.routers.demo-webapp.middlewares=auth@file\",\n ]\n\n check {\n type = \"http\"\n path = \"/\"\n interval = \"2s\"\n timeout = \"2s\"\n }\n }\n\n task \"server\" {\n env {\n PORT = \"${NOMAD_PORT_http}\"\n NODE_IP = \"${NOMAD_IP_http}\"\n }\n\n driver = \"docker\"\n\n config {\n image = \"hashicorp/demo-webapp-lb-guide\"\n ports = [\"http\"]\n }\n }\n }\n}",
"json": null,
"modify_index": "17707",
"name": "demo-webapp",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"read_allocation_ids": false,
"region": "global",
"rerun_if_dead": false,
"status": "running",
"task_groups": [
{
"count": 3,
"meta": {},
"name": "demo",
"task": [
{
"driver": "docker",
"meta": {},
"name": "server",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
}
],
"check_results": null
}

View File

@@ -31,7 +31,19 @@ job "transfer" {
attachment_mode = "file-system" attachment_mode = "file-system"
mount_options { mount_options {
mount_flags = ["uid=911","gid=1000"] # linuxserver.io container services run as uid 911 mount_flags = ["uid=911", "gid=1000"] # linuxserver.io container services run as uid 911
}
}
volume "appdata" {
type = "csi"
read_only = false
source = "unraid_appdata_transferfilebrowser"
access_mode = "single-node-writer"
attachment_mode = "file-system"
mount_options {
mount_flags = ["uid=911", "gid=1000"] # linuxserver.io container services run as uid 911
} }
} }
@@ -49,6 +61,12 @@ job "transfer" {
] ]
} }
volume_mount {
volume = "appdata"
destination = "/database"
read_only = false
}
volume_mount { volume_mount {
volume = "unraid_transfer" volume = "unraid_transfer"
destination = "/srv" destination = "/srv"

View File

@@ -1,42 +0,0 @@
job "demo-webapp" {
group "demo" {
count = 3
network {
port "http"{
to = -1
}
}
service {
name = "demo-webapp"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.demo-webapp.middlewares=auth@file",
]
check {
type = "http"
path = "/"
interval = "2s"
timeout = "2s"
}
}
task "server" {
env {
PORT = "${NOMAD_PORT_http}"
NODE_IP = "${NOMAD_IP_http}"
}
driver = "docker"
config {
image = "hashicorp/demo-webapp-lb-guide"
ports = ["http"]
}
}
}
}