refactor: split server modules into smaller more manageable files
This commit is contained in:
parent
b2e5ae1f98
commit
cdeb4e108b
49 changed files with 1519 additions and 1270 deletions
|
@ -1,56 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataDirectory = "/var/lib/actual/";
|
||||
in {
|
||||
options.services.actual = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "actual";
|
||||
description = "subdomain of base domain that actual will be hosted at";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.actual.enable (lib.mkMerge [
|
||||
{
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${dataDirectory} 2770 actual actual"
|
||||
];
|
||||
|
||||
services.actual = {
|
||||
settings = {
|
||||
ACTUAL_DATA_DIR = dataDirectory;
|
||||
};
|
||||
};
|
||||
}
|
||||
(lib.mkIf config.host.reverse_proxy.enable {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.actual.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.actual.settings.port}";
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.services.fail2ban.enable {
|
||||
# TODO: configuration for fail2ban for actual
|
||||
})
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.actual.settings.ACTUAL_DATA_DIR == dataDirectory;
|
||||
message = "actual data location does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = dataDirectory;
|
||||
user = "actual";
|
||||
group = "actual";
|
||||
}
|
||||
];
|
||||
};
|
||||
})
|
||||
]);
|
||||
}
|
3
modules/nixos-modules/server/actual/const.nix
Normal file
3
modules/nixos-modules/server/actual/const.nix
Normal file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
dataDirectory = "/var/lib/actual/";
|
||||
}
|
34
modules/nixos-modules/server/actual/default.nix
Normal file
34
modules/nixos-modules/server/actual/default.nix
Normal file
|
@ -0,0 +1,34 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
const = import ./const.nix;
|
||||
dataDirectory = const.dataDirectory;
|
||||
in {
|
||||
imports = [
|
||||
./proxy.nix
|
||||
./fail2ban.nix
|
||||
./impermanence.nix
|
||||
];
|
||||
|
||||
options.services.actual = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "actual";
|
||||
description = "subdomain of base domain that actual will be hosted at";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.actual.enable {
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${dataDirectory} 2770 actual actual"
|
||||
];
|
||||
|
||||
services.actual = {
|
||||
settings = {
|
||||
ACTUAL_DATA_DIR = dataDirectory;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
9
modules/nixos-modules/server/actual/fail2ban.nix
Normal file
9
modules/nixos-modules/server/actual/fail2ban.nix
Normal file
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.actual.enable && config.services.fail2ban.enable) {
|
||||
# TODO: configuration for fail2ban for actual
|
||||
};
|
||||
}
|
26
modules/nixos-modules/server/actual/impermanence.nix
Normal file
26
modules/nixos-modules/server/actual/impermanence.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
const = import ./const.nix;
|
||||
dataDirectory = const.dataDirectory;
|
||||
in {
|
||||
config = lib.mkIf (config.services.actual.enable && config.host.impermanence.enable) {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.actual.settings.ACTUAL_DATA_DIR == dataDirectory;
|
||||
message = "actual data location does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = dataDirectory;
|
||||
user = "actual";
|
||||
group = "actual";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
13
modules/nixos-modules/server/actual/proxy.nix
Normal file
13
modules/nixos-modules/server/actual/proxy.nix
Normal file
|
@ -0,0 +1,13 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.actual.enable && config.host.reverse_proxy.enable) {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.actual.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.actual.settings.port}";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,19 +1,20 @@
|
|||
{...}: {
|
||||
imports = [
|
||||
./fail2ban.nix
|
||||
./network_storage
|
||||
./reverse_proxy.nix
|
||||
./fail2ban.nix
|
||||
./postgres.nix
|
||||
./network_storage
|
||||
./podman.nix
|
||||
./jellyfin.nix
|
||||
./forgejo.nix
|
||||
./searx.nix
|
||||
./home-assistant.nix
|
||||
./wyoming.nix
|
||||
./immich.nix
|
||||
|
||||
./actual
|
||||
./immich
|
||||
./panoramax
|
||||
./forgejo
|
||||
./home-assistant
|
||||
./jellyfin
|
||||
./paperless
|
||||
./searx
|
||||
./qbittorent.nix
|
||||
./paperless.nix
|
||||
./actual.nix
|
||||
./panoramax.nix
|
||||
./wyoming.nix
|
||||
];
|
||||
}
|
||||
|
|
|
@ -1,128 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
forgejoPort = 8081;
|
||||
stateDir = "/var/lib/forgejo";
|
||||
db_user = "forgejo";
|
||||
sshPort = 22222;
|
||||
in {
|
||||
options.services.forgejo = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that forgejo will be hosted at";
|
||||
default = "forgejo";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.forgejo.enable (lib.mkMerge [
|
||||
{
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.settings.server.BUILTIN_SSH_SERVER_USER == config.users.users.git.name;
|
||||
message = "Forgejo BUILTIN_SSH_SERVER_USER hardcoded value does not match expected git user name";
|
||||
}
|
||||
];
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
extraUsers = {
|
||||
${db_user} = {
|
||||
isClient = true;
|
||||
createUser = true;
|
||||
};
|
||||
};
|
||||
extraDatabases = {
|
||||
${db_user} = {
|
||||
name = db_user;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.forgejo = {
|
||||
database = {
|
||||
type = "postgres";
|
||||
socket = "/run/postgresql";
|
||||
};
|
||||
lfs.enable = true;
|
||||
settings = {
|
||||
server = {
|
||||
DOMAIN = "${config.services.forgejo.subdomain}.${config.host.reverse_proxy.hostname}";
|
||||
HTTP_PORT = forgejoPort;
|
||||
START_SSH_SERVER = true;
|
||||
SSH_LISTEN_PORT = sshPort;
|
||||
SSH_PORT = 22;
|
||||
BUILTIN_SSH_SERVER_USER = "git";
|
||||
ROOT_URL = "https://git.jan-leila.com";
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
database = {
|
||||
DB_TYPE = "postgres";
|
||||
NAME = db_user;
|
||||
USER = db_user;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.forgejo.settings.server.SSH_LISTEN_PORT
|
||||
];
|
||||
}
|
||||
(lib.mkIf config.host.reverse_proxy.enable {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.forgejo.subdomain} = {
|
||||
target = "http://localhost:${toString forgejoPort}";
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.services.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/forgejo.local".text = lib.mkIf config.services.forgejo.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = ".*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>"
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
forgejo-iptables.settings = lib.mkIf config.services.forgejo.enable {
|
||||
enabled = true;
|
||||
filter = "forgejo";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.forgejo.settings.log.ROOT_PATH}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.stateDir == stateDir;
|
||||
message = "forgejo state directory does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = stateDir;
|
||||
user = "forgejo";
|
||||
group = "forgejo";
|
||||
}
|
||||
];
|
||||
};
|
||||
})
|
||||
]);
|
||||
}
|
4
modules/nixos-modules/server/forgejo/const.nix
Normal file
4
modules/nixos-modules/server/forgejo/const.nix
Normal file
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
httpPort = 8081;
|
||||
sshPort = 22222;
|
||||
}
|
41
modules/nixos-modules/server/forgejo/database.nix
Normal file
41
modules/nixos-modules/server/forgejo/database.nix
Normal file
|
@ -0,0 +1,41 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf config.services.forgejo.enable (
|
||||
lib.mkMerge [
|
||||
{
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.settings.database.DB_TYPE == "postgres";
|
||||
message = "Forgejo database type must be postgres";
|
||||
}
|
||||
];
|
||||
}
|
||||
(lib.mkIf config.host.postgres.enable {
|
||||
host = {
|
||||
postgres = {
|
||||
extraUsers = {
|
||||
forgejo = {
|
||||
isClient = true;
|
||||
createUser = true;
|
||||
};
|
||||
};
|
||||
extraDatabases = {
|
||||
forgejo = {
|
||||
name = "forgejo";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
]
|
||||
);
|
||||
}
|
61
modules/nixos-modules/server/forgejo/default.nix
Normal file
61
modules/nixos-modules/server/forgejo/default.nix
Normal file
|
@ -0,0 +1,61 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
const = import ./const.nix;
|
||||
httpPort = const.httpPort;
|
||||
sshPort = const.sshPort;
|
||||
db_user = "forgejo";
|
||||
in {
|
||||
imports = [
|
||||
./proxy.nix
|
||||
./database.nix
|
||||
./fail2ban.nix
|
||||
./impermanence.nix
|
||||
];
|
||||
|
||||
options.services.forgejo = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that forgejo will be hosted at";
|
||||
default = "forgejo";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.forgejo.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.settings.server.BUILTIN_SSH_SERVER_USER == config.users.users.git.name;
|
||||
message = "Forgejo BUILTIN_SSH_SERVER_USER hardcoded value does not match expected git user name";
|
||||
}
|
||||
];
|
||||
|
||||
services.forgejo = {
|
||||
database = {
|
||||
type = "postgres";
|
||||
socket = "/run/postgresql";
|
||||
};
|
||||
lfs.enable = true;
|
||||
settings = {
|
||||
server = {
|
||||
DOMAIN = "${config.services.forgejo.subdomain}.${config.host.reverse_proxy.hostname}";
|
||||
HTTP_PORT = httpPort;
|
||||
START_SSH_SERVER = true;
|
||||
SSH_LISTEN_PORT = sshPort;
|
||||
SSH_PORT = 22;
|
||||
BUILTIN_SSH_SERVER_USER = "git";
|
||||
ROOT_URL = "https://git.jan-leila.com";
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
database = {
|
||||
DB_TYPE = "postgres";
|
||||
NAME = db_user;
|
||||
USER = db_user;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
32
modules/nixos-modules/server/forgejo/fail2ban.nix
Normal file
32
modules/nixos-modules/server/forgejo/fail2ban.nix
Normal file
|
@ -0,0 +1,32 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.forgejo.enable && config.services.fail2ban.enable) {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/forgejo.local".text = lib.mkIf config.services.forgejo.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = ".*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>"
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
forgejo-iptables.settings = lib.mkIf config.services.forgejo.enable {
|
||||
enabled = true;
|
||||
filter = "forgejo";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.forgejo.settings.log.ROOT_PATH}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
28
modules/nixos-modules/server/forgejo/impermanence.nix
Normal file
28
modules/nixos-modules/server/forgejo/impermanence.nix
Normal file
|
@ -0,0 +1,28 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
stateDir = "/var/lib/forgejo";
|
||||
in {
|
||||
config = lib.mkIf (config.services.forgejo.enable && config.host.impermanence.enable) {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.stateDir == stateDir;
|
||||
message = "forgejo state directory does not match persistence";
|
||||
}
|
||||
];
|
||||
|
||||
environment.persistence."/persist/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = stateDir;
|
||||
user = "forgejo";
|
||||
group = "forgejo";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
18
modules/nixos-modules/server/forgejo/proxy.nix
Normal file
18
modules/nixos-modules/server/forgejo/proxy.nix
Normal file
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
const = import ./const.nix;
|
||||
httpPort = const.httpPort;
|
||||
in {
|
||||
config = lib.mkIf (config.services.forgejo.enable && config.host.reverse_proxy.enable) {
|
||||
host.reverse_proxy.subdomains.${config.services.forgejo.subdomain} = {
|
||||
target = "http://localhost:${toString httpPort}";
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.forgejo.settings.server.SSH_LISTEN_PORT
|
||||
];
|
||||
};
|
||||
}
|
|
@ -1,230 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
configDir = "/var/lib/hass";
|
||||
dbUser = "hass";
|
||||
in {
|
||||
options.services.home-assistant = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that home-assistant will be hosted at";
|
||||
default = "home-assistant";
|
||||
};
|
||||
|
||||
database = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
"builtin"
|
||||
"postgres"
|
||||
];
|
||||
description = "what database do we want to use";
|
||||
default = "builtin";
|
||||
};
|
||||
|
||||
extensions = {
|
||||
sonos = {
|
||||
enable = lib.mkEnableOption "enable the sonos plugin";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 1400;
|
||||
description = "what port to use for sonos discovery";
|
||||
};
|
||||
};
|
||||
jellyfin = {
|
||||
enable = lib.mkEnableOption "enable the jellyfin plugin";
|
||||
};
|
||||
wyoming = {
|
||||
enable = lib.mkEnableOption "enable wyoming";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.enable (lib.mkMerge [
|
||||
{
|
||||
services.home-assistant = {
|
||||
configDir = configDir;
|
||||
extraComponents = [
|
||||
"default_config"
|
||||
"esphome"
|
||||
"met"
|
||||
"radio_browser"
|
||||
"isal"
|
||||
"zha"
|
||||
"webostv"
|
||||
"tailscale"
|
||||
"syncthing"
|
||||
"analytics_insights"
|
||||
"unifi"
|
||||
"openweathermap"
|
||||
"ollama"
|
||||
"mobile_app"
|
||||
"logbook"
|
||||
"ssdp"
|
||||
"usb"
|
||||
"webhook"
|
||||
"bluetooth"
|
||||
"dhcp"
|
||||
"energy"
|
||||
"history"
|
||||
"backup"
|
||||
"assist_pipeline"
|
||||
"conversation"
|
||||
"sun"
|
||||
"zeroconf"
|
||||
"cpuspeed"
|
||||
];
|
||||
config = {
|
||||
http = {
|
||||
server_port = 8123;
|
||||
use_x_forwarded_for = true;
|
||||
trusted_proxies = ["127.0.0.1" "::1"];
|
||||
ip_ban_enabled = true;
|
||||
login_attempts_threshold = 10;
|
||||
};
|
||||
homeassistant = {
|
||||
external_url = "https://${config.services.home-assistant.subdomain}.${config.host.reverse_proxy.hostname}";
|
||||
# internal_url = "http://192.168.1.2:8123";
|
||||
};
|
||||
recorder.db_url = "postgresql://@/${dbUser}";
|
||||
"automation manual" = [];
|
||||
"automation ui" = "!include automations.yaml";
|
||||
mobile_app = {};
|
||||
};
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
hassil
|
||||
numpy
|
||||
gtts
|
||||
];
|
||||
};
|
||||
|
||||
# TODO: configure /var/lib/hass/secrets.yaml via sops
|
||||
|
||||
networking.firewall.allowedUDPPorts = [
|
||||
1900
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"f ${config.services.home-assistant.configDir}/automations.yaml 0755 hass hass"
|
||||
];
|
||||
}
|
||||
(lib.mkIf (config.services.home-assistant.extensions.sonos.enable) {
|
||||
services.home-assistant.extraComponents = ["sonos"];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.home-assistant.extensions.sonos.port
|
||||
];
|
||||
})
|
||||
(lib.mkIf (config.services.home-assistant.extensions.jellyfin.enable) {
|
||||
services.home-assistant.extraComponents = ["jellyfin"];
|
||||
# TODO: configure port, address, and login information here
|
||||
})
|
||||
(lib.mkIf (config.services.home-assistant.extensions.wyoming.enable) {
|
||||
services.home-assistant.extraComponents = ["wyoming"];
|
||||
services.wyoming.enable = true;
|
||||
})
|
||||
(lib.mkIf (config.services.home-assistant.database == "postgres") {
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
extraUsers = {
|
||||
${dbUser} = {
|
||||
isClient = true;
|
||||
createUser = true;
|
||||
};
|
||||
};
|
||||
extraDatabases = {
|
||||
${dbUser} = {
|
||||
name = dbUser;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.home-assistant = {
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
psycopg2
|
||||
];
|
||||
};
|
||||
|
||||
systemd.services.home-assistant = {
|
||||
requires = [
|
||||
config.systemd.services.postgresql.name
|
||||
];
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.host.reverse_proxy.enable {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.home-assistant.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.home-assistant.config.http.server_port}";
|
||||
|
||||
websockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
add_header Upgrade $http_upgrade;
|
||||
add_header Connection \"upgrade\";
|
||||
|
||||
proxy_buffering off;
|
||||
|
||||
proxy_read_timeout 90;
|
||||
'';
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.services.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/hass.local".text = lib.mkIf config.services.home-assistant.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[INCLUDES]
|
||||
before = common.conf
|
||||
|
||||
[Definition]
|
||||
failregex = ^%(__prefix_line)s.*Login attempt or request with invalid authentication from <HOST>.*$
|
||||
|
||||
ignoreregex =
|
||||
|
||||
[Init]
|
||||
datepattern = ^%%Y-%%m-%%d %%H:%%M:%%S
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
home-assistant-iptables.settings = lib.mkIf config.services.home-assistant.enable {
|
||||
enabled = true;
|
||||
filter = "hass";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.home-assistant.configDir}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.home-assistant.configDir == configDir;
|
||||
message = "home assistant config directory does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = configDir;
|
||||
user = "hass";
|
||||
group = "hass";
|
||||
}
|
||||
];
|
||||
};
|
||||
})
|
||||
]);
|
||||
}
|
56
modules/nixos-modules/server/home-assistant/database.nix
Normal file
56
modules/nixos-modules/server/home-assistant/database.nix
Normal file
|
@ -0,0 +1,56 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dbUser = "hass";
|
||||
in {
|
||||
config = lib.mkIf config.services.home-assistant.enable (
|
||||
lib.mkMerge [
|
||||
{
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.home-assistant.database == "postgres";
|
||||
message = "Home Assistant database type must be postgres";
|
||||
}
|
||||
];
|
||||
}
|
||||
(lib.mkIf config.host.postgres.enable {
|
||||
host = {
|
||||
postgres = {
|
||||
extraUsers = {
|
||||
${dbUser} = {
|
||||
isClient = true;
|
||||
createUser = true;
|
||||
};
|
||||
};
|
||||
extraDatabases = {
|
||||
${dbUser} = {
|
||||
name = dbUser;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.home-assistant = {
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
psycopg2
|
||||
];
|
||||
};
|
||||
|
||||
systemd.services.home-assistant = {
|
||||
requires = [
|
||||
config.systemd.services.postgresql.name
|
||||
];
|
||||
};
|
||||
})
|
||||
]
|
||||
);
|
||||
}
|
118
modules/nixos-modules/server/home-assistant/default.nix
Normal file
118
modules/nixos-modules/server/home-assistant/default.nix
Normal file
|
@ -0,0 +1,118 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
./proxy.nix
|
||||
./database.nix
|
||||
./fail2ban.nix
|
||||
./impermanence.nix
|
||||
./extensions
|
||||
];
|
||||
|
||||
options.services.home-assistant = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that home-assistant will be hosted at";
|
||||
default = "home-assistant";
|
||||
};
|
||||
|
||||
database = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
"builtin"
|
||||
"postgres"
|
||||
];
|
||||
description = "what database do we want to use";
|
||||
default = "builtin";
|
||||
};
|
||||
|
||||
extensions = {
|
||||
sonos = {
|
||||
enable = lib.mkEnableOption "enable the sonos plugin";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 1400;
|
||||
description = "what port to use for sonos discovery";
|
||||
};
|
||||
};
|
||||
jellyfin = {
|
||||
enable = lib.mkEnableOption "enable the jellyfin plugin";
|
||||
};
|
||||
wyoming = {
|
||||
enable = lib.mkEnableOption "enable wyoming";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.enable (lib.mkMerge [
|
||||
{
|
||||
services.home-assistant = {
|
||||
configDir = "/var/lib/hass";
|
||||
extraComponents = [
|
||||
"default_config"
|
||||
"esphome"
|
||||
"met"
|
||||
"radio_browser"
|
||||
"isal"
|
||||
"zha"
|
||||
"webostv"
|
||||
"tailscale"
|
||||
"syncthing"
|
||||
"analytics_insights"
|
||||
"unifi"
|
||||
"openweathermap"
|
||||
"ollama"
|
||||
"mobile_app"
|
||||
"logbook"
|
||||
"ssdp"
|
||||
"usb"
|
||||
"webhook"
|
||||
"bluetooth"
|
||||
"dhcp"
|
||||
"energy"
|
||||
"history"
|
||||
"backup"
|
||||
"assist_pipeline"
|
||||
"conversation"
|
||||
"sun"
|
||||
"zeroconf"
|
||||
"cpuspeed"
|
||||
];
|
||||
config = {
|
||||
http = {
|
||||
server_port = 8123;
|
||||
use_x_forwarded_for = true;
|
||||
trusted_proxies = ["127.0.0.1" "::1"];
|
||||
ip_ban_enabled = true;
|
||||
login_attempts_threshold = 10;
|
||||
};
|
||||
homeassistant = {
|
||||
external_url = "https://${config.services.home-assistant.subdomain}.${config.host.reverse_proxy.hostname}";
|
||||
# internal_url = "http://192.168.1.2:8123";
|
||||
};
|
||||
recorder.db_url = "postgresql://@/${config.services.home-assistant.configDir}";
|
||||
"automation manual" = [];
|
||||
"automation ui" = "!include automations.yaml";
|
||||
mobile_app = {};
|
||||
};
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
hassil
|
||||
numpy
|
||||
gtts
|
||||
];
|
||||
};
|
||||
|
||||
# TODO: configure /var/lib/hass/secrets.yaml via sops
|
||||
|
||||
networking.firewall.allowedUDPPorts = [
|
||||
1900
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"f ${config.services.home-assistant.configDir}/automations.yaml 0755 hass hass"
|
||||
];
|
||||
}
|
||||
]);
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
./sonos.nix
|
||||
./jellyfin.nix
|
||||
./wyoming.nix
|
||||
];
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.jellyfin.enable) {
|
||||
services.home-assistant.extraComponents = ["jellyfin"];
|
||||
# TODO: configure port, address, and login information here
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.sonos.enable) {
|
||||
services.home-assistant.extraComponents = ["sonos"];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.home-assistant.extensions.sonos.port
|
||||
];
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.wyoming.enable) {
|
||||
services.home-assistant.extraComponents = ["wyoming"];
|
||||
services.wyoming.enable = true;
|
||||
}
|
39
modules/nixos-modules/server/home-assistant/fail2ban.nix
Normal file
39
modules/nixos-modules/server/home-assistant/fail2ban.nix
Normal file
|
@ -0,0 +1,39 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.fail2ban.enable && config.services.home-assistant.enable) {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/hass.local".text = (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[INCLUDES]
|
||||
before = common.conf
|
||||
|
||||
[Definition]
|
||||
failregex = ^%(__prefix_line)s.*Login attempt or request with invalid authentication from <HOST>.*$
|
||||
|
||||
ignoreregex =
|
||||
|
||||
[Init]
|
||||
datepattern = ^%%Y-%%m-%%d %%H:%%M:%%S
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
home-assistant-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "hass";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.home-assistant.configDir}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
26
modules/nixos-modules/server/home-assistant/impermanence.nix
Normal file
26
modules/nixos-modules/server/home-assistant/impermanence.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
configDir = "/var/lib/hass";
|
||||
in
|
||||
lib.mkIf (config.host.impermanence.enable && config.services.home-assistant.enable) {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.home-assistant.configDir == configDir;
|
||||
message = "home assistant config directory does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = configDir;
|
||||
user = "hass";
|
||||
group = "hass";
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
24
modules/nixos-modules/server/home-assistant/proxy.nix
Normal file
24
modules/nixos-modules/server/home-assistant/proxy.nix
Normal file
|
@ -0,0 +1,24 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.host.reverse_proxy.enable && config.services.home-assistant.enable) {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.home-assistant.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.home-assistant.config.http.server_port}";
|
||||
|
||||
websockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
add_header Upgrade $http_upgrade;
|
||||
add_header Connection \"upgrade\";
|
||||
|
||||
proxy_buffering off;
|
||||
|
||||
proxy_read_timeout 90;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,99 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
mediaLocation = "/var/lib/immich";
|
||||
in {
|
||||
options.services.immich = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that immich will be hosted at";
|
||||
default = "immich";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.immich.enable (lib.mkMerge [
|
||||
{
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
extraUsers = {
|
||||
${config.services.immich.database.user} = {
|
||||
isClient = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.interfaces.${config.services.tailscale.interfaceName} = {
|
||||
allowedUDPPorts = [
|
||||
config.services.immich.port
|
||||
];
|
||||
allowedTCPPorts = [
|
||||
config.services.immich.port
|
||||
];
|
||||
};
|
||||
}
|
||||
(lib.mkIf config.host.reverse_proxy.enable {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.immich.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.immich.port}";
|
||||
|
||||
websockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
# allow large file uploads
|
||||
client_max_body_size 50000M;
|
||||
|
||||
# set timeout
|
||||
proxy_read_timeout 600s;
|
||||
proxy_send_timeout 600s;
|
||||
send_timeout 600s;
|
||||
proxy_redirect off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.services.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/immich.local".text = lib.mkIf config.services.immich.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = immich-server.*Failed login attempt for user.+from ip address\s?<ADDR>
|
||||
journalmatch = CONTAINER_TAG=immich-server
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
immich-iptables.settings = lib.mkIf config.services.immich.enable {
|
||||
enabled = true;
|
||||
filter = "immich";
|
||||
backend = "systemd";
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.immich.mediaLocation == mediaLocation;
|
||||
message = "immich media location does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = mediaLocation;
|
||||
user = "immich";
|
||||
group = "immich";
|
||||
}
|
||||
];
|
||||
};
|
||||
})
|
||||
]);
|
||||
}
|
26
modules/nixos-modules/server/immich/database.nix
Normal file
26
modules/nixos-modules/server/immich/database.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf config.services.immich.enable (lib.mkMerge [
|
||||
{
|
||||
host = {
|
||||
postgres = {
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
(lib.mkIf config.host.postgres.enable {
|
||||
host = {
|
||||
postgres = {
|
||||
extraUsers = {
|
||||
${config.services.immich.database.user} = {
|
||||
isClient = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
]);
|
||||
}
|
28
modules/nixos-modules/server/immich/default.nix
Normal file
28
modules/nixos-modules/server/immich/default.nix
Normal file
|
@ -0,0 +1,28 @@
|
|||
{lib, ...}: {
|
||||
imports = [
|
||||
./proxy.nix
|
||||
./database.nix
|
||||
./fail2ban.nix
|
||||
./impermanence.nix
|
||||
];
|
||||
|
||||
options.services.immich = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that immich will be hosted at";
|
||||
default = "immich";
|
||||
};
|
||||
};
|
||||
|
||||
# NOTE: This shouldn't be needed now that we are out of testing
|
||||
# config = lib.mkIf config.services.immich.enable {
|
||||
# networking.firewall.interfaces.${config.services.tailscale.interfaceName} = {
|
||||
# allowedUDPPorts = [
|
||||
# config.services.immich.port
|
||||
# ];
|
||||
# allowedTCPPorts = [
|
||||
# config.services.immich.port
|
||||
# ];
|
||||
# };
|
||||
# };
|
||||
}
|
26
modules/nixos-modules/server/immich/fail2ban.nix
Normal file
26
modules/nixos-modules/server/immich/fail2ban.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.fail2ban.enable && config.services.immich.enable) {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/immich.local".text = pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = immich-server.*Failed login attempt for user.+from ip address\s?<ADDR>
|
||||
journalmatch = CONTAINER_TAG=immich-server
|
||||
'');
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
immich-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "immich";
|
||||
backend = "systemd";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
25
modules/nixos-modules/server/immich/impermanence.nix
Normal file
25
modules/nixos-modules/server/immich/impermanence.nix
Normal file
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
mediaLocation = "/var/lib/immich";
|
||||
in {
|
||||
config = lib.mkIf (config.services.immich.enable && config.host.impermanence.enable) {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.immich.mediaLocation == mediaLocation;
|
||||
message = "immich media location does not match persistence";
|
||||
}
|
||||
];
|
||||
environment.persistence."/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = mediaLocation;
|
||||
user = "immich";
|
||||
group = "immich";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
27
modules/nixos-modules/server/immich/proxy.nix
Normal file
27
modules/nixos-modules/server/immich/proxy.nix
Normal file
|
@ -0,0 +1,27 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.immich.enable && config.host.reverse_proxy.enable) {
|
||||
host = {
|
||||
reverse_proxy.subdomains.${config.services.immich.subdomain} = {
|
||||
target = "http://localhost:${toString config.services.immich.port}";
|
||||
|
||||
websockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
# allow large file uploads
|
||||
client_max_body_size 50000M;
|
||||
|
||||
# set timeout
|
||||
proxy_read_timeout 600s;
|
||||
proxy_send_timeout 600s;
|
||||
send_timeout 600s;
|
||||
proxy_redirect off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,147 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfinPort = 8096;
|
||||
dlanPort = 1900;
|
||||
jellyfin_data_directory = "/var/lib/jellyfin";
|
||||
jellyfin_cache_directory = "/var/cache/jellyfin";
|
||||
in {
|
||||
options.services.jellyfin = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that jellyfin will be hosted at";
|
||||
default = "jellyfin";
|
||||
};
|
||||
extraSubdomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "ex subdomain of base domain that jellyfin will be hosted at";
|
||||
default = [];
|
||||
};
|
||||
media_directory = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "directory jellyfin media will be hosted at";
|
||||
default = "/srv/jellyfin/media";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jellyfin.enable (
|
||||
lib.mkMerge [
|
||||
{
|
||||
environment.systemPackages = [
|
||||
pkgs.jellyfin
|
||||
pkgs.jellyfin-web
|
||||
pkgs.jellyfin-ffmpeg
|
||||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [jellyfinPort dlanPort];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${config.services.jellyfin.media_directory} 2770 jellyfin jellyfin_media"
|
||||
"A ${config.services.jellyfin.media_directory} - - - - u:jellyfin:rwX,g:jellyfin_media:rwX,o::-"
|
||||
];
|
||||
}
|
||||
(lib.mkIf config.host.reverse_proxy.enable {
|
||||
host.reverse_proxy.subdomains.jellyfin = {
|
||||
target = "http://localhost:${toString jellyfinPort}";
|
||||
|
||||
subdomain = config.services.jellyfin.subdomain;
|
||||
extraSubdomains = config.services.jellyfin.extraSubdomains;
|
||||
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
client_max_body_size 20M;
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
|
||||
proxy_buffering off;
|
||||
'';
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.services.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/jellyfin.local".text = (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = "^.*Authentication request for .* has been denied \\\(IP: \"<ADDR>\"\\\)\\\."
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
jellyfin-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "jellyfin";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.jellyfin.dataDir}/log/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
fileSystems."/persist/system/jellyfin".neededForBoot = true;
|
||||
|
||||
host.storage.pool.extraDatasets = {
|
||||
# sops age key needs to be available to pre persist for user generation
|
||||
"persist/system/jellyfin" = {
|
||||
type = "zfs_fs";
|
||||
mountpoint = "/persist/system/jellyfin";
|
||||
options = {
|
||||
atime = "off";
|
||||
relatime = "off";
|
||||
canmount = "on";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
|
||||
message = "jellyfin data directory does not match persistence";
|
||||
}
|
||||
{
|
||||
assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
|
||||
message = "jellyfin cache directory does not match persistence";
|
||||
}
|
||||
];
|
||||
|
||||
environment.persistence = {
|
||||
"/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = jellyfin_data_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin";
|
||||
}
|
||||
{
|
||||
directory = jellyfin_cache_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
"/persist/system/jellyfin" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = config.services.jellyfin.media_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin_media";
|
||||
mode = "1770";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
})
|
||||
]
|
||||
);
|
||||
}
|
48
modules/nixos-modules/server/jellyfin/default.nix
Normal file
48
modules/nixos-modules/server/jellyfin/default.nix
Normal file
|
@ -0,0 +1,48 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfinPort = 8096;
|
||||
dlanPort = 1900;
|
||||
in {
|
||||
imports = [
|
||||
./proxy.nix
|
||||
./fail2ban.nix
|
||||
./impermanence.nix
|
||||
];
|
||||
|
||||
options.services.jellyfin = {
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that jellyfin will be hosted at";
|
||||
default = "jellyfin";
|
||||
};
|
||||
extraSubdomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "ex subdomain of base domain that jellyfin will be hosted at";
|
||||
default = [];
|
||||
};
|
||||
media_directory = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "directory jellyfin media will be hosted at";
|
||||
default = "/srv/jellyfin/media";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jellyfin.enable {
|
||||
environment.systemPackages = [
|
||||
pkgs.jellyfin
|
||||
pkgs.jellyfin-web
|
||||
pkgs.jellyfin-ffmpeg
|
||||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [jellyfinPort dlanPort];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${config.services.jellyfin.media_directory} 2770 jellyfin jellyfin_media"
|
||||
"A ${config.services.jellyfin.media_directory} - - - - u:jellyfin:rwX,g:jellyfin_media:rwX,o::-"
|
||||
];
|
||||
};
|
||||
}
|
32
modules/nixos-modules/server/jellyfin/fail2ban.nix
Normal file
32
modules/nixos-modules/server/jellyfin/fail2ban.nix
Normal file
|
@ -0,0 +1,32 @@
|
|||
{
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.jellyfin.enable && config.services.fail2ban.enable) {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/jellyfin.local".text = (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = "^.*Authentication request for .* has been denied \\\\\\(IP: \\\"<ADDR>\\\"\\\\\\)\\\\\\."
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
jellyfin-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "jellyfin";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.jellyfin.dataDir}/log/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
66
modules/nixos-modules/server/jellyfin/impermanence.nix
Normal file
66
modules/nixos-modules/server/jellyfin/impermanence.nix
Normal file
|
@ -0,0 +1,66 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfin_data_directory = "/var/lib/jellyfin";
|
||||
jellyfin_cache_directory = "/var/cache/jellyfin";
|
||||
in {
|
||||
config = lib.mkIf (config.services.jellyfin.enable && config.host.impermanence.enable) {
|
||||
fileSystems."/persist/system/jellyfin".neededForBoot = true;
|
||||
|
||||
host.storage.pool.extraDatasets = {
|
||||
# sops age key needs to be available to pre persist for user generation
|
||||
"persist/system/jellyfin" = {
|
||||
type = "zfs_fs";
|
||||
mountpoint = "/persist/system/jellyfin";
|
||||
options = {
|
||||
atime = "off";
|
||||
relatime = "off";
|
||||
canmount = "on";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
|
||||
message = "jellyfin data directory does not match persistence";
|
||||
}
|
||||
{
|
||||
assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
|
||||
message = "jellyfin cache directory does not match persistence";
|
||||
}
|
||||
];
|
||||
|
||||
environment.persistence = {
|
||||
"/persist/system/root" = {
|
||||
directories = [
|
||||
{
|
||||
directory = jellyfin_data_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin";
|
||||
}
|
||||
{
|
||||
directory = jellyfin_cache_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
"/persist/system/jellyfin" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = config.services.jellyfin.media_directory;
|
||||
user = "jellyfin";
|
||||
group = "jellyfin_media";
|
||||
mode = "1770";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
25
modules/nixos-modules/server/jellyfin/proxy.nix
Normal file
25
modules/nixos-modules/server/jellyfin/proxy.nix
Normal file
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfinPort = 8096;
|
||||
in {
|
||||
config = lib.mkIf (config.services.jellyfin.enable && config.host.reverse_proxy.enable) {
|
||||
host.reverse_proxy.subdomains.jellyfin = {
|
||||
target = "http://localhost:${toString jellyfinPort}";
|
||||
|
||||
subdomain = config.services.jellyfin.subdomain;
|
||||
extraSubdomains = config.services.jellyfin.extraSubdomains;
|
||||
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
extraConfig = ''
|
||||
client_max_body_size 20M;
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
|
||||
proxy_buffering off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,408 +0,0 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
osConfig,
|
||||
...
|
||||
}:
|
||||
with lib; let
|
||||
# Database configuration assertions
|
||||
dbUrlConfigured = config.services.panoramax.database.url != null;
|
||||
individualDbConfigured = all (x: x != null) [
|
||||
config.services.panoramax.database.host
|
||||
config.services.panoramax.database.port
|
||||
config.services.panoramax.database.username
|
||||
config.services.panoramax.database.password
|
||||
config.services.panoramax.database.name
|
||||
];
|
||||
|
||||
envContent = ''
|
||||
# Panoramax Configuration
|
||||
FLASK_APP=geovisio
|
||||
${
|
||||
if dbUrlConfigured
|
||||
then "DB_URL=${config.services.panoramax.database.url}"
|
||||
else ''
|
||||
DB_HOST=${config.services.panoramax.database.host}
|
||||
DB_PORT=${toString config.services.panoramax.database.port}
|
||||
DB_USERNAME=${config.services.panoramax.database.username}
|
||||
DB_PASSWORD=${config.services.panoramax.database.password}
|
||||
DB_NAME=${config.services.panoramax.database.name}
|
||||
''
|
||||
}
|
||||
${optionalString (config.services.panoramax.storage.fsUrl != null) "FS_URL=${config.services.panoramax.storage.fsUrl}"}
|
||||
${optionalString (config.services.panoramax.infrastructure.nbProxies != null) "INFRA_NB_PROXIES=${toString config.services.panoramax.infrastructure.nbProxies}"}
|
||||
${optionalString (config.services.panoramax.flask.secretKey != null) "FLASK_SECRET_KEY=${config.services.panoramax.flask.secretKey}"}
|
||||
${optionalString (config.services.panoramax.flask.sessionCookieDomain != null) "FLASK_SESSION_COOKIE_DOMAIN=${config.services.panoramax.flask.sessionCookieDomain}"}
|
||||
${optionalString (config.services.panoramax.api.pictures.licenseSpdxId != null) "API_PICTURES_LICENSE_SPDX_ID=${config.services.panoramax.api.pictures.licenseSpdxId}"}
|
||||
${optionalString (config.services.panoramax.api.pictures.licenseUrl != null) "API_PICTURES_LICENSE_URL=${config.services.panoramax.api.pictures.licenseUrl}"}
|
||||
${optionalString (config.services.panoramax.port != null) "PORT=${toString config.services.panoramax.port}"}
|
||||
${optionalString (config.services.panoramax.sgblur.enable) "SGBLUR_API_URL=${config.services.panoramax.sgblur.url}"}
|
||||
${concatStringsSep "\n" (mapAttrsToList (name: value: "${name}=${value}") config.services.panoramax.extraEnvironment)}
|
||||
'';
|
||||
|
||||
envFile = pkgs.writeText "panoramax.env" envContent;
|
||||
in {
|
||||
options.services.panoramax = {
|
||||
enable = lib.mkEnableOption "panoramax";
|
||||
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = pkgs.panoramax;
|
||||
description = "The panoramax package to use";
|
||||
};
|
||||
|
||||
subdomain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "subdomain of base domain that panoramax will be hosted at";
|
||||
default = "panoramax";
|
||||
};
|
||||
|
||||
database = {
|
||||
createDB = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Whether to automatically create the database and user";
|
||||
};
|
||||
|
||||
url = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Complete database URL connection string (e.g., "postgresql://user:password@host:port/dbname").
|
||||
If provided, individual database options (host, port, username, password, name) are ignored.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.nullOr types.port;
|
||||
default = 5432;
|
||||
description = "Database port (ignored if database.url is set)";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = "localhost";
|
||||
description = "Database host (ignored if database.url is set)";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = "panoramax";
|
||||
description = "Database username (ignored if database.url is set)";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Database password (ignored if database.url is set)";
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "panoramax";
|
||||
description = "Database name (ignored if database.url is set)";
|
||||
};
|
||||
};
|
||||
|
||||
sgblur = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to enable sgblur integration for face and license plate blurring";
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.sgblur;
|
||||
description = "The sgblur package to use";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 8080;
|
||||
description = "Port for the sgblur service";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = "Host to bind the sgblur service to";
|
||||
};
|
||||
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
default = "http://127.0.0.1:8080";
|
||||
description = "URL where sgblur service is accessible";
|
||||
};
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.nullOr types.port;
|
||||
default = 5000;
|
||||
description = "Port for the Panoramax service";
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = "Host to bind the Panoramax service to";
|
||||
};
|
||||
|
||||
urlScheme = mkOption {
|
||||
type = types.enum ["http" "https"];
|
||||
default = "https";
|
||||
description = "URL scheme for the application";
|
||||
};
|
||||
|
||||
storage = {
|
||||
fsUrl = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = "/var/lib/panoramax/storage";
|
||||
description = "File system URL for storage";
|
||||
};
|
||||
};
|
||||
|
||||
infrastructure = {
|
||||
nbProxies = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = 1;
|
||||
description = "Number of proxies in front of the application";
|
||||
};
|
||||
};
|
||||
|
||||
flask = {
|
||||
secretKey = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Flask secret key for session security";
|
||||
};
|
||||
|
||||
sessionCookieDomain = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Flask session cookie domain";
|
||||
};
|
||||
};
|
||||
|
||||
api = {
|
||||
pictures = {
|
||||
licenseSpdxId = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "SPDX license identifier for API pictures";
|
||||
};
|
||||
|
||||
licenseUrl = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "License URL for API pictures";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
extraEnvironment = mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
default = {};
|
||||
description = "Additional environment variables";
|
||||
example = {
|
||||
CUSTOM_SETTING = "value";
|
||||
DEBUG = "true";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.panoramax.enable (
|
||||
lib.mkMerge [
|
||||
{
|
||||
environment.systemPackages = with pkgs;
|
||||
[
|
||||
config.services.panoramax.package
|
||||
python3Packages.waitress
|
||||
]
|
||||
++ optionals config.services.panoramax.sgblur.enable [
|
||||
config.services.panoramax.sgblur.package
|
||||
];
|
||||
|
||||
systemd.services.panoramax = {
|
||||
description = "Panoramax Service";
|
||||
after = ["network.target"];
|
||||
wantedBy = ["multi-user.target"];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3Packages.waitress}/bin/waitress-serve --env-file=${envFile} --host=${config.services.panoramax.host} --port=${toString config.services.panoramax.port} --url-scheme=${config.services.panoramax.urlScheme} --call geovisio:create_app";
|
||||
Restart = "always";
|
||||
User = "panoramax";
|
||||
Group = "panoramax";
|
||||
WorkingDirectory = "/var/lib/panoramax";
|
||||
Environment = "PYTHONPATH=${config.services.panoramax.package}/lib/python3.11/site-packages";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.panoramax = {
|
||||
isSystemUser = true;
|
||||
group = "panoramax";
|
||||
home = "/var/lib/panoramax";
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
users.groups.panoramax = {};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /var/lib/panoramax 0755 panoramax panoramax -"
|
||||
"d ${config.services.panoramax.storage.fsUrl} 0755 panoramax panoramax -"
|
||||
];
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = dbUrlConfigured || individualDbConfigured;
message = ''
Panoramax database configuration requires either:
- A complete database URL (services.panoramax.database.url), OR
- All individual database options (host, port, username, password, name)

Currently configured:
- database.url: ${
if dbUrlConfigured
then "✓ configured"
else "✗ not configured"
}
- individual options: ${
if individualDbConfigured
then "✓ all configured"
else "✗ some missing"
}
'';
}
{
assertion = !config.services.panoramax.database.createDB || config.services.panoramax.database.url == null || (lib.hasPrefix "/run/" config.services.panoramax.database.url || lib.hasPrefix "unix:" config.services.panoramax.database.url || lib.hasPrefix "/" config.services.panoramax.database.host);
message = ''
Panoramax createDB option can only be used with socket connections when a database URL is provided.
Socket connections are identified by:
- URLs starting with "unix:"
- URLs starting with "/run/"
- Host paths starting with "/"

Current configuration:
- createDB: ${lib.boolToString config.services.panoramax.database.createDB}
- database.url: ${
if config.services.panoramax.database.url != null
then config.services.panoramax.database.url
else "not set"
}
- database.host: ${config.services.panoramax.database.host}
'';
}
];
}
(
lib.mkIf config.services.panoramax.sgblur.enable {
systemd.services.sgblur = {
description = "SGBlur AI-powered face and license plate blurring service";
after = ["network.target"];
wantedBy = ["multi-user.target"];
serviceConfig = {
ExecStart = "${config.services.panoramax.sgblur.package}/bin/uvicorn sgblur.main:app --host ${config.services.panoramax.sgblur.host} --port ${toString config.services.panoramax.sgblur.port}";
Restart = "always";
User = "sgblur";
Group = "sgblur";
WorkingDirectory = "/var/lib/sgblur";
Environment = "PYTHONPATH=${config.services.panoramax.sgblur.package}/lib/python3.11/site-packages";
};
};

users.users.sgblur = {
isSystemUser = true;
group = "sgblur";
home = "/var/lib/sgblur";
createHome = true;
};

users.groups.sgblur = {};

systemd.tmpfiles.rules = [
"d /var/lib/sgblur 0755 sgblur sgblur -"
];

# Update panoramax service dependencies when sgblur is enabled
systemd.services.panoramax = {
after = ["sgblur.service"];
wants = ["sgblur.service"];
};
}
)
(
lib.mkIf config.services.panoramax.database.createDB {
services.postgresql = {
enable = true;
ensureDatabases = [config.services.panoramax.database.name];
ensureUsers = [
{
name = config.services.panoramax.database.username;
ensureDBOwnership = true;
ensureClauses.login = true;
}
];
extensions = ps: with ps; [postgis];
settings = {
shared_preload_libraries = ["postgis"];
};
};

systemd.services.postgresql.serviceConfig.ExecStartPost = let
sqlFile = pkgs.writeText "panoramax-postgis-setup.sql" ''
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS postgis_topology;
CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;
CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;

ALTER SCHEMA public OWNER TO ${config.services.panoramax.database.username};
GRANT ALL ON SCHEMA public TO ${config.services.panoramax.database.username};
'';
in [
''
${lib.getExe' config.services.postgresql.package "psql"} -d "${config.services.panoramax.database.name}" -f "${sqlFile}"
''
];

systemd.services.panoramax = {
after = ["postgresql.service"];
requires = ["postgresql.service"];
};
}
)
(
lib.mkIf config.host.reverse_proxy.enable {
host = {
reverse_proxy.subdomains.${config.services.panoramax.subdomain} = {
target = "http://localhost:${toString config.services.panoramax.port}";

websockets.enable = true;
forwardHeaders.enable = true;

extraConfig = ''
# allow large file uploads for panoramic images
client_max_body_size 100M;

# set timeout for image processing
proxy_read_timeout 300s;
proxy_send_timeout 300s;
send_timeout 300s;
proxy_redirect off;
'';
};
};
}
)
(
lib.mkIf config.services.fail2ban {
# TODO: configure options for fail2ban
}
)
(
lib.mkIf osConfig.host.impermanence.enable {
# TODO: configure impermanence for panoramax data
}
)
]
);
}
340
modules/nixos-modules/server/panoramax/default.nix
Normal file
@ -0,0 +1,340 @@
{
config,
lib,
pkgs,
...
}:
with lib; let
# Database configuration assertions
dbUrlConfigured = config.services.panoramax.database.url != null;
individualDbConfigured = all (x: x != null) [
config.services.panoramax.database.host
config.services.panoramax.database.port
config.services.panoramax.database.username
config.services.panoramax.database.password
config.services.panoramax.database.name
];

envContent = ''
# Panoramax Configuration
FLASK_APP=geovisio
${
if dbUrlConfigured
then "DB_URL=${config.services.panoramax.database.url}"
else ''
DB_HOST=${config.services.panoramax.database.host}
DB_PORT=${toString config.services.panoramax.database.port}
DB_USERNAME=${config.services.panoramax.database.username}
DB_PASSWORD=${config.services.panoramax.database.password}
DB_NAME=${config.services.panoramax.database.name}
''
}
${optionalString (config.services.panoramax.storage.fsUrl != null) "FS_URL=${config.services.panoramax.storage.fsUrl}"}
${optionalString (config.services.panoramax.infrastructure.nbProxies != null) "INFRA_NB_PROXIES=${toString config.services.panoramax.infrastructure.nbProxies}"}
${optionalString (config.services.panoramax.flask.secretKey != null) "FLASK_SECRET_KEY=${config.services.panoramax.flask.secretKey}"}
${optionalString (config.services.panoramax.flask.sessionCookieDomain != null) "FLASK_SESSION_COOKIE_DOMAIN=${config.services.panoramax.flask.sessionCookieDomain}"}
${optionalString (config.services.panoramax.api.pictures.licenseSpdxId != null) "API_PICTURES_LICENSE_SPDX_ID=${config.services.panoramax.api.pictures.licenseSpdxId}"}
${optionalString (config.services.panoramax.api.pictures.licenseUrl != null) "API_PICTURES_LICENSE_URL=${config.services.panoramax.api.pictures.licenseUrl}"}
${optionalString (config.services.panoramax.port != null) "PORT=${toString config.services.panoramax.port}"}
${optionalString (config.services.panoramax.sgblur.enable) "SGBLUR_API_URL=${config.services.panoramax.sgblur.url}"}
${concatStringsSep "\n" (mapAttrsToList (name: value: "${name}=${value}") config.services.panoramax.extraEnvironment)}
'';

envFile = pkgs.writeText "panoramax.env" envContent;
in {
imports = [
./proxy.nix
./fail2ban.nix
./impermanence.nix
];

options.services.panoramax = {
enable = lib.mkEnableOption "panoramax";

package = lib.mkOption {
type = lib.types.package;
default = pkgs.panoramax;
description = "The panoramax package to use";
};

subdomain = lib.mkOption {
type = lib.types.str;
description = "subdomain of base domain that panoramax will be hosted at";
default = "panoramax";
};

database = {
createDB = mkOption {
type = types.bool;
default = true;
description = "Whether to automatically create the database and user";
};

url = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Complete database URL connection string (e.g., "postgresql://user:password@host:port/dbname").
If provided, individual database options (host, port, username, password, name) are ignored.
'';
};

port = mkOption {
type = types.nullOr types.port;
default = 5432;
description = "Database port (ignored if database.url is set)";
};

host = mkOption {
type = types.nullOr types.str;
default = "localhost";
description = "Database host (ignored if database.url is set)";
};

username = mkOption {
type = types.nullOr types.str;
default = "panoramax";
description = "Database username (ignored if database.url is set)";
};

password = mkOption {
type = types.nullOr types.str;
default = null;
description = "Database password (ignored if database.url is set)";
};

name = mkOption {
type = types.str;
default = "panoramax";
description = "Database name (ignored if database.url is set)";
};
};

sgblur = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable sgblur integration for face and license plate blurring";
};

package = mkOption {
type = types.package;
default = pkgs.sgblur;
description = "The sgblur package to use";
};

port = mkOption {
type = types.port;
default = 8080;
description = "Port for the sgblur service";
};

host = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Host to bind the sgblur service to";
};

url = mkOption {
type = types.str;
default = "http://127.0.0.1:8080";
description = "URL where sgblur service is accessible";
};
};

port = mkOption {
type = types.nullOr types.port;
default = 5000;
description = "Port for the Panoramax service";
};

host = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Host to bind the Panoramax service to";
};

urlScheme = mkOption {
type = types.enum ["http" "https"];
default = "https";
description = "URL scheme for the application";
};

storage = {
fsUrl = mkOption {
type = types.nullOr types.str;
default = "/var/lib/panoramax/storage";
description = "File system URL for storage";
};
};

infrastructure = {
nbProxies = mkOption {
type = types.nullOr types.int;
default = 1;
description = "Number of proxies in front of the application";
};
};

flask = {
secretKey = mkOption {
type = types.nullOr types.str;
default = null;
description = "Flask secret key for session security";
};

sessionCookieDomain = mkOption {
type = types.nullOr types.str;
default = null;
description = "Flask session cookie domain";
};
};

api = {
pictures = {
licenseSpdxId = mkOption {
type = types.nullOr types.str;
default = null;
description = "SPDX license identifier for API pictures";
};

licenseUrl = mkOption {
type = types.nullOr types.str;
default = null;
description = "License URL for API pictures";
};
};
};

extraEnvironment = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Additional environment variables";
example = {
CUSTOM_SETTING = "value";
DEBUG = "true";
};
};
};

config = lib.mkIf config.services.panoramax.enable (lib.mkMerge [
{
environment.systemPackages = with pkgs;
[
config.services.panoramax.package
python3Packages.waitress
]
++ optionals config.services.panoramax.sgblur.enable [
config.services.panoramax.sgblur.package
];

systemd.services.panoramax = {
description = "Panoramax Service";
after = ["network.target"];
wantedBy = ["multi-user.target"];
serviceConfig = {
ExecStart = "${pkgs.python3Packages.waitress}/bin/waitress-serve --env-file=${envFile} --host=${config.services.panoramax.host} --port=${toString config.services.panoramax.port} --url-scheme=${config.services.panoramax.urlScheme} --call geovisio:create_app";
Restart = "always";
User = "panoramax";
Group = "panoramax";
WorkingDirectory = "/var/lib/panoramax";
Environment = "PYTHONPATH=${config.services.panoramax.package}/lib/python3.11/site-packages";
};
};

users.users.panoramax = {
isSystemUser = true;
group = "panoramax";
home = "/var/lib/panoramax";
createHome = true;
};

users.groups.panoramax = {};

systemd.tmpfiles.rules = [
"d /var/lib/panoramax 0755 panoramax panoramax -"
"d ${config.services.panoramax.storage.fsUrl} 0755 panoramax panoramax -"
];

assertions = [
{
assertion = dbUrlConfigured || individualDbConfigured;
message = ''
Panoramax database configuration requires either:
- A complete database URL (services.panoramax.database.url), OR
- All individual database options (host, port, username, password, name)

Currently configured:
- database.url: ${
if dbUrlConfigured
then "✓ configured"
else "✗ not configured"
}
- individual options: ${
if individualDbConfigured
then "✓ all configured"
else "✗ some missing"
}
'';
}
{
assertion = !config.services.panoramax.database.createDB || config.services.panoramax.database.url == null || (lib.hasPrefix "/run/" config.services.panoramax.database.url || lib.hasPrefix "unix:" config.services.panoramax.database.url || lib.hasPrefix "/" config.services.panoramax.database.host);
message = ''
Panoramax createDB option can only be used with socket connections when a database URL is provided.
Socket connections are identified by:
- URLs starting with "unix:"
- URLs starting with "/run/"
- Host paths starting with "/"

Current configuration:
- createDB: ${lib.boolToString config.services.panoramax.database.createDB}
- database.url: ${
if config.services.panoramax.database.url != null
then config.services.panoramax.database.url
else "not set"
}
- database.host: ${config.services.panoramax.database.host}
'';
}
];
}
(lib.mkIf config.services.panoramax.database.createDB {
systemd.services.panoramax = {
after = ["postgresql.service"];
requires = ["postgresql.service"];
};

services.postgresql = {
enable = true;
ensureDatabases = [config.services.panoramax.database.name];
ensureUsers = [
{
name = config.services.panoramax.database.username;
ensureDBOwnership = true;
ensureClauses.login = true;
}
];
extensions = ps: with ps; [postgis];
settings = {
shared_preload_libraries = ["postgis"];
};
};

systemd.services.postgresql.serviceConfig.ExecStartPost = let
sqlFile = pkgs.writeText "panoramax-postgis-setup.sql" ''
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS postgis_topology;
CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;
CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;

ALTER SCHEMA public OWNER TO ${config.services.panoramax.database.username};
GRANT ALL ON SCHEMA public TO ${config.services.panoramax.database.username};
'';
in [
''
${lib.getExe' config.services.postgresql.package "psql"} -d "${config.services.panoramax.database.name}" -f "${sqlFile}"
''
];
})
]);
}
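For reference, a minimal sketch of how a host configuration might consume the options declared in this new default.nix; the subdomain and storage path below are illustrative placeholders, not values taken from this commit:

{
  services.panoramax = {
    enable = true;
    subdomain = "photos"; # hypothetical subdomain under the host's base domain
    urlScheme = "https";
    database.createDB = true; # provisions local PostgreSQL with PostGIS as wired up above
    storage.fsUrl = "/var/lib/panoramax/storage";
    sgblur.enable = false; # opt-in face/plate blurring sidecar
  };
}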
11
modules/nixos-modules/server/panoramax/fail2ban.nix
Normal file
@ -0,0 +1,11 @@
{
lib,
config,
...
}: {
config = lib.mkIf (config.services.panoramax.enable && config.services.fail2ban.enable) {
# TODO: configure options for fail2ban
# This is a placeholder - panoramax fail2ban configuration would need to be defined
# based on the specific log patterns and security requirements
};
}
14
modules/nixos-modules/server/panoramax/impermanence.nix
Normal file
@ -0,0 +1,14 @@
{
lib,
config,
osConfig,
...
}: {
config = lib.mkIf (config.services.panoramax.enable && osConfig.host.impermanence.enable) {
# TODO: configure impermanence for panoramax data
# This would typically include directories like:
# - /var/lib/panoramax
# - panoramax storage directories
# - any cache or temporary directories that need to persist
};
}
27
modules/nixos-modules/server/panoramax/proxy.nix
Normal file
@ -0,0 +1,27 @@
{
lib,
config,
...
}: {
config = lib.mkIf (config.services.panoramax.enable && config.host.reverse_proxy.enable) {
host = {
reverse_proxy.subdomains.${config.services.panoramax.subdomain} = {
target = "http://localhost:${toString config.services.panoramax.port}";

websockets.enable = true;
forwardHeaders.enable = true;

extraConfig = ''
# allow large file uploads for panoramic images
client_max_body_size 100M;

# set timeout for image processing
proxy_read_timeout 300s;
proxy_send_timeout 300s;
send_timeout 300s;
proxy_redirect off;
'';
};
};
};
}
@ -1,113 +0,0 @@
{
config,
lib,
pkgs,
...
}: let
dataDir = "/var/lib/paperless";
in {
options.services.paperless = {
subdomain = lib.mkOption {
type = lib.types.str;
description = "subdomain of base domain that paperless will be hosted at";
default = "paperless";
};
database = {
user = lib.mkOption {
type = lib.types.str;
description = "what is the user and database that we are going to use for paperless";
default = "paperless";
};
};
};

config = lib.mkIf config.services.paperless.enable (lib.mkMerge [
{
host = {
postgres = {
enable = true;
extraUsers = {
${config.services.paperless.database.user} = {
isClient = true;
createUser = true;
};
};
extraDatabases = {
${config.services.paperless.database.user} = {
name = config.services.paperless.database.user;
};
};
};
};
services.paperless = {
domain = "${config.services.paperless.subdomain}.${config.host.reverse_proxy.hostname}";
configureTika = true;
settings = {
PAPERLESS_DBENGINE = "postgresql";
PAPERLESS_DBHOST = "/run/postgresql";
PAPERLESS_DBNAME = config.services.paperless.database.user;
PAPERLESS_DBUSER = config.services.paperless.database.user;
};
};
}
(lib.mkIf config.host.reverse_proxy.enable {
host = {
reverse_proxy.subdomains.${config.services.paperless.subdomain} = {
target = "http://${config.services.paperless.address}:${toString config.services.paperless.port}";

websockets.enable = true;
forwardHeaders.enable = true;

extraConfig = ''
# allow large file uploads
client_max_body_size 50000M;
'';
};
};
})
(lib.mkIf config.services.fail2ban.enable {
environment.etc = {
"fail2ban/filter.d/paperless.local".text = (
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
[Definition]
failregex = Login failed for user `.*` from (?:IP|private IP) `<HOST>`\.$
ignoreregex =

'')
);
};

services.fail2ban = {
jails = {
paperless.settings = {
enabled = true;
filter = "paperless";
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
logpath = "${config.services.paperless.dataDir}/log/*.log";
backend = "auto";
findtime = 600;
bantime = 600;
maxretry = 5;
};
};
};
})
(lib.mkIf config.host.impermanence.enable {
assertions = [
{
assertion = config.services.paperless.dataDir == dataDir;
message = "paperless data location does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = dataDir;
user = "paperless";
group = "paperless";
}
];
};
})
]);
}
34
modules/nixos-modules/server/paperless/database.nix
Normal file
@ -0,0 +1,34 @@
{
config,
lib,
...
}: {
config = lib.mkIf config.services.paperless.enable (lib.mkMerge [
{
host = {
postgres = {
enable = true;
};
};
}
(
lib.mkIf config.host.postgres.enable {
host = {
postgres = {
extraUsers = {
${config.services.paperless.database.user} = {
isClient = true;
createUser = true;
};
};
extraDatabases = {
${config.services.paperless.database.user} = {
name = config.services.paperless.database.user;
};
};
};
};
}
)
]);
}
40
modules/nixos-modules/server/paperless/default.nix
Normal file
@ -0,0 +1,40 @@
{
config,
lib,
...
}: {
imports = [
./proxy.nix
./database.nix
./fail2ban.nix
./impermanence.nix
];

options.services.paperless = {
subdomain = lib.mkOption {
type = lib.types.str;
description = "subdomain of base domain that paperless will be hosted at";
default = "paperless";
};
database = {
user = lib.mkOption {
type = lib.types.str;
description = "what is the user and database that we are going to use for paperless";
default = "paperless";
};
};
};

config = lib.mkIf config.services.paperless.enable {
services.paperless = {
domain = "${config.services.paperless.subdomain}.${config.host.reverse_proxy.hostname}";
configureTika = true;
settings = {
PAPERLESS_DBENGINE = "postgresql";
PAPERLESS_DBHOST = "/run/postgresql";
PAPERLESS_DBNAME = config.services.paperless.database.user;
PAPERLESS_DBUSER = config.services.paperless.database.user;
};
};
};
}
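A minimal sketch of enabling the refactored paperless module from a host configuration; the subdomain here is a hypothetical example, and the database user matches the "paperless" default declared above:

{
  services.paperless = {
    enable = true;
    subdomain = "docs"; # hypothetical; proxied as docs.<reverse proxy hostname>
    database.user = "paperless";
  };
}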
34
modules/nixos-modules/server/paperless/fail2ban.nix
Normal file
@ -0,0 +1,34 @@
{
config,
lib,
pkgs,
...
}: {
config = lib.mkIf (config.services.paperless.enable && config.services.fail2ban.enable) {
environment.etc = {
"fail2ban/filter.d/paperless.local".text = (
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
[Definition]
failregex = Login failed for user `.*` from (?:IP|private IP) `<HOST>`\.$
ignoreregex =

'')
);
};

services.fail2ban = {
jails = {
paperless.settings = {
enabled = true;
filter = "paperless";
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
logpath = "${config.services.paperless.dataDir}/log/*.log";
backend = "auto";
findtime = 600;
bantime = 600;
maxretry = 5;
};
};
};
};
}
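For context, the failregex above is written to match paperless login-failure messages of roughly this shape (an illustrative line derived from the regex itself, not copied from real logs):

Login failed for user `admin` from IP `203.0.113.7`.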
25
modules/nixos-modules/server/paperless/impermanence.nix
Normal file
@ -0,0 +1,25 @@
{
config,
lib,
...
}: let
dataDir = "/var/lib/paperless";
in {
config = lib.mkIf (config.services.paperless.enable && config.host.impermanence.enable) {
assertions = [
{
assertion = config.services.paperless.dataDir == dataDir;
message = "paperless data location does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = dataDir;
user = "paperless";
group = "paperless";
}
];
};
};
}
21
modules/nixos-modules/server/paperless/proxy.nix
Normal file
@ -0,0 +1,21 @@
{
config,
lib,
...
}: {
config = lib.mkIf (config.services.paperless.enable && config.host.reverse_proxy.enable) {
host = {
reverse_proxy.subdomains.${config.services.paperless.subdomain} = {
target = "http://${config.services.paperless.address}:${toString config.services.paperless.port}";

websockets.enable = true;
forwardHeaders.enable = true;

extraConfig = ''
# allow large file uploads
client_max_body_size 50000M;
'';
};
};
};
}
@ -1,78 +0,0 @@
{
config,
lib,
inputs,
...
}: {
options.services.searx = {
subdomain = lib.mkOption {
type = lib.types.str;
description = "subdomain of base domain that searx will be hosted at";
default = "searx";
};
};

config = lib.mkIf config.services.searx.enable (
lib.mkMerge [
{
sops.secrets = {
"services/searx" = {
sopsFile = "${inputs.secrets}/defiant-services.yaml";
};
};
services.searx = {
environmentFile = config.sops.secrets."services/searx".path;

# Rate limiting
limiterSettings = {
real_ip = {
x_for = 1;
ipv4_prefix = 32;
ipv6_prefix = 56;
};

botdetection = {
ip_limit = {
filter_link_local = true;
link_token = true;
};
};
};

settings = {
server = {
port = 8083;
secret_key = "@SEARXNG_SECRET@";
};

# Search engine settings
search = {
safe_search = 2;
autocomplete_min = 2;
autocomplete = "duckduckgo";
};

# Enabled plugins
enabled_plugins = [
"Basic Calculator"
"Hash plugin"
"Tor check plugin"
"Open Access DOI rewrite"
"Hostnames plugin"
"Unit converter plugin"
"Tracker URL remover"
];
};
};
}
(lib.mkIf config.host.reverse_proxy.enable {
host = {
reverse_proxy.subdomains.searx = {
subdomain = config.services.searx.subdomain;
target = "http://localhost:${toString config.services.searx.settings.server.port}";
};
};
})
]
);
}
71
modules/nixos-modules/server/searx/default.nix
Normal file
@ -0,0 +1,71 @@
{
config,
lib,
inputs,
...
}: {
imports = [
./proxy.nix
];

options.services.searx = {
subdomain = lib.mkOption {
type = lib.types.str;
description = "subdomain of base domain that searx will be hosted at";
default = "searx";
};
};

config = lib.mkIf config.services.searx.enable {
sops.secrets = {
"services/searx" = {
sopsFile = "${inputs.secrets}/defiant-services.yaml";
};
};

services.searx = {
environmentFile = config.sops.secrets."services/searx".path;

# Rate limiting
limiterSettings = {
real_ip = {
x_for = 1;
ipv4_prefix = 32;
ipv6_prefix = 56;
};

botdetection = {
ip_limit = {
filter_link_local = true;
link_token = true;
};
};
};

settings = {
server = {
port = 8083;
secret_key = "@SEARXNG_SECRET@";
};

# Search engine settings
search = {
safe_search = 2;
autocomplete_min = 2;
autocomplete = "duckduckgo";
};

# Enabled plugins
enabled_plugins = [
"Basic Calculator"
"Hash plugin"
"Tor check plugin"
"Open Access DOI rewrite"
"Hostnames plugin"
"Unit converter plugin"
"Tracker URL remover"
];
};
};
};
}
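The secret_key = "@SEARXNG_SECRET@" placeholder is resolved at runtime from the sops-provided environmentFile; assuming the upstream SearXNG module's variable naming, the decrypted secret file is expected to look something like this (value illustrative):

SEARXNG_SECRET=<long random string>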
14
modules/nixos-modules/server/searx/proxy.nix
Normal file
@ -0,0 +1,14 @@
{
config,
lib,
...
}: {
config = lib.mkIf (config.services.searx.enable && config.host.reverse_proxy.enable) {
host = {
reverse_proxy.subdomains.searx = {
subdomain = config.services.searx.subdomain;
target = "http://localhost:${toString config.services.searx.settings.server.port}";
};
};
};
}