refactor: moved nixos modules to dendrite pattern
This commit is contained in:
parent
df8dd110ad
commit
0ea11e0236
219 changed files with 4802 additions and 4820 deletions
86
modules/nixos/desktop.nix
Normal file
86
modules/nixos/desktop.nix
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
{...}: {
|
||||
flake.nixosModules.nixos-desktop = {
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.host.desktop.enable = lib.mkEnableOption "should desktop configuration be enabled";
|
||||
|
||||
config = lib.mkMerge [
|
||||
{
|
||||
host.desktop.enable = lib.mkDefault true;
|
||||
}
|
||||
(lib.mkIf config.host.desktop.enable {
|
||||
environment.gnome.excludePackages = with pkgs; [
|
||||
xterm # default terminal
|
||||
atomix # puzzle game
|
||||
cheese # webcam tool
|
||||
epiphany # web browser
|
||||
geary # email reader
|
||||
gedit # text editor
|
||||
decibels # audio player
|
||||
gnome-characters # character set viewer
|
||||
gnome-music # music player
|
||||
gnome-photos # photo viewer
|
||||
gnome-logs # log viewer
|
||||
gnome-maps # map viewer
|
||||
gnome-tour # welcome tour
|
||||
hitori # sudoku game
|
||||
iagno # go game
|
||||
tali # poker game
|
||||
yelp # help viewer
|
||||
];
|
||||
services = {
|
||||
# Enable CUPS to print documents.
|
||||
printing = {
|
||||
enable = true;
|
||||
drivers = [
|
||||
pkgs.hplip
|
||||
pkgs.gutenprint
|
||||
pkgs.gutenprintBin
|
||||
];
|
||||
};
|
||||
|
||||
xserver = {
|
||||
# Enable the X11 windowing system.
|
||||
enable = true;
|
||||
|
||||
# Get rid of xTerm
|
||||
desktopManager.xterm.enable = false;
|
||||
excludePackages = with pkgs; [
|
||||
xterm
|
||||
];
|
||||
};
|
||||
|
||||
# Enable the GNOME Desktop Environment.
|
||||
displayManager.gdm.enable = true;
|
||||
desktopManager.gnome.enable = true;
|
||||
|
||||
pipewire = {
|
||||
enable = true;
|
||||
alsa.enable = true;
|
||||
alsa.support32Bit = true;
|
||||
pulse.enable = true;
|
||||
|
||||
# If you want to use JACK applications, uncomment this
|
||||
#jack.enable = true;
|
||||
|
||||
# use the example session manager (no others are packaged yet so this is enabled by default,
|
||||
# no need to redefine it in your config for now)
|
||||
#media-session.enable = true;
|
||||
};
|
||||
automatic-timezoned = {
|
||||
enable = true;
|
||||
};
|
||||
|
||||
# Enable sound with pipewire.
|
||||
pulseaudio.enable = false;
|
||||
};
|
||||
|
||||
# enable RealtimeKit for pulse audio
|
||||
security.rtkit.enable = true;
|
||||
})
|
||||
];
|
||||
};
|
||||
}
|
||||
36
modules/nixos/hardware.nix
Normal file
36
modules/nixos/hardware.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
{...}: {
|
||||
flake.nixosModules.nixos-hardware = {
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
options.host.hardware = {
|
||||
piperMouse = {
|
||||
enable = lib.mkEnableOption "host has a piper mouse";
|
||||
};
|
||||
viaKeyboard = {
|
||||
enable = lib.mkEnableOption "host has a via keyboard";
|
||||
};
|
||||
openRGB = {
|
||||
enable = lib.mkEnableOption "host has open rgb hardware";
|
||||
};
|
||||
graphicsAcceleration = {
|
||||
enable = lib.mkEnableOption "host has a gpu for graphical acceleration";
|
||||
};
|
||||
directAccess = {
|
||||
enable = lib.mkEnableOption "can a host be used on its own";
|
||||
};
|
||||
};
|
||||
config = lib.mkMerge [
|
||||
(lib.mkIf config.host.hardware.piperMouse.enable {
|
||||
services.ratbagd.enable = true;
|
||||
})
|
||||
(lib.mkIf config.host.hardware.viaKeyboard.enable {
|
||||
hardware.keyboard.qmk.enable = true;
|
||||
|
||||
services.udev.packages = [pkgs.via];
|
||||
})
|
||||
];
|
||||
};
|
||||
}
|
||||
13
modules/nixos/home-manager-adaptors/default.nix
Normal file
13
modules/nixos/home-manager-adaptors/default.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
# modules in this folder are to adapt home-manager modules configs to nixos-module configs
|
||||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.home-manager-adaptors = {
|
||||
imports = [
|
||||
mod.home-manager-flipperzero
|
||||
mod.home-manager-i18n
|
||||
mod.home-manager-openssh
|
||||
mod.home-manager-steam
|
||||
];
|
||||
};
|
||||
}
|
||||
11
modules/nixos/home-manager-adaptors/flipperzero.nix
Normal file
11
modules/nixos/home-manager-adaptors/flipperzero.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-manager-flipperzero = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
home-users = lib.attrsets.mapAttrsToList (_: user: user) config.home-manager.users;
|
||||
in {
|
||||
hardware.flipperzero.enable = lib.lists.any (home-user: home-user.hardware.flipperzero.enable) home-users;
|
||||
};
|
||||
}
|
||||
28
modules/nixos/home-manager-adaptors/i18n.nix
Normal file
28
modules/nixos/home-manager-adaptors/i18n.nix
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-manager-i18n = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
home-users = lib.attrsets.mapAttrsToList (_: user: user) config.home-manager.users;
|
||||
in {
|
||||
config = {
|
||||
i18n.supportedLocales =
|
||||
lib.unique
|
||||
(builtins.map (l: (lib.replaceStrings ["utf8" "utf-8" "UTF8"] ["UTF-8" "UTF-8" "UTF-8"] l) + "/UTF-8") (
|
||||
[
|
||||
"C.UTF-8"
|
||||
"en_US.UTF-8"
|
||||
config.i18n.defaultLocale
|
||||
]
|
||||
++ (lib.attrValues (lib.filterAttrs (n: v: n != "LANGUAGE") config.i18n.extraLocaleSettings))
|
||||
++ (
|
||||
map (user-config: user-config.i18n.defaultLocale) home-users
|
||||
)
|
||||
++ (lib.lists.flatten (
|
||||
map (user-config: lib.attrValues (lib.filterAttrs (n: v: n != "LANGUAGE") user-config.i18n.extraLocaleSettings)) home-users
|
||||
))
|
||||
));
|
||||
};
|
||||
};
|
||||
}
|
||||
13
modules/nixos/home-manager-adaptors/openssh.nix
Normal file
13
modules/nixos/home-manager-adaptors/openssh.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-manager-openssh = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
users.users =
|
||||
lib.attrsets.mapAttrs (name: value: {
|
||||
openssh.authorizedKeys.keys = value.programs.openssh.authorizedKeys;
|
||||
})
|
||||
config.home-manager.users;
|
||||
};
|
||||
}
|
||||
20
modules/nixos/home-manager-adaptors/steam.nix
Normal file
20
modules/nixos/home-manager-adaptors/steam.nix
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-manager-steam = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
setupSteam =
|
||||
lib.lists.any
|
||||
(value: value)
|
||||
(lib.attrsets.mapAttrsToList (name: value: value.programs.steam.enable) config.home-manager.users);
|
||||
in {
|
||||
config = lib.mkIf setupSteam {
|
||||
programs.steam = {
|
||||
enable = true;
|
||||
# TODO: figure out how to not install steam here
|
||||
# package = lib.mkDefault pkgs.emptyFile;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
5
modules/nixos/i18n.nix
Normal file
5
modules/nixos/i18n.nix
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
{...}: {
|
||||
flake.nixosModules.nixos-i18n = {...}: {
|
||||
i18n.defaultLocale = "en_IE.UTF-8";
|
||||
};
|
||||
}
|
||||
21
modules/nixos/nixos-modules.nix
Normal file
21
modules/nixos/nixos-modules.nix
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.nixos-modules-all = {
|
||||
imports = [
|
||||
mod.nixos-system
|
||||
mod.nixos-hardware
|
||||
mod.nixos-users
|
||||
mod.nixos-desktop
|
||||
mod.nixos-ssh
|
||||
mod.nixos-i18n
|
||||
mod.storage
|
||||
mod.home-manager-adaptors
|
||||
mod.programs
|
||||
];
|
||||
|
||||
nixpkgs.config.permittedInsecurePackages = [
|
||||
"dotnet-sdk-6.0.428"
|
||||
];
|
||||
};
|
||||
}
|
||||
25
modules/nixos/programs/actual/actual.nix
Normal file
25
modules/nixos/programs/actual/actual.nix
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
{...}: {
|
||||
flake.nixosModules.actual-service = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataDirectory = "/var/lib/private/actual";
|
||||
in {
|
||||
options.services.actual = {
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
description = "The port to listen on";
|
||||
default = 5006;
|
||||
};
|
||||
};
|
||||
config = lib.mkIf config.services.actual.enable {
|
||||
services.actual = {
|
||||
settings = {
|
||||
port = config.services.actual.port;
|
||||
dataDir = dataDirectory;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
12
modules/nixos/programs/actual/default.nix
Normal file
12
modules/nixos/programs/actual/default.nix
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.actual = {
|
||||
imports = [
|
||||
mod.actual-service
|
||||
mod.actual-proxy
|
||||
mod.actual-fail2ban
|
||||
mod.actual-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
11
modules/nixos/programs/actual/fail2ban.nix
Normal file
11
modules/nixos/programs/actual/fail2ban.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
{...}: {
|
||||
flake.nixosModules.actual-fail2ban = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.actual.enable && config.services.fail2ban.enable) {
|
||||
# TODO: configuration for fail2ban for actual
|
||||
};
|
||||
};
|
||||
}
|
||||
36
modules/nixos/programs/actual/proxy.nix
Normal file
36
modules/nixos/programs/actual/proxy.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
{...}: {
|
||||
flake.nixosModules.actual-proxy = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.actual = {
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "domain that actual will be hosted at";
|
||||
default = "actual.arpa";
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for actual";
|
||||
default = [];
|
||||
};
|
||||
reverseProxy.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.actual.enable && config.services.reverseProxy.enable;
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.actual.reverseProxy.enable {
|
||||
services.reverseProxy.services.actual = {
|
||||
target = "http://localhost:${toString config.services.actual.settings.port}";
|
||||
domain = config.services.actual.domain;
|
||||
extraDomains = config.services.actual.extraDomains;
|
||||
|
||||
settings = {
|
||||
forwardHeaders.enable = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/actual/storage.nix
Normal file
23
modules/nixos/programs/actual/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.actual-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataDirectory = "/var/lib/private/actual";
|
||||
in {
|
||||
options.services.actual.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.actual.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.actual.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${dataDirectory}" = lib.mkIf config.services.actual.impermanence.enable {
|
||||
owner.name = "actual";
|
||||
group.name = "actual";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
9
modules/nixos/programs/bazarr/default.nix
Normal file
9
modules/nixos/programs/bazarr/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.bazarr = {
|
||||
imports = [
|
||||
mod.bazarr-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/bazarr/storage.nix
Normal file
23
modules/nixos/programs/bazarr/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.bazarr-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
bazarr_data_directory = "/var/lib/bazarr";
|
||||
in {
|
||||
options.services.bazarr.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.bazarr.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.bazarr.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${bazarr_data_directory}" = lib.mkIf config.services.bazarr.impermanence.enable {
|
||||
owner.name = "bazarr";
|
||||
group.name = "bazarr";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
195
modules/nixos/programs/crab-hole/crab-hole.nix
Normal file
195
modules/nixos/programs/crab-hole/crab-hole.nix
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
{...}: {
|
||||
flake.nixosModules.crab-hole-service = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
cfg = config.services.crab-hole;
|
||||
in {
|
||||
options.services.crab-hole = {
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 8080;
|
||||
description = "Port for the crab-hole API to listen on.";
|
||||
};
|
||||
|
||||
openFirewall = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Whether to open the firewall for the crab-hole API port.";
|
||||
};
|
||||
|
||||
listen = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "0.0.0.0";
|
||||
description = "Address for the crab-hole API to listen on.";
|
||||
};
|
||||
|
||||
show_doc = lib.mkEnableOption "OpenAPI documentation (loads content from third party websites)";
|
||||
|
||||
downstreams = {
|
||||
host = {
|
||||
enable = lib.mkEnableOption "host downstream DNS server accessible from network on all interfaces";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 53;
|
||||
description = "Port for the host downstream DNS server to listen on.";
|
||||
};
|
||||
openFirewall = lib.mkEnableOption "automatic port forwarding for the host downstream";
|
||||
disableSystemdResolved = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = "Whether to automatically disable systemd-resolved when using port 53. Set to false if you want to handle the conflict manually.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
extraDownstreams = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.submodule {
|
||||
options = {
|
||||
protocol = lib.mkOption {
|
||||
type = lib.types.enum ["udp" "tcp" "tls" "https" "quic"];
|
||||
description = "Protocol for the downstream server.";
|
||||
};
|
||||
|
||||
listen = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "Address to listen on for downstream connections.";
|
||||
};
|
||||
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
description = "Port to listen on for downstream connections.";
|
||||
};
|
||||
};
|
||||
});
|
||||
default = [];
|
||||
description = "List of additional downstream DNS server configurations.";
|
||||
};
|
||||
|
||||
upstreams = {
|
||||
cloudFlare = {
|
||||
enable = lib.mkEnableOption "Cloudflare DNS over TLS upstream servers (1.1.1.1 and 1.0.0.1)";
|
||||
};
|
||||
};
|
||||
|
||||
extraUpstreams = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.submodule {
|
||||
options = {
|
||||
socket_addr = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "Socket address of the upstream DNS server (e.g., \"1.1.1.1:853\" or \"[2606:4700:4700::1111]:853\").";
|
||||
};
|
||||
|
||||
protocol = lib.mkOption {
|
||||
type = lib.types.enum ["udp" "tcp" "tls" "https" "quic"];
|
||||
description = "Protocol to use for upstream DNS queries.";
|
||||
};
|
||||
};
|
||||
});
|
||||
default = [];
|
||||
description = "List of additional upstream DNS server configurations.";
|
||||
};
|
||||
|
||||
blocklists = {
|
||||
ad_malware = {
|
||||
enable = lib.mkEnableOption "Host file for blocking ads and malware";
|
||||
url = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "http://sbc.io/hosts/hosts";
|
||||
description = "URL of the ad and malware blocklist host file";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
extraBlocklists = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [];
|
||||
description = "Additional blocklist URLs to be added to the configuration";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
# Assertions for proper configuration
|
||||
assertions = [
|
||||
{
|
||||
assertion = !(cfg.downstreams.host.enable && cfg.downstreams.host.port == 53 && config.services.resolved.enable && cfg.downstreams.host.disableSystemdResolved);
|
||||
message = "crab-hole host downstream cannot use port 53 while systemd-resolved is enabled. Either disable systemd-resolved or use a different port.";
|
||||
}
|
||||
{
|
||||
assertion = !(cfg.downstreams.host.enable && cfg.downstreams.host.port == 53 && !cfg.downstreams.host.disableSystemdResolved && config.services.resolved.enable);
|
||||
message = "crab-hole host downstream is configured to use port 53 but systemd-resolved is still enabled and disableSystemdResolved is false. Set disableSystemdResolved = true or manually disable systemd-resolved.";
|
||||
}
|
||||
];
|
||||
|
||||
# Automatically disable systemd-resolved if using port 53
|
||||
services.resolved.enable = lib.mkIf (cfg.downstreams.host.enable && cfg.downstreams.host.port == 53 && cfg.downstreams.host.disableSystemdResolved) (lib.mkForce false);
|
||||
|
||||
# Configure DNS nameservers when disabling systemd-resolved
|
||||
networking.nameservers = lib.mkIf (cfg.downstreams.host.enable && cfg.downstreams.host.port == 53 && cfg.downstreams.host.disableSystemdResolved) (lib.mkDefault ["127.0.0.1" "1.1.1.1" "8.8.8.8"]);
|
||||
|
||||
services.crab-hole.settings = lib.mkMerge [
|
||||
{
|
||||
api = {
|
||||
port = cfg.port;
|
||||
listen = cfg.listen;
|
||||
show_doc = cfg.show_doc;
|
||||
};
|
||||
downstream = cfg.extraDownstreams;
|
||||
upstream.name_servers = cfg.extraUpstreams;
|
||||
blocklist.lists = cfg.extraBlocklists;
|
||||
}
|
||||
(lib.mkIf cfg.blocklists.ad_malware.enable {
|
||||
blocklist.lists = [cfg.blocklists.ad_malware.url];
|
||||
})
|
||||
(lib.mkIf cfg.downstreams.host.enable {
|
||||
downstream = [
|
||||
{
|
||||
protocol = "udp";
|
||||
listen = "0.0.0.0";
|
||||
port = cfg.downstreams.host.port;
|
||||
}
|
||||
];
|
||||
})
|
||||
(lib.mkIf cfg.upstreams.cloudFlare.enable {
|
||||
upstream.name_servers = [
|
||||
{
|
||||
socket_addr = "1.1.1.1:853";
|
||||
protocol = "tls";
|
||||
tls_dns_name = "1dot1dot1dot1.cloudflare-dns.com";
|
||||
trust_nx_responses = false;
|
||||
}
|
||||
{
|
||||
socket_addr = "1.0.0.1:853";
|
||||
protocol = "tls";
|
||||
tls_dns_name = "1dot1dot1dot1.cloudflare-dns.com";
|
||||
trust_nx_responses = false;
|
||||
}
|
||||
{
|
||||
socket_addr = "[2606:4700:4700::1111]:853";
|
||||
protocol = "tls";
|
||||
tls_dns_name = "1dot1dot1dot1.cloudflare-dns.com";
|
||||
trust_nx_responses = false;
|
||||
}
|
||||
{
|
||||
socket_addr = "[2606:4700:4700::1001]:853";
|
||||
protocol = "tls";
|
||||
tls_dns_name = "1dot1dot1dot1.cloudflare-dns.com";
|
||||
trust_nx_responses = false;
|
||||
}
|
||||
];
|
||||
})
|
||||
];
|
||||
|
||||
# Open firewall if requested
|
||||
networking.firewall = lib.mkMerge [
|
||||
(lib.mkIf cfg.openFirewall {
|
||||
allowedTCPPorts = [cfg.port];
|
||||
})
|
||||
(lib.mkIf (cfg.downstreams.host.enable && cfg.downstreams.host.openFirewall) {
|
||||
allowedUDPPorts = [cfg.downstreams.host.port];
|
||||
})
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/crab-hole/default.nix
Normal file
10
modules/nixos/programs/crab-hole/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.crab-hole = {
|
||||
imports = [
|
||||
mod.crab-hole-service
|
||||
mod.crab-hole-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/crab-hole/storage.nix
Normal file
23
modules/nixos/programs/crab-hole/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.crab-hole-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
workingDirectory = "/var/lib/private/crab-hole";
|
||||
in {
|
||||
options.services.crab-hole.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.crab-hole.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.crab-hole.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${workingDirectory}" = lib.mkIf config.services.crab-hole.impermanence.enable {
|
||||
owner.name = "crab-hole";
|
||||
group.name = "crab-hole";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
32
modules/nixos/programs/default.nix
Normal file
32
modules/nixos/programs/default.nix
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.programs = {
|
||||
imports = [
|
||||
mod.steam
|
||||
mod.sync
|
||||
mod.tailscale
|
||||
mod.actual
|
||||
mod.bazarr
|
||||
mod.crab-hole
|
||||
mod.fail2ban
|
||||
mod.flaresolverr
|
||||
mod.forgejo
|
||||
mod.home-assistant
|
||||
mod.immich
|
||||
mod.jackett
|
||||
mod.jellyfin
|
||||
mod.lidarr
|
||||
mod.network-storage
|
||||
mod.panoramax
|
||||
mod.paperless
|
||||
mod.postgres
|
||||
mod.qbittorent
|
||||
mod.radarr
|
||||
mod.reverse-proxy
|
||||
mod.searx
|
||||
mod.sonarr
|
||||
mod.wyoming
|
||||
];
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/fail2ban/default.nix
Normal file
10
modules/nixos/programs/fail2ban/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.fail2ban = {
|
||||
imports = [
|
||||
mod.fail2ban-service
|
||||
mod.fail2ban-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
53
modules/nixos/programs/fail2ban/fail2ban.nix
Normal file
53
modules/nixos/programs/fail2ban/fail2ban.nix
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
{...}: {
|
||||
flake.nixosModules.fail2ban-service = {
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf config.services.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/nginx.local".text = lib.mkIf config.services.nginx.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = "limiting requests, excess:.* by zone.*client: <HOST>"
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
maxretry = 5;
|
||||
ignoreIP = [
|
||||
# Whitelist local networks
|
||||
"10.0.0.0/8"
|
||||
"172.16.0.0/12"
|
||||
"192.168.0.0/16"
|
||||
|
||||
# tail scale tailnet
|
||||
"100.64.0.0/10"
|
||||
"fd7a:115c:a1e0::/48"
|
||||
];
|
||||
bantime = "24h"; # Ban IPs for one day on the first ban
|
||||
bantime-increment = {
|
||||
enable = true; # Enable increment of bantime after each violation
|
||||
formula = "ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)";
|
||||
maxtime = "168h"; # Do not ban for more than 1 week
|
||||
overalljails = true; # Calculate the ban time based on all the violations
|
||||
};
|
||||
jails = {
|
||||
nginx-iptables.settings = lib.mkIf config.services.nginx.enable {
|
||||
enabled = true;
|
||||
filter = "nginx";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
# TODO; figure out if there is any fail2ban things we can do on searx
|
||||
# searx-iptables.settings = lib.mkIf config.services.searx.enable {};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/fail2ban/storage.nix
Normal file
23
modules/nixos/programs/fail2ban/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.fail2ban-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataFolder = "/var/lib/fail2ban";
|
||||
in {
|
||||
options.services.fail2ban.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.fail2ban.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.fail2ban.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${dataFolder}" = lib.mkIf config.services.fail2ban.impermanence.enable {
|
||||
owner.name = "fail2ban";
|
||||
group.name = "fail2ban";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
9
modules/nixos/programs/flaresolverr/default.nix
Normal file
9
modules/nixos/programs/flaresolverr/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.flaresolverr = {
|
||||
imports = [
|
||||
mod.flaresolverr-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
21
modules/nixos/programs/flaresolverr/storage.nix
Normal file
21
modules/nixos/programs/flaresolverr/storage.nix
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{...}: {
|
||||
flake.nixosModules.flaresolverr-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.flaresolverr.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.flaresolverr.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.flaresolverr.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."/var/lib/flaresolverr" = lib.mkIf config.services.flaresolverr.impermanence.enable {
|
||||
owner.name = "flaresolverr";
|
||||
group.name = "flaresolverr";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
34
modules/nixos/programs/forgejo/database.nix
Normal file
34
modules/nixos/programs/forgejo/database.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{...}: {
|
||||
flake.nixosModules.forgejo-database = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
usingPostgres = config.services.forgejo.database.type == "postgres";
|
||||
in {
|
||||
config = lib.mkIf config.services.forgejo.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = !usingPostgres || config.services.postgresql.enable;
|
||||
message = "PostgreSQL must be enabled when Forgejo database type is postgres";
|
||||
}
|
||||
{
|
||||
assertion = !(usingPostgres && config.services.forgejo.database.createDatabase) || (builtins.any (db: db == "forgejo") config.services.postgresql.ensureDatabases);
|
||||
message = "Forgejo built-in database creation failed - expected 'forgejo' in ensureDatabases but got: ${builtins.toString config.services.postgresql.ensureDatabases}";
|
||||
}
|
||||
{
|
||||
assertion = !(usingPostgres && config.services.forgejo.database.createDatabase) || (builtins.any (user: user.name == "forgejo") config.services.postgresql.ensureUsers);
|
||||
message = "Forgejo built-in user creation failed - expected user 'forgejo' in ensureUsers but got: ${builtins.toString (builtins.map (u: u.name) config.services.postgresql.ensureUsers)}";
|
||||
}
|
||||
];
|
||||
|
||||
services.forgejo.database.createDatabase = lib.mkDefault usingPostgres;
|
||||
|
||||
systemd.services.forgejo = lib.mkIf usingPostgres {
|
||||
requires = [
|
||||
config.systemd.services.postgresql.name
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
13
modules/nixos/programs/forgejo/default.nix
Normal file
13
modules/nixos/programs/forgejo/default.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.forgejo = {
|
||||
imports = [
|
||||
mod.forgejo-service
|
||||
mod.forgejo-database
|
||||
mod.forgejo-proxy
|
||||
mod.forgejo-fail2ban
|
||||
mod.forgejo-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
43
modules/nixos/programs/forgejo/fail2ban.nix
Normal file
43
modules/nixos/programs/forgejo/fail2ban.nix
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
{...}: {
|
||||
flake.nixosModules.forgejo-fail2ban = {
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
options.services.forgejo = {
|
||||
fail2ban = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.forgejo.enable && config.services.fail2ban.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.forgejo.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/forgejo.local".text = lib.mkIf config.services.forgejo.enable (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = ".*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>"
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
forgejo-iptables.settings = lib.mkIf config.services.forgejo.enable {
|
||||
enabled = true;
|
||||
filter = "forgejo";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.forgejo.settings.log.ROOT_PATH}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
47
modules/nixos/programs/forgejo/forgejo.nix
Normal file
47
modules/nixos/programs/forgejo/forgejo.nix
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
{...}: {
|
||||
flake.nixosModules.forgejo-service = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
httpPort = 8081;
|
||||
sshPort = 22222;
|
||||
db_user = "forgejo";
|
||||
in {
|
||||
config = lib.mkIf config.services.forgejo.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.services.forgejo.settings.server.BUILTIN_SSH_SERVER_USER == config.users.users.git.name;
|
||||
message = "Forgejo BUILTIN_SSH_SERVER_USER hardcoded value does not match expected git user name";
|
||||
}
|
||||
];
|
||||
|
||||
services.forgejo = {
|
||||
database = {
|
||||
type = "postgres";
|
||||
socket = "/run/postgresql";
|
||||
};
|
||||
lfs.enable = true;
|
||||
settings = {
|
||||
server = {
|
||||
DOMAIN = config.services.forgejo.reverseProxy.domain;
|
||||
HTTP_PORT = httpPort;
|
||||
START_SSH_SERVER = true;
|
||||
SSH_LISTEN_PORT = sshPort;
|
||||
SSH_PORT = 22;
|
||||
BUILTIN_SSH_SERVER_USER = "git";
|
||||
ROOT_URL = "https://git.jan-leila.com";
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
database = {
|
||||
DB_TYPE = "postgres";
|
||||
NAME = db_user;
|
||||
USER = db_user;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
44
modules/nixos/programs/forgejo/proxy.nix
Normal file
44
modules/nixos/programs/forgejo/proxy.nix
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
{...}: {
|
||||
flake.nixosModules.forgejo-proxy = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
httpPort = 8081;
|
||||
in {
|
||||
options.services.forgejo = {
|
||||
reverseProxy = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.forgejo.enable && config.services.reverseProxy.enable;
|
||||
};
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "domain that forgejo will be hosted at";
|
||||
default = "git.jan-leila.com";
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for forgejo";
|
||||
default = [];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.forgejo.reverseProxy.enable {
|
||||
services.reverseProxy.services.forgejo = {
|
||||
target = "http://localhost:${toString httpPort}";
|
||||
domain = config.services.forgejo.reverseProxy.domain;
|
||||
extraDomains = config.services.forgejo.reverseProxy.extraDomains;
|
||||
|
||||
settings = {
|
||||
forwardHeaders.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.forgejo.settings.server.SSH_LISTEN_PORT
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/forgejo/storage.nix
Normal file
23
modules/nixos/programs/forgejo/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.forgejo-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
stateDir = "/var/lib/forgejo";
|
||||
in {
|
||||
options.services.forgejo.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.forgejo.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.forgejo.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${stateDir}" = lib.mkIf config.services.forgejo.impermanence.enable {
|
||||
owner.name = "forgejo";
|
||||
group.name = "forgejo";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
55
modules/nixos/programs/home-assistant/database.nix
Normal file
55
modules/nixos/programs/home-assistant/database.nix
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-database = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.home-assistant = {
|
||||
postgres = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Use PostgreSQL instead of SQLite";
|
||||
};
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "hass";
|
||||
description = "Database user name";
|
||||
};
|
||||
database = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "hass";
|
||||
description = "Database name";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = !config.services.home-assistant.postgres.enable || config.services.postgresql.enable;
|
||||
message = "PostgreSQL must be enabled when using postgres database for Home Assistant";
|
||||
}
|
||||
];
|
||||
|
||||
services.postgresql.databases.home-assistant = lib.mkIf config.services.home-assistant.postgres.enable {
|
||||
enable = true;
|
||||
user = config.services.home-assistant.postgres.user;
|
||||
database = config.services.home-assistant.postgres.database;
|
||||
};
|
||||
|
||||
services.home-assistant = lib.mkIf config.services.home-assistant.postgres.enable {
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
psycopg2
|
||||
];
|
||||
};
|
||||
|
||||
systemd.services.home-assistant = lib.mkIf config.services.home-assistant.postgres.enable {
|
||||
requires = [
|
||||
config.systemd.services.postgresql.name
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
14
modules/nixos/programs/home-assistant/default.nix
Normal file
14
modules/nixos/programs/home-assistant/default.nix
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.home-assistant = {
|
||||
imports = [
|
||||
mod.home-assistant-service
|
||||
mod.home-assistant-proxy
|
||||
mod.home-assistant-database
|
||||
mod.home-assistant-fail2ban
|
||||
mod.home-assistant-storage
|
||||
mod.home-assistant-extensions
|
||||
];
|
||||
};
|
||||
}
|
||||
11
modules/nixos/programs/home-assistant/extensions/default.nix
Normal file
11
modules/nixos/programs/home-assistant/extensions/default.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.home-assistant-extensions = {
|
||||
imports = [
|
||||
mod.home-assistant-sonos
|
||||
mod.home-assistant-jellyfin
|
||||
mod.home-assistant-wyoming
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-jellyfin = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.jellyfin.enable) {
|
||||
services.home-assistant.extraComponents = ["jellyfin"];
|
||||
# TODO: configure port, address, and login information here
|
||||
};
|
||||
}
|
||||
13
modules/nixos/programs/home-assistant/extensions/sonos.nix
Normal file
13
modules/nixos/programs/home-assistant/extensions/sonos.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-sonos = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.sonos.enable) {
|
||||
services.home-assistant.extraComponents = ["sonos"];
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
config.services.home-assistant.extensions.sonos.port
|
||||
];
|
||||
};
|
||||
}
|
||||
11
modules/nixos/programs/home-assistant/extensions/wyoming.nix
Normal file
11
modules/nixos/programs/home-assistant/extensions/wyoming.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-wyoming = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.home-assistant.extensions.wyoming.enable) {
|
||||
services.home-assistant.extraComponents = ["wyoming"];
|
||||
services.wyoming.enable = true;
|
||||
};
|
||||
}
|
||||
51
modules/nixos/programs/home-assistant/fail2ban.nix
Normal file
51
modules/nixos/programs/home-assistant/fail2ban.nix
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-fail2ban = {
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.home-assistant = {
|
||||
fail2ban = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.fail2ban.enable && config.services.home-assistant.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/hass.local".text = (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[INCLUDES]
|
||||
before = common.conf
|
||||
|
||||
[Definition]
|
||||
failregex = ^%(__prefix_line)s.*Login attempt or request with invalid authentication from <HOST>.*$
|
||||
|
||||
ignoreregex =
|
||||
|
||||
[Init]
|
||||
datepattern = ^%%Y-%%m-%%d %%H:%%M:%%S
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
home-assistant-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "hass";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.home-assistant.configDir}/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
106
modules/nixos/programs/home-assistant/home-assistant.nix
Normal file
106
modules/nixos/programs/home-assistant/home-assistant.nix
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-service = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.home-assistant = {
|
||||
database = lib.mkOption {
|
||||
type = lib.types.enum [
|
||||
"builtin"
|
||||
"postgres"
|
||||
];
|
||||
description = "what database do we want to use";
|
||||
default = "builtin";
|
||||
};
|
||||
|
||||
extensions = {
|
||||
sonos = {
|
||||
enable = lib.mkEnableOption "enable the sonos plugin";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 1400;
|
||||
description = "what port to use for sonos discovery";
|
||||
};
|
||||
};
|
||||
jellyfin = {
|
||||
enable = lib.mkEnableOption "enable the jellyfin plugin";
|
||||
};
|
||||
wyoming = {
|
||||
enable = lib.mkEnableOption "enable wyoming";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.enable (lib.mkMerge [
|
||||
{
|
||||
services.home-assistant = {
|
||||
configDir = "/var/lib/hass";
|
||||
extraComponents = [
|
||||
"default_config"
|
||||
"esphome"
|
||||
"met"
|
||||
"radio_browser"
|
||||
"isal"
|
||||
"zha"
|
||||
"webostv"
|
||||
"tailscale"
|
||||
"syncthing"
|
||||
"analytics_insights"
|
||||
"unifi"
|
||||
"openweathermap"
|
||||
"ollama"
|
||||
"mobile_app"
|
||||
"logbook"
|
||||
"ssdp"
|
||||
"usb"
|
||||
"webhook"
|
||||
"bluetooth"
|
||||
"dhcp"
|
||||
"energy"
|
||||
"history"
|
||||
"backup"
|
||||
"assist_pipeline"
|
||||
"conversation"
|
||||
"sun"
|
||||
"zeroconf"
|
||||
"cpuspeed"
|
||||
];
|
||||
config = {
|
||||
http = {
|
||||
server_port = 8123;
|
||||
use_x_forwarded_for = true;
|
||||
trusted_proxies = ["127.0.0.1" "::1"];
|
||||
ip_ban_enabled = true;
|
||||
login_attempts_threshold = 10;
|
||||
};
|
||||
homeassistant = {
|
||||
external_url = "https://${config.services.home-assistant.domain}";
|
||||
# internal_url = "http://192.168.1.2:8123";
|
||||
};
|
||||
recorder.db_url = "postgresql://@/${config.services.home-assistant.configDir}";
|
||||
"automation manual" = [];
|
||||
"automation ui" = "!include automations.yaml";
|
||||
mobile_app = {};
|
||||
};
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
hassil
|
||||
numpy
|
||||
gtts
|
||||
];
|
||||
};
|
||||
|
||||
# TODO: configure /var/lib/hass/secrets.yaml via sops
|
||||
|
||||
networking.firewall.allowedUDPPorts = [
|
||||
1900
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"f ${config.services.home-assistant.configDir}/automations.yaml 0755 hass hass"
|
||||
];
|
||||
}
|
||||
]);
|
||||
};
|
||||
}
|
||||
45
modules/nixos/programs/home-assistant/proxy.nix
Normal file
45
modules/nixos/programs/home-assistant/proxy.nix
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-proxy = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.home-assistant = {
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "domain that home-assistant will be hosted at";
|
||||
default = "home-assistant.arpa";
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for home-assistant";
|
||||
default = [];
|
||||
};
|
||||
reverseProxy = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.reverseProxy.enable && config.services.home-assistant.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.reverseProxy.enable {
|
||||
services.reverseProxy.services.home-assistant = {
|
||||
target = "http://localhost:${toString config.services.home-assistant.config.http.server_port}";
|
||||
domain = config.services.home-assistant.domain;
|
||||
extraDomains = config.services.home-assistant.extraDomains;
|
||||
|
||||
settings = {
|
||||
proxyWebsockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
|
||||
# Custom timeout settings
|
||||
proxyHeaders = {
|
||||
enable = true;
|
||||
timeout = 90;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/home-assistant/storage.nix
Normal file
23
modules/nixos/programs/home-assistant/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.home-assistant-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
configDir = "/var/lib/hass";
|
||||
in {
|
||||
options.services.home-assistant.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.home-assistant.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.home-assistant.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${configDir}" = lib.mkIf config.services.home-assistant.impermanence.enable {
|
||||
owner.name = "hass";
|
||||
group.name = "hass";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
32
modules/nixos/programs/immich/database.nix
Normal file
32
modules/nixos/programs/immich/database.nix
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
{...}: {
|
||||
flake.nixosModules.immich-database = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf config.services.immich.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = !config.services.immich.database.enable || config.services.postgresql.enable;
|
||||
message = "PostgreSQL must be enabled when using postgres database for Immich";
|
||||
}
|
||||
{
|
||||
assertion = !(config.services.immich.database.enable && config.services.immich.database.createDB) || (builtins.any (db: db == "immich") config.services.postgresql.ensureDatabases);
|
||||
message = "Immich built-in database creation failed - expected 'immich' in ensureDatabases but got: ${builtins.toString config.services.postgresql.ensureDatabases}";
|
||||
}
|
||||
{
|
||||
assertion = !(config.services.immich.database.enable && config.services.immich.database.createDB) || (builtins.any (user: user.name == "immich") config.services.postgresql.ensureUsers);
|
||||
message = "Immich built-in user creation failed - expected user 'immich' in ensureUsers but got: ${builtins.toString (builtins.map (u: u.name) config.services.postgresql.ensureUsers)}";
|
||||
}
|
||||
];
|
||||
|
||||
# Note: Immich has built-in database creation via services.immich.database.createDB we only add the systemd dependency
|
||||
|
||||
systemd.services.immich-server = lib.mkIf config.services.immich.database.enable {
|
||||
requires = [
|
||||
config.systemd.services.postgresql.name
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
12
modules/nixos/programs/immich/default.nix
Normal file
12
modules/nixos/programs/immich/default.nix
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.immich = {
|
||||
imports = [
|
||||
mod.immich-proxy
|
||||
mod.immich-database
|
||||
mod.immich-fail2ban
|
||||
mod.immich-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
37
modules/nixos/programs/immich/fail2ban.nix
Normal file
37
modules/nixos/programs/immich/fail2ban.nix
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
{...}: {
|
||||
flake.nixosModules.immich-fail2ban = {
|
||||
lib,
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
options.services.immich = {
|
||||
fail2ban = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.fail2ban.enable && config.services.immich.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.immich.fail2ban.enable {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/immich.local".text = pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = immich-server.*Failed login attempt for user.+from ip address\s?<ADDR>
|
||||
journalmatch = CONTAINER_TAG=immich-server
|
||||
'');
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
immich-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "immich";
|
||||
backend = "systemd";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
46
modules/nixos/programs/immich/proxy.nix
Normal file
46
modules/nixos/programs/immich/proxy.nix
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
{...}: {
|
||||
flake.nixosModules.immich-proxy = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.immich = {
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "domain that immich will be hosted at";
|
||||
default = "immich.arpa";
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for immich";
|
||||
default = [];
|
||||
};
|
||||
reverseProxy = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.immich.enable && config.services.reverseProxy.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.immich.reverseProxy.enable {
|
||||
services.reverseProxy.services.immich = {
|
||||
target = "http://localhost:${toString config.services.immich.port}";
|
||||
domain = config.services.immich.domain;
|
||||
extraDomains = config.services.immich.extraDomains;
|
||||
|
||||
settings = {
|
||||
proxyWebsockets.enable = true;
|
||||
forwardHeaders.enable = true;
|
||||
maxBodySize = 50000;
|
||||
|
||||
# Custom timeout settings
|
||||
proxyHeaders = {
|
||||
enable = true;
|
||||
timeout = 600;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/immich/storage.nix
Normal file
23
modules/nixos/programs/immich/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.immich-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
mediaLocation = "/var/lib/immich";
|
||||
in {
|
||||
options.services.immich.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.immich.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.immich.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${mediaLocation}" = lib.mkIf config.services.immich.impermanence.enable {
|
||||
owner.name = "immich";
|
||||
group.name = "immich";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
21
modules/nixos/programs/jackett/default.nix
Normal file
21
modules/nixos/programs/jackett/default.nix
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.jackett = {
|
||||
imports = [
|
||||
mod.jackett-storage
|
||||
];
|
||||
|
||||
config = {
|
||||
nixpkgs.overlays = [
|
||||
# Disable jackett tests due to date-related test failures
|
||||
# (ParseDateTimeGoLangTest expects 2024-09-14 but gets 2025-09-14 due to year rollover logic)
|
||||
(final: prev: {
|
||||
jackett = prev.jackett.overrideAttrs (oldAttrs: {
|
||||
doCheck = false;
|
||||
});
|
||||
})
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/jackett/storage.nix
Normal file
23
modules/nixos/programs/jackett/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.jackett-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jackett_data_directory = "/var/lib/jackett/.config/Jackett";
|
||||
in {
|
||||
options.services.jackett.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.jackett.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jackett.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${jackett_data_directory}" = lib.mkIf config.services.jackett.impermanence.enable {
|
||||
owner.name = "jackett";
|
||||
group.name = "jackett";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
12
modules/nixos/programs/jellyfin/default.nix
Normal file
12
modules/nixos/programs/jellyfin/default.nix
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.jellyfin = {
|
||||
imports = [
|
||||
mod.jellyfin-service
|
||||
mod.jellyfin-proxy
|
||||
mod.jellyfin-fail2ban
|
||||
mod.jellyfin-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
34
modules/nixos/programs/jellyfin/fail2ban.nix
Normal file
34
modules/nixos/programs/jellyfin/fail2ban.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{...}: {
|
||||
flake.nixosModules.jellyfin-fail2ban = {
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf (config.services.jellyfin.enable && config.services.fail2ban.enable) {
|
||||
environment.etc = {
|
||||
"fail2ban/filter.d/jellyfin.local".text = (
|
||||
pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
|
||||
[Definition]
|
||||
failregex = "^.*Authentication request for .* has been denied \\\\\\(IP: \\\"<ADDR>\\\"\\\\\\)\\\\\\."
|
||||
'')
|
||||
);
|
||||
};
|
||||
|
||||
services.fail2ban = {
|
||||
jails = {
|
||||
jellyfin-iptables.settings = {
|
||||
enabled = true;
|
||||
filter = "jellyfin";
|
||||
action = ''iptables-multiport[name=HTTP, port="http,https"]'';
|
||||
logpath = "${config.services.jellyfin.dataDir}/log/*.log";
|
||||
backend = "auto";
|
||||
findtime = 600;
|
||||
bantime = 600;
|
||||
maxretry = 5;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
34
modules/nixos/programs/jellyfin/jellyfin.nix
Normal file
34
modules/nixos/programs/jellyfin/jellyfin.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{...}: {
|
||||
flake.nixosModules.jellyfin-service = {
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfinPort = 8096;
|
||||
dlanPort = 1900;
|
||||
in {
|
||||
options.services.jellyfin = {
|
||||
media_directory = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "directory jellyfin media will be hosted at";
|
||||
default = "/srv/jellyfin/media";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jellyfin.enable {
|
||||
environment.systemPackages = [
|
||||
pkgs.jellyfin
|
||||
pkgs.jellyfin-web
|
||||
pkgs.jellyfin-ffmpeg
|
||||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [jellyfinPort dlanPort];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${config.services.jellyfin.media_directory} 2770 jellyfin jellyfin_media"
|
||||
"A ${config.services.jellyfin.media_directory} - - - - u:jellyfin:rwX,g:jellyfin_media:rwX,o::-"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
43
modules/nixos/programs/jellyfin/proxy.nix
Normal file
43
modules/nixos/programs/jellyfin/proxy.nix
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
{...}: {
|
||||
flake.nixosModules.jellyfin-proxy = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfinPort = 8096;
|
||||
in {
|
||||
options.services.jellyfin = {
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "domain that jellyfin will be hosted at";
|
||||
default = "jellyfin.arpa";
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for jellyfin";
|
||||
default = [];
|
||||
};
|
||||
reverseProxy = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.jellyfin.enable && config.services.reverseProxy.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jellyfin.reverseProxy.enable {
|
||||
services.reverseProxy.services.jellyfin = {
|
||||
target = "http://localhost:${toString jellyfinPort}";
|
||||
domain = config.services.jellyfin.domain;
|
||||
extraDomains = config.services.jellyfin.extraDomains;
|
||||
|
||||
settings = {
|
||||
forwardHeaders.enable = true;
|
||||
maxBodySize = 20;
|
||||
noSniff.enable = true;
|
||||
proxyBuffering.enable = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
58
modules/nixos/programs/jellyfin/storage.nix
Normal file
58
modules/nixos/programs/jellyfin/storage.nix
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
{...}: {
|
||||
flake.nixosModules.jellyfin-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
jellyfin_data_directory = "/var/lib/jellyfin";
|
||||
jellyfin_cache_directory = "/var/cache/jellyfin";
|
||||
in {
|
||||
options.services.jellyfin.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.jellyfin.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.jellyfin.enable {
|
||||
storage.datasets.replicate = {
|
||||
"system/root" = {
|
||||
directories = {
|
||||
"${jellyfin_data_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "jellyfin";
|
||||
group.name = "jellyfin";
|
||||
};
|
||||
"${jellyfin_cache_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "jellyfin";
|
||||
group.name = "jellyfin";
|
||||
};
|
||||
};
|
||||
};
|
||||
"system/media" = {
|
||||
mount = "/persist/replicate/system/media";
|
||||
|
||||
directories."${config.services.jellyfin.media_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "jellyfin";
|
||||
group.name = "jellyfin_media";
|
||||
owner.permissions = {
|
||||
read = true;
|
||||
write = true;
|
||||
execute = true;
|
||||
};
|
||||
group.permissions = {
|
||||
read = true;
|
||||
write = true;
|
||||
execute = true;
|
||||
};
|
||||
other.permissions = {
|
||||
read = false;
|
||||
write = false;
|
||||
execute = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
9
modules/nixos/programs/lidarr/default.nix
Normal file
9
modules/nixos/programs/lidarr/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.lidarr = {
|
||||
imports = [
|
||||
mod.lidarr-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/lidarr/storage.nix
Normal file
23
modules/nixos/programs/lidarr/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.lidarr-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
lidarr_data_directory = "/var/lib/lidarr/.config/Lidarr";
|
||||
in {
|
||||
options.services.lidarr.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.lidarr.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.lidarr.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${lidarr_data_directory}" = lib.mkIf config.services.lidarr.impermanence.enable {
|
||||
owner.name = "lidarr";
|
||||
group.name = "lidarr";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/network_storage/default.nix
Normal file
10
modules/nixos/programs/network_storage/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.network-storage = {
|
||||
imports = [
|
||||
mod.network-storage-service
|
||||
mod.network-storage-nfs
|
||||
];
|
||||
};
|
||||
}
|
||||
88
modules/nixos/programs/network_storage/network_storage.nix
Normal file
88
modules/nixos/programs/network_storage/network_storage.nix
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
{...}: {
|
||||
flake.nixosModules.network-storage-service = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
export_directory = config.host.network_storage.export_directory;
|
||||
in {
|
||||
options = {
|
||||
host.network_storage = {
|
||||
enable = lib.mkEnableOption "is this machine going to export network storage";
|
||||
export_directory = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = "what are exports going to be stored in";
|
||||
default = "/exports";
|
||||
};
|
||||
directories = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.submodule ({config, ...}: {
|
||||
options = {
|
||||
folder = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "what is the name of this export directory";
|
||||
};
|
||||
bind = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
description = "is this directory bound to anywhere";
|
||||
default = null;
|
||||
};
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "what user owns this directory";
|
||||
default = "nouser";
|
||||
};
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "what group owns this directory";
|
||||
default = "nogroup";
|
||||
};
|
||||
_directory = lib.mkOption {
|
||||
internal = true;
|
||||
readOnly = true;
|
||||
type = lib.types.path;
|
||||
default = "${export_directory}/${config.folder}";
|
||||
};
|
||||
};
|
||||
}));
|
||||
description = "list of directory names to export";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.host.network_storage.enable (lib.mkMerge [
|
||||
{
|
||||
# create any folders that we need to have for our exports
|
||||
systemd.tmpfiles.rules =
|
||||
[
|
||||
"d ${config.host.network_storage.export_directory} 2775 nobody nogroup -"
|
||||
]
|
||||
++ (
|
||||
builtins.map (
|
||||
directory: "d ${directory._directory} 2770 ${directory.user} ${directory.group}"
|
||||
)
|
||||
config.host.network_storage.directories
|
||||
);
|
||||
|
||||
# set up any bind mounts that we need for our exports
|
||||
fileSystems = builtins.listToAttrs (
|
||||
builtins.map (directory:
|
||||
lib.attrsets.nameValuePair directory._directory {
|
||||
device = directory.bind;
|
||||
options = ["bind"];
|
||||
}) (
|
||||
builtins.filter (directory: directory.bind != null) config.host.network_storage.directories
|
||||
)
|
||||
);
|
||||
}
|
||||
# (lib.mkIf config.host.impermanence.enable {
|
||||
# environment.persistence."/persist/replicate/system/root" = {
|
||||
# enable = true;
|
||||
# hideMounts = true;
|
||||
# directories = [
|
||||
# config.host.network_storage.export_directory
|
||||
# ];
|
||||
# };
|
||||
# })
|
||||
]);
|
||||
};
|
||||
}
|
||||
109
modules/nixos/programs/network_storage/nfs.nix
Normal file
109
modules/nixos/programs/network_storage/nfs.nix
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
{...}: {
|
||||
flake.nixosModules.network-storage-nfs = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
options = {
|
||||
host.network_storage.nfs = {
|
||||
enable = lib.mkEnableOption "is this server going to export network storage as nfs shares";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 2049;
|
||||
description = "port that nfs will run on";
|
||||
};
|
||||
directories = lib.mkOption {
|
||||
type = lib.types.listOf (
|
||||
lib.types.enum (
|
||||
builtins.map (
|
||||
directory: directory.folder
|
||||
)
|
||||
config.host.network_storage.directories
|
||||
)
|
||||
);
|
||||
description = "list of exported directories to be exported via nfs";
|
||||
};
|
||||
};
|
||||
};
|
||||
config = lib.mkMerge [
|
||||
{
|
||||
assertions = [
|
||||
{
|
||||
assertion = !(config.host.network_storage.nfs.enable && !config.host.network_storage.enable);
|
||||
message = "nfs cant be enabled with network storage disabled";
|
||||
}
|
||||
];
|
||||
}
|
||||
(
|
||||
lib.mkIf (config.host.network_storage.nfs.enable && config.host.network_storage.enable) {
|
||||
services.nfs = {
|
||||
settings = {
|
||||
nfsd = {
|
||||
threads = 32;
|
||||
port = config.host.network_storage.nfs.port;
|
||||
};
|
||||
};
|
||||
server = {
|
||||
enable = true;
|
||||
|
||||
lockdPort = 4001;
|
||||
mountdPort = 4002;
|
||||
statdPort = 4000;
|
||||
|
||||
exports = lib.strings.concatLines (
|
||||
[
|
||||
"${config.host.network_storage.export_directory} 100.64.0.0/10(rw,fsid=0,no_subtree_check)"
|
||||
]
|
||||
++ (
|
||||
lib.lists.imap0 (
|
||||
i: directory: let
|
||||
createOptions = fsid: "(rw,fsid=${toString fsid},nohide,insecure,no_subtree_check)";
|
||||
addresses = [
|
||||
# loopback
|
||||
"127.0.0.1"
|
||||
"::1"
|
||||
# tailscale
|
||||
"100.64.0.0/10"
|
||||
"fd7a:115c:a1e0::/48"
|
||||
];
|
||||
options = lib.strings.concatStrings (
|
||||
lib.strings.intersperse " " (
|
||||
lib.lists.imap0 (index: address: "${address}${createOptions (1 + (i * (builtins.length addresses)) + index)}") addresses
|
||||
)
|
||||
);
|
||||
in "${directory._directory} ${options}"
|
||||
)
|
||||
(
|
||||
builtins.filter (
|
||||
directory: lib.lists.any (target: target == directory.folder) config.host.network_storage.nfs.directories
|
||||
)
|
||||
config.host.network_storage.directories
|
||||
)
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
networking.firewall = let
|
||||
ports = [
|
||||
111
|
||||
config.host.network_storage.nfs.port
|
||||
config.services.nfs.server.lockdPort
|
||||
config.services.nfs.server.mountdPort
|
||||
config.services.nfs.server.statdPort
|
||||
20048
|
||||
];
|
||||
in {
|
||||
# Allow NFS on Tailscale interface
|
||||
interfaces.${config.services.tailscale.interfaceName} = {
|
||||
allowedTCPPorts = ports;
|
||||
allowedUDPPorts = ports;
|
||||
};
|
||||
# Allow NFS on local network (assuming default interface)
|
||||
allowedTCPPorts = ports;
|
||||
allowedUDPPorts = ports;
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
}
|
||||
50
modules/nixos/programs/panoramax/database.nix
Normal file
50
modules/nixos/programs/panoramax/database.nix
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
{...}: {
  # Database integration for panoramax: optionally back the service with
  # PostgreSQL (registered through the shared postgres module) instead of
  # the default SQLite storage.
  flake.nixosModules.panoramax-database = {
    lib,
    config,
    ...
  }: let
    cfg = config.services.panoramax;
    pgCfg = cfg.database.postgres;
  in {
    options.services.panoramax.database.postgres = {
      enable = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = "Use PostgreSQL instead of SQLite";
      };

      user = lib.mkOption {
        type = lib.types.str;
        default = "panoramax";
        description = "Database user name";
      };

      database = lib.mkOption {
        type = lib.types.str;
        default = "panoramax";
        description = "Database name";
      };
    };

    config = lib.mkIf cfg.enable {
      # Fail evaluation early rather than deploying a broken service.
      assertions = [
        {
          assertion = !pgCfg.enable || config.services.postgresql.enable;
          message = "PostgreSQL must be enabled when using postgres database for Panoramax";
        }
      ];

      # Hand the database/user pair to the shared postgres-service module,
      # which takes care of ensureDatabases/ensureUsers and ident mapping.
      services.postgresql.databases.panoramax = lib.mkIf pgCfg.enable {
        enable = true;
        user = pgCfg.user;
        database = pgCfg.database;
      };

      # Hard ordering dependency: panoramax must not start before postgres.
      systemd.services.panoramax = lib.mkIf pgCfg.enable {
        requires = [
          config.systemd.services.postgresql.name
        ];
      };
    };
  };
}
|
||||
13
modules/nixos/programs/panoramax/default.nix
Normal file
13
modules/nixos/programs/panoramax/default.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{config, ...}: {
  # Aggregate module: importing `panoramax` pulls in every panoramax
  # sub-module (service, database, proxy, fail2ban, storage) at once.
  flake.nixosModules.panoramax.imports = with config.flake.nixosModules; [
    panoramax-service
    panoramax-database
    panoramax-proxy
    panoramax-fail2ban
    panoramax-storage
  ];
}
|
||||
13
modules/nixos/programs/panoramax/fail2ban.nix
Normal file
13
modules/nixos/programs/panoramax/fail2ban.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{...}: {
  # fail2ban integration for panoramax. Currently a stub: the guard is in
  # place, but no jail/filter has been written yet.
  flake.nixosModules.panoramax-fail2ban = {
    lib,
    config,
    ...
  }: let
    # Only meaningful when both panoramax and fail2ban are active.
    wanted = config.services.panoramax.enable && config.services.fail2ban.enable;
  in {
    config = lib.mkIf wanted {
      # TODO: configure options for fail2ban
      # This is a placeholder - panoramax fail2ban configuration would need to be defined
      # based on the specific log patterns and security requirements
    };
  };
}
|
||||
361
modules/nixos/programs/panoramax/panoramax.nix
Normal file
361
modules/nixos/programs/panoramax/panoramax.nix
Normal file
|
|
@ -0,0 +1,361 @@
|
|||
{...}: {
  # Core panoramax service module:
  #   * option surface for the API server (services.panoramax.*)
  #   * option surface for the sgblur blurring sidecar (services.sgblur.*)
  #   * systemd units, tmpfiles, firewall rules, and (optionally) a locally
  #     provisioned PostgreSQL database with PostGIS.
  flake.nixosModules.panoramax-service = {
    config,
    lib,
    pkgs,
    ...
  }: let
    cfg = config.services.panoramax;
    sgb = config.services.sgblur;
    # One shared Python environment so PATH and ExecStart resolve the exact
    # same interpreter and site-packages (previously duplicated inline).
    pythonEnv = pkgs.python3.withPackages (ps: with ps; [cfg.package waitress]);
  in {
    options.services = {
      panoramax = {
        enable = lib.mkEnableOption "panoramax";

        package = lib.mkOption {
          type = lib.types.package;
          default = pkgs.panoramax;
          description = "The panoramax package to use";
        };

        user = lib.mkOption {
          type = lib.types.str;
          default = "panoramax";
          description = "The user panoramax should run as.";
        };

        group = lib.mkOption {
          type = lib.types.str;
          default = "panoramax";
          description = "The group panoramax should run as.";
        };

        host = lib.mkOption {
          type = lib.types.str;
          default = "127.0.0.1";
          description = "Host to bind the panoramax service to";
        };

        port = lib.mkOption {
          type = lib.types.nullOr lib.types.port;
          default = 5000;
          description = "Port for the panoramax service";
        };

        openFirewall = lib.mkOption {
          type = lib.types.bool;
          default = false;
          description = "Whether to open the panoramax port in the firewall";
        };

        settings = {
          urlScheme = lib.mkOption {
            type = lib.types.enum ["http" "https"];
            default = "https";
            description = "URL scheme for the application";
          };

          storage.fsUrl = lib.mkOption {
            type = lib.types.nullOr lib.types.str;
            default = "/var/lib/panoramax/storage";
            description = "File system URL for storage";
          };

          infrastructure.nbProxies = lib.mkOption {
            type = lib.types.nullOr lib.types.int;
            default = 1;
            description = "Number of proxies in front of the application";
          };

          flask = {
            secretKey = lib.mkOption {
              type = lib.types.nullOr lib.types.str;
              default = null;
              description = "Flask secret key for session security";
            };

            sessionCookieDomain = lib.mkOption {
              type = lib.types.nullOr lib.types.str;
              default = null;
              description = "Flask session cookie domain";
            };
          };

          api.pictures = {
            licenseSpdxId = lib.mkOption {
              type = lib.types.nullOr lib.types.str;
              default = null;
              description = "SPDX license identifier for API pictures";
            };

            licenseUrl = lib.mkOption {
              type = lib.types.nullOr lib.types.str;
              default = null;
              description = "License URL for API pictures";
            };
          };

          extraEnvironment = lib.mkOption {
            type = lib.types.attrsOf lib.types.str;
            default = {};
            description = "Additional environment variables";
            example = {
              CUSTOM_SETTING = "value";
              DEBUG = "true";
            };
          };
        };

        database = {
          createDB = lib.mkOption {
            type = lib.types.bool;
            default = true;
            description = "Whether to automatically create the database and user";
          };

          name = lib.mkOption {
            type = lib.types.str;
            default = "panoramax";
            description = "The name of the panoramax database";
          };

          host = lib.mkOption {
            type = lib.types.nullOr lib.types.str;
            default = "/run/postgresql";
            description = "Hostname or address of the postgresql server. If an absolute path is given here, it will be interpreted as a unix socket path.";
          };

          port = lib.mkOption {
            type = lib.types.nullOr lib.types.port;
            default = 5432;
            description = "Port of the postgresql server.";
          };

          user = lib.mkOption {
            type = lib.types.nullOr lib.types.str;
            default = "panoramax";
            description = "The database user for panoramax.";
          };

          # TODO: password file for external database
        };

        sgblur = {
          # TODO: configs to bind to sgblur
        };
      };

      sgblur = {
        enable = lib.mkOption {
          type = lib.types.bool;
          default = false;
          description = "Whether to enable sgblur integration for face and license plate blurring";
        };

        package = lib.mkOption {
          type = lib.types.package;
          default = pkgs.sgblur;
          description = "The sgblur package to use";
        };

        port = lib.mkOption {
          type = lib.types.port;
          default = 8080;
          description = "Port for the sgblur service";
        };

        host = lib.mkOption {
          type = lib.types.str;
          default = "127.0.0.1";
          description = "Host to bind the sgblur service to";
        };

        url = lib.mkOption {
          type = lib.types.str;
          default = "http://127.0.0.1:8080";
          description = "URL where sgblur service is accessible";
        };
      };
    };

    config = lib.mkIf cfg.enable (lib.mkMerge [
      {
        # Dedicated system user/group owning the state and storage dirs.
        users.users.${cfg.user} = {
          isSystemUser = true;
          group = cfg.group;
          home = "/var/lib/panoramax";
          createHome = true;
        };

        users.groups.${cfg.group} = {};

        # Ensure the picture storage directory exists with correct ownership.
        systemd.tmpfiles.rules = [
          "d '${cfg.settings.storage.fsUrl}' 0755 ${cfg.user} ${cfg.group} - -"
        ];

        systemd.services.panoramax-api = {
          description = "Panoramax API server (self hosted map street view)";
          after = ["network.target" "postgresql.service"];
          wantedBy = ["multi-user.target"];

          environment =
            {
              # Core Flask configuration
              FLASK_APP = "geovisio";

              # Storage configuration
              FS_URL = cfg.settings.storage.fsUrl;

              # Infrastructure configuration
              INFRA_NB_PROXIES = toString cfg.settings.infrastructure.nbProxies;

              # Application configuration
              PORT = toString cfg.port;

              # Python path to include the panoramax package
              PYTHONPATH = "${cfg.package}/${pkgs.python3.sitePackages}";
            }
            # Unix-socket connections use a single DB_URL; TCP connections
            # use the discrete DB_* variables instead.
            // (
              if cfg.database.host == "/run/postgresql"
              then {
                DB_URL = "postgresql://${cfg.database.user}@/${cfg.database.name}?host=/run/postgresql";
              }
              else {
                DB_HOST = cfg.database.host;
                DB_PORT = toString cfg.database.port;
                DB_USERNAME = cfg.database.user;
                DB_NAME = cfg.database.name;
              }
            )
            # NOTE(review): the secret key lands in the unit environment, which
            # is visible via `systemctl show` — consider a credentials file.
            // (lib.optionalAttrs (cfg.settings.flask.secretKey != null) {
              FLASK_SECRET_KEY = cfg.settings.flask.secretKey;
            })
            // (lib.optionalAttrs (cfg.settings.flask.sessionCookieDomain != null) {
              FLASK_SESSION_COOKIE_DOMAIN = cfg.settings.flask.sessionCookieDomain;
            })
            // (lib.optionalAttrs (cfg.settings.api.pictures.licenseSpdxId != null) {
              API_PICTURES_LICENSE_SPDX_ID = cfg.settings.api.pictures.licenseSpdxId;
            })
            // (lib.optionalAttrs (cfg.settings.api.pictures.licenseUrl != null) {
              API_PICTURES_LICENSE_URL = cfg.settings.api.pictures.licenseUrl;
            })
            // (lib.optionalAttrs sgb.enable {
              SGBLUR_API_URL = sgb.url;
            })
            // cfg.settings.extraEnvironment;

          path = [pythonEnv];

          serviceConfig = {
            ExecStart = "${pythonEnv}/bin/waitress-serve --port ${toString cfg.port} --call geovisio:create_app";
            User = cfg.user;
            Group = cfg.group;
            WorkingDirectory = "/var/lib/panoramax";
            Restart = "always";
            RestartSec = 5;

            # Security hardening
            PrivateTmp = true;
            ProtectSystem = "strict";
            ProtectHome = true;
            ReadWritePaths = [
              "/var/lib/panoramax"
              cfg.settings.storage.fsUrl
            ];
            NoNewPrivileges = true;
            PrivateDevices = true;
            ProtectKernelTunables = true;
            ProtectKernelModules = true;
            ProtectControlGroups = true;
            RestrictSUIDSGID = true;
            RestrictRealtime = true;
            RestrictNamespaces = true;
            LockPersonality = true;
            # NOTE(review): MemoryDenyWriteExecute can break Python native
            # extensions that need writable+executable mappings — confirm.
            MemoryDenyWriteExecute = true;
            SystemCallArchitectures = "native";
          };
        };

        # Open firewall if requested
        networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
          cfg.port
        ];
      }
      (lib.mkIf sgb.enable {
        # SGBlur service configuration
        systemd.services.sgblur = {
          description = "SGBlur face and license plate blurring service";
          after = ["network.target"];
          wantedBy = ["multi-user.target"];

          path = with pkgs; [
            sgb.package
            python3
            python3Packages.waitress
          ];

          serviceConfig = {
            ExecStart = "${pkgs.python3Packages.waitress}/bin/waitress-serve --host ${sgb.host} --port ${toString sgb.port} src.detect.detect_api:app";
            WorkingDirectory = "${sgb.package}";
            Restart = "always";
            RestartSec = 5;

            # Basic security hardening
            PrivateTmp = true;
            ProtectSystem = "strict";
            ProtectHome = true;
            NoNewPrivileges = true;
            PrivateDevices = true;
            ProtectKernelTunables = true;
            ProtectKernelModules = true;
            ProtectControlGroups = true;
            RestrictSUIDSGID = true;
            RestrictRealtime = true;
            RestrictNamespaces = true;
            LockPersonality = true;
            MemoryDenyWriteExecute = true;
            SystemCallArchitectures = "native";
          };
        };

        networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
          sgb.port
        ];
      })
      (lib.mkIf cfg.database.createDB {
        services.postgresql = {
          enable = true;
          # The enclosing mkIf already gates on createDB, so the per-attribute
          # mkIf wrappers the original carried here were redundant.
          ensureDatabases = [cfg.database.name];
          ensureUsers = [
            {
              name = cfg.database.user;
              ensureDBOwnership = true;
              ensureClauses.login = true;
            }
          ];
          extensions = ps: with ps; [postgis];
        };
        systemd.services.postgresql.serviceConfig.ExecStartPost = let
          sqlFile = pkgs.writeText "panoramax-postgis-setup.sql" ''
            CREATE EXTENSION IF NOT EXISTS postgis;

            -- TODO: how can we ensure that this runs after the databases have been created
            -- ALTER DATABASE ${cfg.database.name} SET TIMEZONE TO 'UTC';

            GRANT SET ON PARAMETER session_replication_role TO ${cfg.database.user};
          '';
        in [
          # BUGFIX: connect to the panoramax *database* (database.name); the
          # original passed database.user to -d, which only worked while the
          # two options happened to share the default value "panoramax".
          ''
            ${lib.getExe' config.services.postgresql.package "psql"} -d "${cfg.database.name}" -f "${sqlFile}"
          ''
        ];
      })
    ]);
  };
}
|
||||
41
modules/nixos/programs/panoramax/proxy.nix
Normal file
41
modules/nixos/programs/panoramax/proxy.nix
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
{...}: {
  # Reverse-proxy wiring for panoramax: publishes the API behind the shared
  # reverseProxy module under the configured domain(s).
  flake.nixosModules.panoramax-proxy = {
    lib,
    config,
    ...
  }: let
    cfg = config.services.panoramax;
  in {
    options.services.panoramax = {
      domain = lib.mkOption {
        type = lib.types.str;
        description = "domain that panoramax will be hosted at";
        default = "panoramax.arpa";
      };

      extraDomains = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "extra domains that should be configured for panoramax";
        default = [];
      };

      # Proxying follows the service + global reverse proxy by default.
      reverseProxy.enable = lib.mkOption {
        type = lib.types.bool;
        default = cfg.enable && config.services.reverseProxy.enable;
      };
    };

    config = lib.mkIf cfg.reverseProxy.enable {
      services.reverseProxy.services.panoramax = {
        target = "http://localhost:${toString cfg.port}";
        domain = cfg.domain;
        extraDomains = cfg.extraDomains;

        settings = {
          proxyWebsockets.enable = true;
          forwardHeaders.enable = true;
          # Generous limits: picture uploads are large and slow.
          maxBodySize = 100000;
          timeout = 300;
        };
      };
    };
  };
}
|
||||
21
modules/nixos/programs/panoramax/storage.nix
Normal file
21
modules/nixos/programs/panoramax/storage.nix
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{...}: {
  # Persistence layout for panoramax: keep its state directory on the
  # replicated root dataset when impermanence is active.
  flake.nixosModules.panoramax-storage = {
    lib,
    config,
    ...
  }: let
    cfg = config.services.panoramax;
  in {
    options.services.panoramax.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable && config.storage.impermanence.enable;
    };

    config = lib.mkIf cfg.enable {
      storage.datasets.replicate."system/root".directories."/var/lib/panoramax" =
        lib.mkIf cfg.impermanence.enable {
          owner.name = "panoramax";
          group.name = "panoramax";
        };
    };
  };
}
|
||||
32
modules/nixos/programs/paperless/database.nix
Normal file
32
modules/nixos/programs/paperless/database.nix
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
{...}: {
  # Database wiring for paperless: defaults to the upstream module's
  # locally-created PostgreSQL database and sanity-checks the result.
  flake.nixosModules.paperless-database = {
    config,
    lib,
    ...
  }: let
    cfg = config.services.paperless;
    pg = config.services.postgresql;
  in {
    config = lib.mkIf cfg.enable {
      # Verify that the upstream paperless module actually provisioned the
      # database and user we rely on.
      assertions = [
        {
          assertion = !cfg.database.createLocally || pg.enable;
          message = "PostgreSQL must be enabled when using local postgres database for Paperless";
        }
        {
          assertion = !cfg.database.createLocally || (builtins.any (db: db == "paperless") pg.ensureDatabases);
          message = "Paperless built-in database creation failed - expected 'paperless' in ensureDatabases but got: ${builtins.toString pg.ensureDatabases}";
        }
        {
          assertion = !cfg.database.createLocally || (builtins.any (user: user.name == "paperless") pg.ensureUsers);
          message = "Paperless built-in user creation failed - expected user 'paperless' in ensureUsers but got: ${builtins.toString (builtins.map (u: u.name) pg.ensureUsers)}";
        }
      ];

      services.paperless.database.createLocally = lib.mkDefault true;

      # Hard ordering dependency: the scheduler needs postgres up first.
      systemd.services.paperless-scheduler = lib.mkIf cfg.database.createLocally {
        requires = [
          config.systemd.services.postgresql.name
        ];
      };
    };
  };
}
|
||||
13
modules/nixos/programs/paperless/default.nix
Normal file
13
modules/nixos/programs/paperless/default.nix
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
{config, ...}: {
  # Aggregate module: importing `paperless` pulls in every paperless
  # sub-module (service, database, proxy, fail2ban, storage) at once.
  flake.nixosModules.paperless.imports = with config.flake.nixosModules; [
    paperless-service
    paperless-database
    paperless-proxy
    paperless-fail2ban
    paperless-storage
  ];
}
|
||||
36
modules/nixos/programs/paperless/fail2ban.nix
Normal file
36
modules/nixos/programs/paperless/fail2ban.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
{...}: {
  # fail2ban integration for paperless: install a log filter matching failed
  # logins and ban the offending host on repeated failures.
  flake.nixosModules.paperless-fail2ban = {
    config,
    lib,
    pkgs,
    ...
  }: {
    # Only active when both paperless and fail2ban are enabled.
    config = lib.mkIf (config.services.paperless.enable && config.services.fail2ban.enable) {
      environment.etc = {
        # Filter definition: matches paperless "Login failed" log lines and
        # captures the client address via fail2ban's <HOST> placeholder.
        # mkDefault keeps the override priority low while mkAfter orders the
        # text after any other contributions to the same file.
        "fail2ban/filter.d/paperless.local".text = (
          pkgs.lib.mkDefault (pkgs.lib.mkAfter ''
            [Definition]
            failregex = Login failed for user `.*` from (?:IP|private IP) `<HOST>`\.$
            ignoreregex =

          '')
        );
      };

      services.fail2ban = {
        jails = {
          paperless.settings = {
            enabled = true;
            filter = "paperless";
            # Ban via iptables on the standard HTTP/HTTPS ports.
            action = ''iptables-multiport[name=HTTP, port="http,https"]'';
            # Watch every paperless log file in its data directory.
            logpath = "${config.services.paperless.dataDir}/log/*.log";
            backend = "auto";
            # 5 failures within 10 minutes => 10 minute ban.
            findtime = 600;
            bantime = 600;
            maxretry = 5;
          };
        };
      };
    };
  };
}
|
||||
29
modules/nixos/programs/paperless/paperless.nix
Normal file
29
modules/nixos/programs/paperless/paperless.nix
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
{...}: {
  # Paperless service customisation: Tika support plus a PostgreSQL backend
  # reached over the local unix socket with peer authentication.
  flake.nixosModules.paperless-service = {
    config,
    lib,
    ...
  }: let
    # One name serves as both the database role and the database itself.
    dbName = config.services.paperless.database.user;
  in {
    options.services.paperless.database.user = lib.mkOption {
      type = lib.types.str;
      description = "what is the user and database that we are going to use for paperless";
      default = "paperless";
    };

    config = lib.mkIf config.services.paperless.enable {
      services.paperless = {
        configureTika = true;
        settings = {
          PAPERLESS_DBENGINE = "postgresql";
          PAPERLESS_DBHOST = "/run/postgresql";
          PAPERLESS_DBNAME = dbName;
          PAPERLESS_DBUSER = dbName;
        };
      };
    };
  };
}
|
||||
35
modules/nixos/programs/paperless/proxy.nix
Normal file
35
modules/nixos/programs/paperless/proxy.nix
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
{...}: {
  # Reverse-proxy wiring for paperless behind the shared reverseProxy module.
  flake.nixosModules.paperless-proxy = {
    config,
    lib,
    ...
  }: let
    cfg = config.services.paperless;
  in {
    options.services.paperless = {
      extraDomains = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "extra domains that should be configured for paperless";
        default = [];
      };

      # Proxying follows the service + global reverse proxy by default.
      reverseProxy.enable = lib.mkOption {
        type = lib.types.bool;
        default = cfg.enable && config.services.reverseProxy.enable;
      };
    };

    config = lib.mkIf cfg.reverseProxy.enable {
      services.reverseProxy.services.paperless = {
        target = "http://${cfg.address}:${toString cfg.port}";
        domain = cfg.domain;
        extraDomains = cfg.extraDomains;

        settings = {
          proxyWebsockets.enable = true;
          forwardHeaders.enable = true;
          # Large limit so document uploads are not rejected by the proxy.
          maxBodySize = 50000;
        };
      };
    };
  };
}
|
||||
23
modules/nixos/programs/paperless/storage.nix
Normal file
23
modules/nixos/programs/paperless/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
  # Persistence layout for paperless: keep its data directory on the
  # replicated root dataset when impermanence is active.
  flake.nixosModules.paperless-storage = {
    config,
    lib,
    ...
  }: let
    cfg = config.services.paperless;
    dataDir = "/var/lib/paperless";
  in {
    options.services.paperless.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable && config.storage.impermanence.enable;
    };

    config = lib.mkIf cfg.enable {
      storage.datasets.replicate."system/root".directories."${dataDir}" =
        lib.mkIf cfg.impermanence.enable {
          owner.name = "paperless";
          group.name = "paperless";
        };
    };
  };
}
|
||||
10
modules/nixos/programs/postgres/default.nix
Normal file
10
modules/nixos/programs/postgres/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: {
  # Aggregate module: importing `postgres` pulls in the service and its
  # storage layout together.
  flake.nixosModules.postgres.imports = with config.flake.nixosModules; [
    postgres-service
    postgres-storage
  ];
}
|
||||
124
modules/nixos/programs/postgres/postgres.nix
Normal file
124
modules/nixos/programs/postgres/postgres.nix
Normal file
|
|
@ -0,0 +1,124 @@
|
|||
{...}: {
  # Shared PostgreSQL module: lets other service modules declare the
  # database/user pairs they need (services.postgresql.databases.*), adds
  # ad-hoc databases and admin users, and builds the ident map / pg_hba
  # entries so local peer authentication works for every declared role.
  flake.nixosModules.postgres-service = {
    config,
    lib,
    pkgs,
    ...
  }: let
    # Databases declared by service modules and switched on.
    enabledDatabases = lib.filterAttrs (_: db: db.enable) config.services.postgresql.databases;
    extraDatabasesList = config.services.postgresql.extraDatabases;

    # ensureUsers entries for declared databases that also want a user.
    serviceDatabaseUsers = lib.mapAttrsToList (_: db: {
      name = db.user;
      ensureDBOwnership = true;
    }) (lib.filterAttrs (_: db: db.ensureUser) enabledDatabases);

    # Ad-hoc databases get a same-named owning user each.
    extraDatabaseUsers =
      builtins.map (dbName: {
        name = dbName;
        ensureDBOwnership = true;
      })
      extraDatabasesList;

    serviceDatabases = lib.mapAttrsToList (_: db: db.database) enabledDatabases;
    extraDatabaseNames = extraDatabasesList;

    # pg_ident lines mapping each system user onto its same-named DB role.
    serviceUserMappings = lib.mapAttrsToList (_: db: "user_map ${db.user} ${db.user}") enabledDatabases;
    extraUserMappings = builtins.map (dbName: "user_map ${dbName} ${dbName}") extraDatabasesList;

    # Upstream NixOS services (forgejo/immich/paperless) create their own
    # databases outside the `databases` option, so their ident mappings are
    # added here explicitly when those services use local postgres.
    builtinServiceMappings = let
      forgejoMapping = lib.optional (config.services.forgejo.enable && config.services.forgejo.database.type == "postgres") "user_map forgejo forgejo";
      immichMapping = lib.optional (config.services.immich.enable && config.services.immich.database.enable) "user_map immich immich";
      paperlessMapping = lib.optional (config.services.paperless.enable && config.services.paperless.database.createLocally) "user_map paperless paperless";
    in
      forgejoMapping ++ immichMapping ++ paperlessMapping;
  in {
    options = {
      services.postgresql = {
        databases = lib.mkOption {
          # Submodule keyed by attr name; user and database default to the key.
          type = lib.types.attrsOf (lib.types.submodule ({name, ...}: {
            options = {
              enable = lib.mkOption {
                type = lib.types.bool;
                default = false;
                description = "Whether to create this database and user";
              };
              user = lib.mkOption {
                type = lib.types.str;
                default = name;
                description = "Database user name";
              };
              database = lib.mkOption {
                type = lib.types.str;
                default = name;
                description = "Database name";
              };
              ensureUser = lib.mkOption {
                type = lib.types.bool;
                default = true;
                description = "Whether to ensure the user exists";
              };
            };
          }));
          default = {};
          description = "Databases to create for services";
        };

        extraDatabases = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          default = [];
          description = "Additional databases to create (user name will match database name)";
          example = ["custom_db" "test_db"];
        };

        adminUsers = lib.mkOption {
          type = lib.types.listOf lib.types.str;
          default = [];
          description = "System users who should have PostgreSQL superuser access";
          example = ["leyla" "admin"];
        };
      };
    };

    config = lib.mkIf config.services.postgresql.enable {
      services = {
        postgresql = {
          package = pkgs.postgresql_16;

          # The bare `postgres` role plus every declared/extra database user.
          ensureUsers =
            [
              {name = "postgres";}
            ]
            ++ serviceDatabaseUsers ++ extraDatabaseUsers;

          ensureDatabases = serviceDatabases ++ extraDatabaseNames;

          # pg_ident.conf: admins map onto the postgres superuser role,
          # clients map onto their own same-named role.
          identMap =
            ''
              # ArbitraryMapName systemUser DBUser

              # Administration Users
              superuser_map root postgres
              superuser_map postgres postgres
            ''
            + (
              lib.strings.concatLines (builtins.map (user: "superuser_map ${user} postgres") config.services.postgresql.adminUsers)
            )
            + ''

              # Client Users
            ''
            + (
              lib.strings.concatLines (serviceUserMappings ++ extraUserMappings ++ builtinServiceMappings)
            );

          # pg_hba.conf: local peer auth only, routed through the maps above.
          # mkOverride 10 beats the upstream default so no other rules apply.
          authentication = pkgs.lib.mkOverride 10 ''
            # type database DBuser origin-address auth-method optional_ident_map
            local all postgres peer map=superuser_map
            local sameuser all peer map=user_map
          '';
        };
      };
    };
  };
}
|
||||
23
modules/nixos/programs/postgres/storage.nix
Normal file
23
modules/nixos/programs/postgres/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
  # Persistence layout for PostgreSQL: keep the version-16 data directory on
  # the replicated root dataset when impermanence is active.
  flake.nixosModules.postgres-storage = {
    config,
    lib,
    ...
  }: let
    cfg = config.services.postgresql;
    # Matches pkgs.postgresql_16 pinned in postgres-service.
    dataDir = "/var/lib/postgresql/16";
  in {
    options.services.postgresql.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable && config.storage.impermanence.enable;
    };

    config = lib.mkIf cfg.enable {
      storage.datasets.replicate."system/root".directories."${dataDir}" =
        lib.mkIf cfg.impermanence.enable {
          owner.name = "postgres";
          group.name = "postgres";
        };
    };
  };
}
|
||||
10
modules/nixos/programs/qbittorent/default.nix
Normal file
10
modules/nixos/programs/qbittorent/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: {
  # Aggregate module: importing `qbittorent` pulls in the service options and
  # the storage layout together.
  flake.nixosModules.qbittorent.imports = with config.flake.nixosModules; [
    qbittorent-service
    qbittorent-storage
  ];
}
|
||||
20
modules/nixos/programs/qbittorent/qbittorent.nix
Normal file
20
modules/nixos/programs/qbittorent/qbittorent.nix
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
{...}: {
  # Extra option surface for the qbittorrent service: where downloaded media
  # should live (consumed by qbittorent-storage).
  flake.nixosModules.qbittorent-service = {
    lib,
    config,
    ...
  }: {
    options.services.qbittorrent = {
      mediaDir = lib.mkOption {
        type = lib.types.path;
        # lib.mdDoc is deprecated and removed from current nixpkgs (option
        # descriptions are rendered as markdown by default), so use a plain
        # string to keep evaluation working on newer channels.
        description = ''
          The directory to create to store qbittorrent media.
        '';
      };
    };

    config = lib.mkIf config.services.qbittorrent.enable {
      # Main qbittorrent configuration goes here if needed
    };
  };
}
|
||||
48
modules/nixos/programs/qbittorent/storage.nix
Normal file
48
modules/nixos/programs/qbittorent/storage.nix
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
{...}: {
  # Persistence layout for qBittorrent under impermanence: the profile lives
  # on the root dataset, downloaded media on the shared media dataset.
  flake.nixosModules.qbittorent-storage = {
    lib,
    config,
    ...
  }: let
    cfg = config.services.qbittorrent;
    profileDir = "/var/lib/qBittorrent/";
  in {
    options.services.qbittorrent.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable && config.storage.impermanence.enable;
    };

    config = lib.mkIf cfg.enable {
      storage.datasets.replicate = {
        # qBittorrent's own configuration/state.
        "system/root".directories."${profileDir}" = lib.mkIf cfg.impermanence.enable {
          owner.name = "qbittorrent";
          group.name = "qbittorrent";
        };

        # Media directory: owner/group get rwx, everyone else read + traverse.
        "system/media" = {
          mount = "/persist/replicate/system/media";

          directories."${cfg.mediaDir}" = lib.mkIf cfg.impermanence.enable {
            owner = {
              name = "qbittorrent";
              permissions = {
                read = true;
                write = true;
                execute = true;
              };
            };
            group = {
              name = "qbittorrent";
              permissions = {
                read = true;
                write = true;
                execute = true;
              };
            };
            other.permissions = {
              read = true;
              write = false;
              execute = true;
            };
          };
        };
      };
    };
  };
}
|
||||
9
modules/nixos/programs/radarr/default.nix
Normal file
9
modules/nixos/programs/radarr/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{config, ...}: {
  # Aggregate module: radarr currently only carries a storage layout.
  flake.nixosModules.radarr.imports = with config.flake.nixosModules; [
    radarr-storage
  ];
}
|
||||
23
modules/nixos/programs/radarr/storage.nix
Normal file
23
modules/nixos/programs/radarr/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
  # Persistence layout for radarr: keep its config directory on the
  # replicated root dataset when impermanence is active.
  flake.nixosModules.radarr-storage = {
    lib,
    config,
    ...
  }: let
    cfg = config.services.radarr;
    dataDir = "/var/lib/radarr/.config/Radarr";
  in {
    options.services.radarr.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable && config.storage.impermanence.enable;
    };

    config = lib.mkIf cfg.enable {
      storage.datasets.replicate."system/root".directories."${dataDir}" =
        lib.mkIf cfg.impermanence.enable {
          owner.name = "radarr";
          group.name = "radarr";
        };
    };
  };
}
|
||||
10
modules/nixos/programs/reverseProxy/default.nix
Normal file
10
modules/nixos/programs/reverseProxy/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.reverse-proxy = {
|
||||
imports = [
|
||||
mod.reverse-proxy-service
|
||||
mod.reverse-proxy-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
178
modules/nixos/programs/reverseProxy/reverseProxy.nix
Normal file
178
modules/nixos/programs/reverseProxy/reverseProxy.nix
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
{...}: {
|
||||
flake.nixosModules.reverse-proxy-service = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.reverseProxy = {
|
||||
enable = lib.mkEnableOption "turn on the reverse proxy";
|
||||
openFirewall = lib.mkEnableOption "open the firewall";
|
||||
refuseUnmatchedDomains = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = "refuse connections for domains that don't match any configured virtual hosts";
|
||||
default = true;
|
||||
};
|
||||
ports = {
|
||||
http = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
description = "HTTP port for the reverse proxy";
|
||||
default = 80;
|
||||
};
|
||||
https = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
description = "HTTPS port for the reverse proxy";
|
||||
default = 443;
|
||||
};
|
||||
};
|
||||
acme = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = "enable ACME certificate management";
|
||||
default = true;
|
||||
};
|
||||
email = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "email address for ACME certificate registration";
|
||||
};
|
||||
};
|
||||
services = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule ({name, ...}: {
|
||||
options = {
|
||||
target = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "what url will all traffic to this application be forwarded to";
|
||||
};
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "what is the default subdomain to be used for this application to be used for";
|
||||
default = name;
|
||||
};
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for this domain";
|
||||
default = [];
|
||||
};
|
||||
settings = {
|
||||
certificateRenewal.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = "auto renew certificates";
|
||||
default = true;
|
||||
};
|
||||
forceSSL.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = "auto renew certificates";
|
||||
default = true;
|
||||
};
|
||||
proxyHeaders = {
|
||||
enable = lib.mkEnableOption "should we proxy headers";
|
||||
timeout = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 60;
|
||||
};
|
||||
};
|
||||
proxyWebsockets.enable = lib.mkEnableOption "should the default config proxy websockets";
|
||||
forwardHeaders.enable = lib.mkEnableOption "should the default config contain forward headers";
|
||||
noSniff.enable = lib.mkEnableOption "should the no sniff flags be set";
|
||||
proxyBuffering.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
description = "should proxy buffering be enabled";
|
||||
default = true;
|
||||
};
|
||||
maxBodySize = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.int;
|
||||
description = "";
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
httpPort = config.services.reverseProxy.ports.http;
|
||||
httpsPort = config.services.reverseProxy.ports.https;
|
||||
in
|
||||
lib.mkIf config.services.reverseProxy.enable {
|
||||
security.acme = lib.mkIf config.services.reverseProxy.acme.enable {
|
||||
acceptTerms = true;
|
||||
defaults.email = config.services.reverseProxy.acme.email;
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts = lib.mkMerge (
|
||||
(lib.optionals config.services.reverseProxy.refuseUnmatchedDomains [
|
||||
{
|
||||
"_" = {
|
||||
default = true;
|
||||
serverName = "_";
|
||||
locations."/" = {
|
||||
extraConfig = ''
|
||||
return 444;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
])
|
||||
++ lib.lists.flatten (
|
||||
lib.attrsets.mapAttrsToList (
|
||||
name: service: let
|
||||
hostConfig = {
|
||||
forceSSL = service.settings.forceSSL.enable;
|
||||
enableACME = service.settings.certificateRenewal.enable;
|
||||
locations = {
|
||||
"/" = {
|
||||
proxyPass = service.target;
|
||||
proxyWebsockets = service.settings.proxyWebsockets.enable;
|
||||
recommendedProxySettings = service.settings.forwardHeaders.enable;
|
||||
extraConfig = let
|
||||
# Client upload size configuration
|
||||
maxBodySizeConfig =
|
||||
lib.optionalString (service.settings.maxBodySize != null)
|
||||
"client_max_body_size ${toString service.settings.maxBodySize}M;";
|
||||
|
||||
# Security header configuration
|
||||
noSniffConfig =
|
||||
lib.optionalString service.settings.noSniff.enable
|
||||
"add_header X-Content-Type-Options nosniff;";
|
||||
|
||||
# Proxy buffering configuration
|
||||
proxyBufferingConfig =
|
||||
lib.optionalString (!service.settings.proxyBuffering.enable)
|
||||
"proxy_buffering off;";
|
||||
|
||||
# Proxy timeout configuration
|
||||
proxyTimeoutConfig =
|
||||
lib.optionalString service.settings.proxyHeaders.enable
|
||||
''
|
||||
proxy_read_timeout ${toString service.settings.proxyHeaders.timeout}s;
|
||||
proxy_connect_timeout ${toString service.settings.proxyHeaders.timeout}s;
|
||||
proxy_send_timeout ${toString service.settings.proxyHeaders.timeout}s;
|
||||
'';
|
||||
in
|
||||
maxBodySizeConfig + noSniffConfig + proxyBufferingConfig + proxyTimeoutConfig;
|
||||
};
|
||||
};
|
||||
};
|
||||
in (
|
||||
[
|
||||
{
|
||||
${service.domain} = hostConfig;
|
||||
}
|
||||
]
|
||||
++ builtins.map (domain: {${domain} = hostConfig;})
|
||||
service.extraDomains
|
||||
)
|
||||
)
|
||||
config.services.reverseProxy.services
|
||||
)
|
||||
);
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = lib.mkIf config.services.reverseProxy.openFirewall [
|
||||
httpPort
|
||||
httpsPort
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/reverseProxy/storage.nix
Normal file
23
modules/nixos/programs/reverseProxy/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.reverse-proxy-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataDir = "/var/lib/acme";
|
||||
in {
|
||||
options.services.reverseProxy.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.reverseProxy.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.reverseProxy.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${dataDir}" = lib.mkIf config.services.reverseProxy.impermanence.enable {
|
||||
owner.name = "acme";
|
||||
group.name = "acme";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/searx/default.nix
Normal file
10
modules/nixos/programs/searx/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.searx = {
|
||||
imports = [
|
||||
mod.searx-service
|
||||
mod.searx-proxy
|
||||
];
|
||||
};
|
||||
}
|
||||
33
modules/nixos/programs/searx/proxy.nix
Normal file
33
modules/nixos/programs/searx/proxy.nix
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
{...}: {
|
||||
flake.nixosModules.searx-proxy = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
options.services.searx = {
|
||||
extraDomains = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = "extra domains that should be configured for searx";
|
||||
default = [];
|
||||
};
|
||||
reverseProxy = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.searx.enable && config.services.reverseProxy.enable;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.searx.reverseProxy.enable {
|
||||
services.reverseProxy.services.searx = {
|
||||
target = "http://localhost:${toString config.services.searx.settings.server.port}";
|
||||
domain = config.services.searx.domain;
|
||||
extraDomains = config.services.searx.extraDomains;
|
||||
|
||||
settings = {
|
||||
forwardHeaders.enable = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
61
modules/nixos/programs/searx/searx.nix
Normal file
61
modules/nixos/programs/searx/searx.nix
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
{...}: {
|
||||
flake.nixosModules.searx-service = {
|
||||
config,
|
||||
lib,
|
||||
inputs,
|
||||
...
|
||||
}: {
|
||||
config = lib.mkIf config.services.searx.enable {
|
||||
sops.secrets = {
|
||||
"services/searx" = {
|
||||
sopsFile = "${inputs.secrets}/defiant-services.yaml";
|
||||
};
|
||||
};
|
||||
|
||||
services.searx = {
|
||||
environmentFile = config.sops.secrets."services/searx".path;
|
||||
|
||||
# Rate limiting
|
||||
limiterSettings = {
|
||||
real_ip = {
|
||||
x_for = 1;
|
||||
ipv4_prefix = 32;
|
||||
ipv6_prefix = 56;
|
||||
};
|
||||
|
||||
botdetection = {
|
||||
ip_limit = {
|
||||
filter_link_local = true;
|
||||
link_token = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
settings = {
|
||||
server = {
|
||||
port = 8083;
|
||||
secret_key = "@SEARXNG_SECRET@";
|
||||
};
|
||||
|
||||
# Search engine settings
|
||||
search = {
|
||||
safe_search = 2;
|
||||
autocomplete_min = 2;
|
||||
autocomplete = "duckduckgo";
|
||||
};
|
||||
|
||||
# Enabled plugins
|
||||
enabled_plugins = [
|
||||
"Basic Calculator"
|
||||
"Hash plugin"
|
||||
"Tor check plugin"
|
||||
"Open Access DOI rewrite"
|
||||
"Hostnames plugin"
|
||||
"Unit converter plugin"
|
||||
"Tracker URL remover"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
9
modules/nixos/programs/sonarr/default.nix
Normal file
9
modules/nixos/programs/sonarr/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.sonarr = {
|
||||
imports = [
|
||||
mod.sonarr-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
23
modules/nixos/programs/sonarr/storage.nix
Normal file
23
modules/nixos/programs/sonarr/storage.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{...}: {
|
||||
flake.nixosModules.sonarr-storage = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
sonarr_data_directory = "/var/lib/sonarr/.config/NzbDrone";
|
||||
in {
|
||||
options.services.sonarr.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.sonarr.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.sonarr.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${sonarr_data_directory}" = lib.mkIf config.services.sonarr.impermanence.enable {
|
||||
owner.name = "sonarr";
|
||||
group.name = "sonarr";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
11
modules/nixos/programs/steam.nix
Normal file
11
modules/nixos/programs/steam.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
{...}: {
|
||||
flake.nixosModules.steam = {...}: {
|
||||
programs = {
|
||||
steam = {
|
||||
remotePlay.openFirewall = true; # Open ports in the firewall for Steam Remote Play
|
||||
dedicatedServer.openFirewall = true; # Open ports in the firewall for Source Dedicated Server
|
||||
localNetworkGameTransfers.openFirewall = true; # Open ports in the firewall for Steam Local Network Game Transfers
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/sync/default.nix
Normal file
10
modules/nixos/programs/sync/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.sync = {
|
||||
imports = [
|
||||
mod.sync-service
|
||||
mod.sync-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
34
modules/nixos/programs/sync/storage.nix
Normal file
34
modules/nixos/programs/sync/storage.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{...}: {
|
||||
flake.nixosModules.sync-storage = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
mountDir = "/mnt/sync";
|
||||
configDir = "/etc/syncthing";
|
||||
in {
|
||||
options = {
|
||||
services.syncthing.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.syncthing.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.syncthing.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories = {
|
||||
"${mountDir}" = lib.mkIf config.services.syncthing.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "syncthing";
|
||||
group.name = "syncthing";
|
||||
};
|
||||
"${configDir}" = lib.mkIf config.services.syncthing.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "syncthing";
|
||||
group.name = "syncthing";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
38
modules/nixos/programs/sync/sync.nix
Normal file
38
modules/nixos/programs/sync/sync.nix
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
{...}: {
|
||||
flake.nixosModules.sync-service = {
|
||||
config,
|
||||
lib,
|
||||
syncthingConfiguration,
|
||||
...
|
||||
}: let
|
||||
mountDir = "/mnt/sync";
|
||||
configDir = "/etc/syncthing";
|
||||
in {
|
||||
config = lib.mkMerge [
|
||||
{
|
||||
systemd = lib.mkIf config.services.syncthing.enable {
|
||||
tmpfiles.rules = [
|
||||
"A ${mountDir} - - - - u:syncthing:rwX,g:syncthing:rwX,o::-"
|
||||
"d ${mountDir} 2755 syncthing syncthing -"
|
||||
"d ${config.services.syncthing.dataDir} 775 syncthing syncthing -"
|
||||
"d ${config.services.syncthing.configDir} 755 syncthing syncthing -"
|
||||
];
|
||||
};
|
||||
}
|
||||
(lib.mkIf config.services.syncthing.enable (lib.mkMerge [
|
||||
{
|
||||
services.syncthing = {
|
||||
user = "syncthing";
|
||||
group = "syncthing";
|
||||
dataDir = "${mountDir}/default";
|
||||
configDir = configDir;
|
||||
overrideDevices = true;
|
||||
overrideFolders = true;
|
||||
configuration = syncthingConfiguration;
|
||||
deviceName = config.networking.hostName;
|
||||
};
|
||||
}
|
||||
]))
|
||||
];
|
||||
};
|
||||
}
|
||||
10
modules/nixos/programs/tailscale/default.nix
Normal file
10
modules/nixos/programs/tailscale/default.nix
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
flake.nixosModules.tailscale = {
|
||||
imports = [
|
||||
mod.tailscale-service
|
||||
mod.tailscale-storage
|
||||
];
|
||||
};
|
||||
}
|
||||
26
modules/nixos/programs/tailscale/storage.nix
Normal file
26
modules/nixos/programs/tailscale/storage.nix
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
{...}: {
|
||||
flake.nixosModules.tailscale-storage = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
tailscale_data_directory = "/var/lib/tailscale";
|
||||
in {
|
||||
options = {
|
||||
services.tailscale.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.tailscale.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.tailscale.enable {
|
||||
storage.datasets.replicate."system/root" = {
|
||||
directories."${tailscale_data_directory}" = lib.mkIf config.services.tailscale.impermanence.enable {
|
||||
enable = true;
|
||||
owner.name = "root";
|
||||
group.name = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
21
modules/nixos/programs/tailscale/tailscale.nix
Normal file
21
modules/nixos/programs/tailscale/tailscale.nix
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
{...}: {
|
||||
flake.nixosModules.tailscale-service = {
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
options = {
|
||||
host.tailscale = {
|
||||
enable = lib.mkEnableOption "should tailscale be enabled on this computer";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.tailscale.enable (
|
||||
lib.mkMerge [
|
||||
{
|
||||
# any configs we want shared between all machines
|
||||
}
|
||||
]
|
||||
);
|
||||
};
|
||||
}
|
||||
65
modules/nixos/programs/wyoming.nix
Normal file
65
modules/nixos/programs/wyoming.nix
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
{...}: {
|
||||
flake.nixosModules.wyoming = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options.services.wyoming.enable = lib.mkEnableOption "should wyoming be enabled on this device";
|
||||
config = lib.mkIf config.services.wyoming.enable (lib.mkMerge [
|
||||
{
|
||||
services.wyoming = {
|
||||
# Text to speech
|
||||
piper = {
|
||||
servers = {
|
||||
"en" = {
|
||||
enable = true;
|
||||
# see https://github.com/rhasspy/rhasspy3/blob/master/programs/tts/piper/script/download.py
|
||||
voice = "en-us-amy-low";
|
||||
uri = "tcp://0.0.0.0:10200";
|
||||
speaker = 0;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Speech to text
|
||||
faster-whisper = {
|
||||
servers = {
|
||||
"en" = {
|
||||
enable = true;
|
||||
# see https://github.com/rhasspy/rhasspy3/blob/master/programs/asr/faster-whisper/script/download.py
|
||||
model = "tiny-int8";
|
||||
language = "en";
|
||||
uri = "tcp://0.0.0.0:10300";
|
||||
device = "cpu";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
openwakeword = {
|
||||
enable = true;
|
||||
uri = "tcp://0.0.0.0:10400";
|
||||
# preloadModels = [
|
||||
# "ok_nabu"
|
||||
# ];
|
||||
# TODO: custom models
|
||||
};
|
||||
};
|
||||
|
||||
# needs access to /proc/cpuinfo
|
||||
systemd.services."wyoming-faster-whisper-en".serviceConfig.ProcSubset = lib.mkForce "all";
|
||||
}
|
||||
(lib.mkIf config.host.impermanence.enable {
|
||||
environment.persistence."/persist/replicate/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = "/var/lib/private/wyoming";
|
||||
mode = "0700";
|
||||
}
|
||||
];
|
||||
};
|
||||
})
|
||||
]);
|
||||
};
|
||||
}
|
||||
46
modules/nixos/ssh.nix
Normal file
46
modules/nixos/ssh.nix
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
{...}: {
|
||||
flake.nixosModules.nixos-ssh = {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options = {
|
||||
services.openssh.impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.openssh.enable && config.storage.impermanence.enable;
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
services = {
|
||||
openssh = {
|
||||
enable = true;
|
||||
ports = [22];
|
||||
settings = {
|
||||
PasswordAuthentication = false;
|
||||
UseDns = true;
|
||||
X11Forwarding = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
storage.datasets.replicate."system/root" = {
|
||||
files = lib.mkIf config.services.openssh.impermanence.enable (builtins.listToAttrs (
|
||||
lib.lists.flatten (
|
||||
builtins.map (hostKey: [
|
||||
{
|
||||
name = hostKey.path;
|
||||
value = {enable = true;};
|
||||
}
|
||||
{
|
||||
name = "${hostKey.path}.pub";
|
||||
value = {enable = true;};
|
||||
}
|
||||
])
|
||||
config.services.openssh.hostKeys
|
||||
)
|
||||
));
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
90
modules/nixos/storage/dataset.nix
Normal file
90
modules/nixos/storage/dataset.nix
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
{...}: let
|
||||
submodule = {lib, ...}: {name, ...}: {
|
||||
options = {
|
||||
type = lib.mkOption {
|
||||
type = lib.types.enum ["zfs_fs" "zfs_volume"];
|
||||
default = "zfs_fs";
|
||||
description = "Type of ZFS dataset (filesystem or volume)";
|
||||
};
|
||||
|
||||
acltype = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["off" "nfsv4" "posixacl"]);
|
||||
default = null;
|
||||
description = "Access control list type";
|
||||
};
|
||||
|
||||
relatime = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["on" "off"]);
|
||||
default = null;
|
||||
description = "Controls when access time is updated";
|
||||
};
|
||||
|
||||
atime = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["on" "off"]);
|
||||
default = null;
|
||||
description = "Controls whether access time is updated";
|
||||
};
|
||||
|
||||
xattr = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["on" "off" "sa" "dir"]);
|
||||
default = null;
|
||||
description = "Extended attribute storage method";
|
||||
};
|
||||
|
||||
compression = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["on" "off" "lz4" "gzip" "zstd" "lzjb" "zle"]);
|
||||
default = null;
|
||||
description = "Compression algorithm to use";
|
||||
};
|
||||
|
||||
sync = lib.mkOption {
|
||||
type = lib.types.nullOr (lib.types.enum ["standard" "always" "disabled"]);
|
||||
default = null;
|
||||
description = "Synchronous write behavior";
|
||||
};
|
||||
|
||||
mount = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
description = "Controls the mount point used for this file system";
|
||||
default = null;
|
||||
};
|
||||
|
||||
encryption = {
|
||||
enable = lib.mkEnableOption "should encryption be enabled";
|
||||
type = lib.mkOption {
|
||||
type = lib.types.enum ["aes-128-ccm" "aes-192-ccm" "aes-256-ccm" "aes-128-gcm" "aes-192-gcm" "aes-256-gcm"];
|
||||
description = "What encryption type to use";
|
||||
};
|
||||
keyformat = lib.mkOption {
|
||||
type = lib.types.enum ["raw" "hex" "passphrase"];
|
||||
description = "Format of the encryption key";
|
||||
};
|
||||
keylocation = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "Location of the encryption key";
|
||||
};
|
||||
};
|
||||
|
||||
snapshot = {
|
||||
# This option should set this option flag
|
||||
autoSnapshot = lib.mkEnableOption "Enable automatic snapshots for this dataset";
|
||||
# Creates a blank snapshot in the post create hook for rollback purposes
|
||||
blankSnapshot = lib.mkEnableOption "Should a blank snapshot be auto created in the post create hook";
|
||||
};
|
||||
|
||||
recordSize = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "Suggested block size for files in the file system";
|
||||
};
|
||||
|
||||
postCreateHook = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Script to run after dataset creation";
|
||||
};
|
||||
};
|
||||
};
|
||||
in {
|
||||
flake.commonModules.storage-dataset-submodule = submodule;
|
||||
}
|
||||
17
modules/nixos/storage/default.nix
Normal file
17
modules/nixos/storage/default.nix
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
{config, ...}: let
|
||||
mod = config.flake.nixosModules;
|
||||
in {
|
||||
# TODO: we should have an impermanence module for home manager that proxies its values namespaced to the user down here that matches the same interface
|
||||
|
||||
# TODO: we should have a way of enabling impermanence for a systemd config
|
||||
# these should have an option to put their folder into their own dataset (this needs to support private vs non private)
|
||||
# options for features that can be added to the dataset
|
||||
|
||||
flake.nixosModules.storage = {...}: {
|
||||
imports = [
|
||||
mod.storage-impermanence
|
||||
mod.storage-zfs
|
||||
mod.storage-config
|
||||
];
|
||||
};
|
||||
}
|
||||
60
modules/nixos/storage/impermanence-dataset.nix
Normal file
60
modules/nixos/storage/impermanence-dataset.nix
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
{config, ...}: let
|
||||
datasetSubmodule = config.flake.commonModules.storage-dataset-submodule;
|
||||
submodule = args @ {lib, ...}: {name, ...}: let
|
||||
pathPermissions = {
|
||||
read = lib.mkEnableOption "should the path have read permissions";
|
||||
write = lib.mkEnableOption "should the path have read permissions";
|
||||
execute = lib.mkEnableOption "should the path have read permissions";
|
||||
};
|
||||
pathTypeSubmodule = {name, ...}: {
|
||||
options = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
owner = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "root";
|
||||
};
|
||||
permissions = pathPermissions;
|
||||
};
|
||||
group = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "root";
|
||||
};
|
||||
permissions = pathPermissions;
|
||||
};
|
||||
other = {
|
||||
permissions = pathPermissions;
|
||||
};
|
||||
};
|
||||
};
|
||||
in {
|
||||
imports = [
|
||||
(datasetSubmodule args)
|
||||
];
|
||||
|
||||
options = {
|
||||
files = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule pathTypeSubmodule);
|
||||
default = {};
|
||||
};
|
||||
directories = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule pathTypeSubmodule);
|
||||
default = {};
|
||||
};
|
||||
impermanence.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
mount = lib.mkDefault "/${name}";
|
||||
};
|
||||
};
|
||||
in {
|
||||
flake.commonModules.storage-impermanence-dataset-submodule = submodule;
|
||||
}
|
||||
147
modules/nixos/storage/impermanence.nix
Normal file
147
modules/nixos/storage/impermanence.nix
Normal file
|
|
@ -0,0 +1,147 @@
|
|||
{config, ...}: let
|
||||
datasetSubmodule = config.flake.commonModules.storage-dataset-submodule;
|
||||
impermanenceDatasetSubmodule = config.flake.commonModules.storage-impermanence-dataset-submodule;
|
||||
in {
|
||||
flake.nixosModules.storage-impermanence = args @ {
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
datasetSub = datasetSubmodule args;
|
||||
impermanenceDatasetSub = impermanenceDatasetSubmodule args;
|
||||
|
||||
permissionsToMode = permissions: let
|
||||
permSetToDigit = permSet:
|
||||
(
|
||||
if permSet.read
|
||||
then 4
|
||||
else 0
|
||||
)
|
||||
+ (
|
||||
if permSet.write
|
||||
then 2
|
||||
else 0
|
||||
)
|
||||
+ (
|
||||
if permSet.execute
|
||||
then 1
|
||||
else 0
|
||||
);
|
||||
|
||||
ownerDigit = permSetToDigit permissions.owner.permissions;
|
||||
groupDigit = permSetToDigit permissions.group.permissions;
|
||||
otherDigit = permSetToDigit permissions.other.permissions;
|
||||
in
|
||||
toString ownerDigit + toString groupDigit + toString otherDigit;
|
||||
|
||||
# Get the option names from both submodules to automatically determine which are impermanence-specific
|
||||
regularDatasetEval = lib.evalModules {
|
||||
modules = [datasetSub];
|
||||
specialArgs = args;
|
||||
};
|
||||
impermanenceDatasetEval = lib.evalModules {
|
||||
modules = [impermanenceDatasetSub];
|
||||
specialArgs = args;
|
||||
};
|
||||
|
||||
regularDatasetOptions = builtins.attrNames regularDatasetEval.options;
|
||||
impermanenceDatasetOptions = builtins.attrNames impermanenceDatasetEval.options;
|
||||
|
||||
# Find options that are only in impermanence datasets (not in regular ZFS datasets)
|
||||
impermanenceOnlyOptions = lib.lists.subtractLists regularDatasetOptions impermanenceDatasetOptions;
|
||||
in {
|
||||
options.storage = {
|
||||
impermanence = {
|
||||
enable = lib.mkEnableOption "should impermanence be enabled for this system";
|
||||
|
||||
datasets = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule impermanenceDatasetSub);
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.storage.impermanence.enable (lib.mkMerge [
|
||||
{
|
||||
assertions = [
|
||||
{
|
||||
assertion = config.storage.zfs.enable;
|
||||
message = "storage.impermanence can not be used without storage.zfs.";
|
||||
}
|
||||
];
|
||||
|
||||
system.activationScripts = {
|
||||
# fixes issues with /var/lib/private not having the correct permissions https://github.com/nix-community/impermanence/issues/254
|
||||
"createPersistentStorageDirs".deps = ["var-lib-private-permissions" "users" "groups"];
|
||||
|
||||
"var-lib-private-permissions" = lib.mkIf config.storage.generateBase {
|
||||
deps = ["specialfs"];
|
||||
text = ''
|
||||
mkdir -p /persist/replicate/system/root/var/lib/private
|
||||
chmod 0700 /persist/replicate/system/root/var/lib/private
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
programs.fuse.userAllowOther = true;
|
||||
|
||||
# Suppress sudo lecture on every boot since impermanence wipes the lecture status file
|
||||
security.sudo.extraConfig = "Defaults lecture=never";
|
||||
|
||||
fileSystems =
|
||||
lib.mapAttrs' (
|
||||
datasetName: dataset:
|
||||
lib.nameValuePair "/${datasetName}" {
|
||||
device = "rpool/${datasetName}";
|
||||
fsType = "zfs";
|
||||
neededForBoot = true;
|
||||
}
|
||||
)
|
||||
(lib.filterAttrs (
|
||||
datasetName: dataset: dataset.impermanence.enable
|
||||
)
|
||||
config.storage.impermanence.datasets);
|
||||
|
||||
environment.persistence =
|
||||
lib.mapAttrs (datasetName: dataset: {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
persistentStoragePath = "/${datasetName}";
|
||||
directories = lib.mapAttrsToList (path: dirConfig: {
|
||||
directory = path;
|
||||
user = dirConfig.owner.name;
|
||||
group = dirConfig.group.name;
|
||||
mode = permissionsToMode dirConfig;
|
||||
}) (lib.filterAttrs (_: dirConfig: dirConfig.enable) dataset.directories);
|
||||
files = lib.mapAttrsToList (path: fileConfig: {
|
||||
file = path;
|
||||
parentDirectory = {
|
||||
user = fileConfig.owner.name;
|
||||
group = fileConfig.group.name;
|
||||
mode = permissionsToMode fileConfig;
|
||||
};
|
||||
}) (lib.filterAttrs (_: fileConfig: fileConfig.enable) dataset.files);
|
||||
})
|
||||
(lib.filterAttrs (
|
||||
datasetName: dataset: let
|
||||
enabledDirectories = lib.filterAttrs (_: dirConfig: dirConfig.enable) dataset.directories;
|
||||
enabledFiles = lib.filterAttrs (_: fileConfig: fileConfig.enable) dataset.files;
|
||||
in
|
||||
(enabledDirectories != {}) || (enabledFiles != {})
|
||||
)
|
||||
(lib.filterAttrs (
|
||||
datasetName: dataset: dataset.impermanence.enable
|
||||
)
|
||||
config.storage.impermanence.datasets));
|
||||
}
|
||||
(lib.mkIf config.storage.zfs.enable {
|
||||
storage.zfs.datasets =
|
||||
lib.mapAttrs (
|
||||
datasetName: dataset:
|
||||
builtins.removeAttrs dataset impermanenceOnlyOptions
|
||||
)
|
||||
config.storage.impermanence.datasets;
|
||||
})
|
||||
]);
|
||||
};
|
||||
}
|
||||
221
modules/nixos/storage/storage.nix
Normal file
221
modules/nixos/storage/storage.nix
Normal file
|
|
@ -0,0 +1,221 @@
|
|||
# Defines the `storage-config` NixOS module: declarative storage layout built
# from three dataset groups (ephemeral / local / replicate) that are projected
# into `storage.zfs.datasets` and `storage.impermanence.datasets` depending on
# whether impermanence is enabled.
{config, ...}: let
  # Submodule definitions shared across the flake; their option sets are not
  # visible here — see flake.commonModules (TODO confirm their exact options).
  datasetSubmodule = config.flake.commonModules.storage-dataset-submodule;
  impermanenceDatasetSubmodule = config.flake.commonModules.storage-impermanence-dataset-submodule;
in {
  flake.nixosModules.storage-config = args @ {
    lib,
    config,
    ...
  }: let
    # Instantiate both submodules with the module arguments of this host.
    datasetSub = datasetSubmodule args;
    impermanenceDatasetSub = impermanenceDatasetSubmodule args;

    # Get the option names from both submodules to automatically determine which are impermanence-specific.
    # Each submodule is evaluated standalone purely to introspect its declared
    # option names; the resulting configs are never used.
    regularDatasetEval = lib.evalModules {
      modules = [datasetSub];
      specialArgs = args;
    };
    impermanenceDatasetEval = lib.evalModules {
      modules = [impermanenceDatasetSub];
      specialArgs = args;
    };

    regularDatasetOptions = builtins.attrNames regularDatasetEval.options;
    impermanenceDatasetOptions = builtins.attrNames impermanenceDatasetEval.options;

    # Find options that are only in impermanence datasets (not in regular ZFS datasets).
    # These attrs are stripped with `builtins.removeAttrs` before a dataset is
    # handed to `storage.zfs.datasets`, which only understands the regular options.
    impermanenceOnlyOptions = lib.lists.subtractLists regularDatasetOptions impermanenceDatasetOptions;
  in {
    options.storage = {
      generateBase = lib.mkOption {
        type = lib.types.bool;
        default = true;
        description = ''
          When enabled, enables automatic generation of base datasets (ephemeral, local, replicate roots).
          This allows manual definition of datasets matching an existing system layout for migration purposes.
        '';
      };
      # Dataset declarations keyed by relative name; "" denotes the group's
      # root dataset. `ephemeral` uses the plain ZFS submodule, while `local`
      # and `replicate` accept the impermanence-extended submodule.
      datasets = {
        ephemeral = lib.mkOption {
          type = lib.types.attrsOf (lib.types.submodule datasetSub);
          default = {};
        };
        local = lib.mkOption {
          type = lib.types.attrsOf (lib.types.submodule impermanenceDatasetSub);
          default = {};
        };
        replicate = lib.mkOption {
          type = lib.types.attrsOf (lib.types.submodule impermanenceDatasetSub);
          default = {};
        };
      };
    };

    config = lib.mkMerge [
      # Branch 1: always-on base datasets whenever ZFS + base generation are
      # enabled, regardless of impermanence.
      (lib.mkIf (config.storage.zfs.enable && config.storage.generateBase) {
        # Create ZFS datasets based on storage.datasets configuration
        storage.datasets = {
          local = {
            # /nix is reproducible from the store, so it is never persisted
            # via impermanence and never auto-snapshotted.
            "nix" = {
              impermanence.enable = false;
              type = "zfs_fs";
              mount = "/nix";
              snapshot = {
                autoSnapshot = false;
              };
              atime = "off";
              relatime = "off";
            };
          };
        };
      })
      # Branch 2: impermanence layout — root lives on a rollback-to-blank
      # ephemeral dataset; durable state lives under /persist/{local,replicate}.
      (lib.mkIf (config.storage.zfs.enable && config.storage.impermanence.enable && config.storage.generateBase) {
        storage.datasets = {
          ephemeral = {
            # Unmounted organizational root for the ephemeral tree.
            "" = {
              type = "zfs_fs";
              mount = null;
            };
            # The actual root filesystem; blankSnapshot provides the @blank
            # snapshot rolled back to on every boot (see postResumeCommands).
            "system/root" = {
              type = "zfs_fs";
              mount = "/";
              snapshot = {
                blankSnapshot = true;
              };
            };
          };
          # TODO: can we auto set the mount points on these to just be `"/persist/local/${name}"`
          local = {
            "" = {
              mount = "/persist/local";
            };
          };
          # TODO: can we auto set the mount points on these to just be `"/persist/replicate/${name}"`
          replicate = {
            "" = {
              mount = "/persist/replicate";
            };
            # Core system state that must survive root rollback.
            "system/root" = {
              mount = "/persist/replicate/system/root";
              snapshot = {
                autoSnapshot = true;
              };
              directories = {
                "/var/lib/nixos".enable = true;
                "/var/lib/systemd/coredump".enable = true;
              };
              files = {
                "/etc/machine-id".enable = true;
              };
            };
            "home" = {
              mount = "/persist/replicate/home";
              snapshot = {
                autoSnapshot = true;
              };
            };
            "system/var/log" = {
              type = "zfs_fs";
              directories = {
                "/var/log".enable = true;
              };
            };
          };
        };

        # Ephemeral datasets go straight to ZFS; "" maps to the group root
        # name, anything else is nested beneath it.
        storage.zfs.datasets = lib.mkMerge [
          (lib.mapAttrs' (name: dataset: {
            name =
              if name == ""
              then "ephemeral"
              else "ephemeral/${name}";
            value = dataset;
          })
          config.storage.datasets.ephemeral)
        ];

        # Roll the root dataset back to its blank snapshot on boot.
        # NOTE(review): pool name "rpool" is hard-coded here while dataset
        # names are generated — verify it matches the configured ZFS pool.
        boot.initrd.postResumeCommands = lib.mkAfter ''
          zfs rollback -r rpool/ephemeral/system/root@blank
        '';

        # local/replicate datasets are routed through the impermanence module,
        # which (presumably — see the sibling impermanence module) strips the
        # impermanence-only options before forwarding to storage.zfs.datasets.
        storage.impermanence.datasets = lib.mkMerge [
          (lib.mapAttrs' (name: dataset: {
            name =
              if name == ""
              then "persist/local"
              else "persist/local/${name}";
            value = dataset;
          })
          config.storage.datasets.local)
          (lib.mapAttrs' (name: dataset: {
            name =
              if name == ""
              then "persist/replicate"
              else "persist/replicate/${name}";
            value = dataset;
          })
          config.storage.datasets.replicate)
        ];
      })
      # Branch 3: ZFS without impermanence — a conventional persistent root.
      (lib.mkIf (config.storage.zfs.enable && !config.storage.impermanence.enable && config.storage.generateBase) {
        storage.datasets = {
          # Base organizational datasets (only needed when impermanence is disabled)
          local = {
            "" = {
              type = "zfs_fs";
              mount = null;
            };
            # Persistent root filesystem with snapshotting enabled.
            "root" = {
              type = "zfs_fs";
              mount = "/";
              compression = "lz4";
              acltype = "posixacl";
              relatime = "on";
              xattr = "sa";
              snapshot = {
                autoSnapshot = true;
                blankSnapshot = true;
              };
            };
          };
          replicate = {
            "" = {
              type = "zfs_fs";
              mount = null;
            };
            "system/var/log" = {
              type = "zfs_fs";
              mount = "/var/log";
            };
          };
        };

        # Without impermanence the datasets go directly to ZFS, with the
        # impermanence-only options stripped (they would be unknown to the
        # ZFS dataset submodule).
        # NOTE(review): the generated names still carry the "persist/" prefix
        # even though nothing is mounted under /persist in this branch —
        # presumably intentional so the pool layout matches the impermanence
        # variant for migration; confirm.
        storage.zfs.datasets = lib.mkMerge [
          (lib.mapAttrs' (name: dataset: {
            name =
              if name == ""
              then "persist/local"
              else "persist/local/${name}";
            value = builtins.removeAttrs dataset impermanenceOnlyOptions;
          })
          config.storage.datasets.local)
          (lib.mapAttrs' (name: dataset: {
            name =
              if name == ""
              then "persist/replicate"
              else "persist/replicate/${name}";
            value = builtins.removeAttrs dataset impermanenceOnlyOptions;
          })
          config.storage.datasets.replicate)
        ];
      })
    ];

    # TODO: set up datasets for systemd services that want a dataset created
    # TODO: home-manager.users.<user>.storage.impermanence.enable
    # is false then persist the entire directory of the user
    # if true persist home-manager.users.<user>.storage.impermanence.datasets
    # TODO: systemd.services.<name>.storage.datasets persists
    # TODO: configure other needed storage modes here
  };
}
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue