Compare commits
10 commits: 730eeef242 ... 318a0a9748

| Author | SHA1 | Date |
|---|---|---|
|  | 318a0a9748 |  |
|  | 4d7d11e0c8 |  |
|  | 4da5d65d8f |  |
|  | 1310b50794 |  |
|  | 5acf060e9e |  |
|  | 703530ddfe |  |
|  | ab555f50ff |  |
|  | d283f88160 |  |
|  | b67be1472a |  |
|  | 3ca0e9bf0a |  |

77 changed files with 1313 additions and 1271 deletions
@@ -100,12 +100,12 @@
        ]
      ];
      # We are having to boot off of the nvm cache drive because I cant figure out how to boot via the HBA
      cache = {
        cache0 = {
      cache = [
        {
          device = "nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F";
          boot = true;
        };
      };
        }
      ];
    };
  };
  impermanence = {
@@ -59,12 +59,22 @@
    hardware = {
      piperMouse.enable = true;
    };
  };

  storage = {
  storage = {
    zfs = {
      enable = true;
      pool = {
        mode = "";
        drives = ["wwn-0x5000039fd0cf05eb"];
        mode = "stripe";
        vdevs = [
          [
            {
              device = "wwn-0x5000039fd0cf05eb";
              boot = true;
            }
          ]
        ];
        cache = [];
      };
    };
  };
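For reference, `vdevs` is a list of vdevs, each itself a list of disk attrsets, so the stripe above is one vdev containing one disk. A hedged sketch (drive names hypothetical, not from this change) of how a mirrored pool would presumably be declared under the same options:

    storage.zfs.pool = {
      mode = "mirror";
      vdevs = [
        [
          {
            device = "wwn-disk-a";
            boot = true;
          }
          {device = "wwn-disk-b";}
        ]
      ];
    };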
@@ -8,12 +8,10 @@
    ./desktop.nix
    ./ssh.nix
    ./i18n.nix
    ./sync.nix
    ./impermanence.nix
    ./disko.nix
    ./ollama.nix
    ./sync
    ./ollama
    ./ai.nix
    ./tailscale.nix
    ./tailscale
    ./steam.nix
    ./server
    ./storage
@@ -1,267 +0,0 @@ (file deleted)
{
  lib,
  pkgs,
  config,
  inputs,
  ...
}: let
  # there currently is a bug with disko that causes long disk names to be generated improperly this hash function should alleviate it when used for disk names instead of what we are defaulting to
  # max gpt length is 36 and disk adds formats it like disk-xxxx-zfs which means we need to be 9 characters under that
  hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));

  vdevs =
    builtins.map (
      disks:
        builtins.map (disk: lib.attrsets.nameValuePair (hashDisk disk) disk) disks
    )
    config.host.storage.pool.vdevs;
  cache =
    builtins.map (
      disk: lib.attrsets.nameValuePair (hashDisk disk) disk
    )
    config.host.storage.pool.cache;

  datasets = config.host.storage.pool.datasets // config.host.storage.pool.extraDatasets;
in {
  options.host.storage = {
    enable = lib.mkEnableOption "are we going create zfs disks with disko on this device";
    encryption = lib.mkEnableOption "is the vdev going to be encrypted";
    notifications = {
      enable = lib.mkEnableOption "are notifications enabled";
      host = lib.mkOption {
        type = lib.types.str;
        description = "what is the host that we are going to send the email to";
      };
      port = lib.mkOption {
        type = lib.types.port;
        description = "what port is the host using to receive mail on";
      };
      to = lib.mkOption {
        type = lib.types.str;
        description = "what account is the email going to be sent to";
      };
      user = lib.mkOption {
        type = lib.types.str;
        description = "what user is the email going to be set from";
      };
      tokenFile = lib.mkOption {
        type = lib.types.str;
        description = "file containing the password to be used by msmtp for notifications";
      };
    };
    pool = {
      mode = lib.mkOption {
        type = lib.types.str;
        default = "raidz2";
        description = "what level of redundancy should this pool have";
      };
      # list of drives in pool that will have a boot partition put onto them
      bootDrives = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "list of disks that are going to have a boot partition installed on them";
        default = lib.lists.flatten config.host.storage.pool.vdevs;
      };
      # shorthand for vdevs if you only have 1 vdev
      drives = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "list of drives that are going to be in the vdev";
        default = [];
      };
      # list of all drives in each vdev
      vdevs = lib.mkOption {
        type = lib.types.listOf (lib.types.listOf lib.types.str);
        description = "list of disks that are going to be in";
        default = [config.host.storage.pool.drives];
      };
      # list of cache drives for pool
      cache = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "list of drives that are going to be used as cache";
        default = [];
      };
      # Default datasets that are needed to make a functioning system
      datasets = lib.mkOption {
        type = lib.types.attrsOf (inputs.disko.lib.subType {
          types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
        });
        default = {
          "local" = {
            type = "zfs_fs";
            options.canmount = "off";
          };
          # nix directory needs to be available pre persist and doesn't need to be snapshotted or backed up
          "local/system/nix" = {
            type = "zfs_fs";
            mountpoint = "/nix";
            options = {
              atime = "off";
              relatime = "off";
              canmount = "on";
            };
          };
          # dataset for root that gets rolled back on every boot
          "local/system/root" = {
            type = "zfs_fs";
            mountpoint = "/";
            options = {
              canmount = "on";
            };
            postCreateHook = ''
              zfs snapshot rpool/local/system/root@blank
            '';
          };
        };
      };
      extraDatasets = lib.mkOption {
        type = lib.types.attrsOf (inputs.disko.lib.subType {
          types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
        });
        description = "List of datasets to define";
        default = {};
      };
    };
  };

  config = lib.mkIf config.host.storage.enable {
    programs.msmtp = lib.mkIf config.host.storage.notifications.enable {
      enable = true;
      setSendmail = true;
      defaults = {
        aliases = "/etc/aliases";
        port = config.host.storage.notifications.port;
        tls_trust_file = "/etc/ssl/certs/ca-certificates.crt";
        tls = "on";
        auth = "login";
        tls_starttls = "off";
      };
      accounts = {
        zfs_notifications = {
          auth = true;
          tls = true;
          host = config.host.storage.notifications.host;
          passwordeval = "cat ${config.host.storage.notifications.tokenFile}";
          user = config.host.storage.notifications.user;
          from = config.host.storage.notifications.user;
        };
      };
    };

    services.zfs = {
      autoScrub.enable = true;
      autoSnapshot.enable = true;

      zed = lib.mkIf config.host.storage.notifications.enable {
        enableMail = true;

        settings = {
          ZED_DEBUG_LOG = "/tmp/zed.debug.log";
          ZED_EMAIL_ADDR = [config.host.storage.notifications.to];
          ZED_EMAIL_PROG = "${pkgs.msmtp}/bin/msmtp";
          ZED_EMAIL_OPTS = "-a zfs_notifications @ADDRESS@";

          ZED_NOTIFY_INTERVAL_SECS = 3600;
          ZED_NOTIFY_VERBOSE = true;

          ZED_USE_ENCLOSURE_LEDS = true;
          ZED_SCRUB_AFTER_RESILVER = true;
        };
      };
    };

    disko.devices = {
      disk = (
        builtins.listToAttrs (
          builtins.map
          (drive:
            lib.attrsets.nameValuePair (drive.name) {
              type = "disk";
              device = "/dev/disk/by-id/${drive.value}";
              content = {
                type = "gpt";
                partitions = {
                  ESP = lib.mkIf (builtins.elem drive.value config.host.storage.pool.bootDrives) {
                    # The 2GB here for the boot partition might be a bit overkill we probably only need like 1/4th of that but storage is cheap
                    size = "2G";
                    type = "EF00";
                    content = {
                      type = "filesystem";
                      format = "vfat";
                      mountpoint = "/boot";
                      mountOptions = ["umask=0077"];
                    };
                  };
                  zfs = {
                    size = "100%";
                    content = {
                      type = "zfs";
                      pool = "rpool";
                    };
                  };
                };
              };
            })
          (
            (lib.lists.flatten vdevs) ++ cache
          )
        )
      );
      zpool = {
        rpool = {
          type = "zpool";
          mode = {
            topology = {
              type = "topology";
              vdev = (
                builtins.map (disks: {
                  mode = config.host.storage.pool.mode;
                  members =
                    builtins.map (disk: disk.name) disks;
                })
                vdevs
              );
              cache = builtins.map (disk: disk.name) cache;
            };
          };

          options = {
            ashift = "12";
            autotrim = "on";
          };

          rootFsOptions =
            {
              canmount = "off";
              mountpoint = "none";

              xattr = "sa";
              acltype = "posixacl";
              relatime = "on";

              compression = "lz4";

              "com.sun:auto-snapshot" = "false";
            }
            // (
              lib.attrsets.optionalAttrs config.host.storage.encryption {
                encryption = "on";
                keyformat = "hex";
                keylocation = "prompt";
              }
            );

          datasets = lib.mkMerge [
            (
              lib.attrsets.mapAttrs (name: value: {
                type = value.type;
                options = value.options;
                mountpoint = value.mountpoint;
                postCreateHook = value.postCreateHook;
              })
              datasets
            )
          ];
        };
      };
    };
  };
}
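The `hashDisk` helper in the deleted module exists for the GPT label limit its comments describe: labels cap at 36 characters and disko generates names like `disk-<name>-zfs`, so the name must stay 9 characters under the cap. A minimal check of that arithmetic (evaluable with `nix eval --expr`; the drive id is the one used elsewhere in this diff):

    let
      hashDisk = drive: builtins.substring 0 27 (builtins.hashString "sha256" drive);
    in
      # "disk-" (5) + 27 + "-zfs" (4) = 36, exactly the GPT label ceiling
      builtins.stringLength (hashDisk "wwn-0x5000039fd0cf05eb")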
@@ -1,101 +0,0 @@ (file deleted)
{
  config,
  lib,
  ...
}: {
  options.host.impermanence.enable = lib.mkEnableOption "are we going to use impermanence on this device";

  config = lib.mkMerge [
    {
      assertions = [
        {
          assertion = !(config.host.impermanence.enable && !config.host.storage.enable);
          message = ''
            Disko storage must be enabled to use impermanence.
          '';
        }
      ];
    }
    (
      lib.mkIf config.host.impermanence.enable {
        assertions = [
          {
            assertion = config.host.impermanence.enable && config.host.storage.enable;
            message = "Impermanence can not be used without managed host storage.";
          }
        ];

        # fixes issues with /var/lib/private not having the correct permissions https://github.com/nix-community/impermanence/issues/254
        system.activationScripts."createPersistentStorageDirs".deps = ["var-lib-private-permissions" "users" "groups"];
        system.activationScripts = {
          "var-lib-private-permissions" = {
            deps = ["specialfs"];
            text = ''
              mkdir -p /persist/system/root/var/lib/private
              chmod 0700 /persist/system/root/var/lib/private
            '';
          };
        };

        programs.fuse.userAllowOther = true;

        boot.initrd.postResumeCommands = lib.mkAfter ''
          zfs rollback -r rpool/local/system/root@blank
        '';

        fileSystems = {
          "/".neededForBoot = true;
          "/persist/system/root".neededForBoot = true;
          "/persist/system/var/log".neededForBoot = true;
        };

        host.storage.pool.extraDatasets = {
          # persist datasets are datasets that contain information that we would like to keep around
          "persist" = {
            type = "zfs_fs";
            options.canmount = "off";
            options = {
              "com.sun:auto-snapshot" = "true";
            };
          };
          # this is where root data actually lives
          "persist/system/root" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/root";
          };
          "persist/system/var/log" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/var/log";
            # logs should be append only so we shouldn't need to snapshot them
            options = {
              "com.sun:auto-snapshot" = "false";
            };
          };
        };

        environment.persistence."/persist/system/var/log" = {
          enable = true;
          hideMounts = true;
          directories = [
            "/var/log"
          ];
        };

        environment.persistence."/persist/system/root" = {
          enable = true;
          hideMounts = true;
          directories = [
            "/var/lib/nixos"
            "/var/lib/systemd/coredump"
          ];
          files = [
            "/etc/machine-id"
          ];
        };

        # TODO: this should live in leylas home manager configuration
        security.sudo.extraConfig = "Defaults lecture=never";
      }
    )
  ];
}
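The rollback in the deleted module's `boot.initrd.postResumeCommands` pairs with the `@blank` snapshot created by the storage module's `postCreateHook`: the root dataset is snapshotted empty once at creation, then reset to that snapshot on every boot, so only paths declared under `environment.persistence` survive a reboot. The two halves, quoted from the two deleted files:

    postCreateHook = ''
      zfs snapshot rpool/local/system/root@blank
    '';

    boot.initrd.postResumeCommands = lib.mkAfter ''
      zfs rollback -r rpool/local/system/root@blank
    '';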
modules/nixos-modules/ollama/default.nix (new file, 6 lines)
@@ -0,0 +1,6 @@
{...}: {
  imports = [
    ./ollama.nix
    ./storage.nix
  ];
}

@@ -27,20 +27,6 @@
        allowedUDPPorts = ports;
      };
    }))
    (lib.mkIf config.host.impermanence.enable {
      environment.persistence."/persist/system/root" = {
        enable = true;
        hideMounts = true;
        directories = [
          {
            directory = "/var/lib/private/ollama";
            user = config.services.ollama.user;
            group = config.services.ollama.group;
            mode = "0700";
          }
        ];
      };
    })
  ]
  );
}

modules/nixos-modules/ollama/storage.nix (new file, 49 lines)
@@ -0,0 +1,49 @@
{
  config,
  lib,
  ...
}: {
  options = {
    services.ollama.impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.ollama.enable && config.storage.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.ollama.enable (
    lib.mkMerge [
      (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
        {
          # Ollama needs persistent storage for models and configuration
        }
        (lib.mkIf (!config.services.ollama.impermanence.enable) {
          # TODO: placeholder to configure a unique dataset for this service
        })
        (lib.mkIf config.services.ollama.impermanence.enable {
          storage.impermanence.datasets."persist/system/root" = {
            directories."/var/lib/private/ollama" = {
              enable = true;
              owner.name = config.services.ollama.user;
              group.name = config.services.ollama.group;
              owner.permissions = {
                read = true;
                write = true;
                execute = false;
              };
              group.permissions = {
                read = false;
                write = false;
                execute = false;
              };
              other.permissions = {
                read = false;
                write = false;
                execute = false;
              };
            };
          };
        })
      ]))
    ]
  );
}
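Each service module below repeats the shape of this ollama file: an `impermanence.enable` option defaulting on when both the service and `storage.impermanence` are enabled, and a config block gated first on the service, then on `storage.zfs.enable`. A reduced skeleton of the pattern, using the hypothetical service name `myservice` (the TODO branch mirrors the placeholders in this PR):

    {
      config,
      lib,
      ...
    }: {
      options.services.myservice.impermanence.enable = lib.mkOption {
        type = lib.types.bool;
        default = config.services.myservice.enable && config.storage.impermanence.enable;
      };

      config = lib.mkIf config.services.myservice.enable (lib.mkMerge [
        (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
          (lib.mkIf (!config.services.myservice.impermanence.enable) {
            # TODO: placeholder to configure a unique dataset for this service
          })
          (lib.mkIf config.services.myservice.impermanence.enable {
            storage.impermanence.datasets."persist/system/root" = {
              directories."/var/lib/myservice" = {
                owner.name = "myservice";
                group.name = "myservice";
              };
            };
          })
        ]))
      ]);
    }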
@@ -3,6 +3,6 @@
    ./actual.nix
    ./proxy.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];
}
@@ -1,37 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  const = import ./const.nix;
  dataDirectory = const.dataDirectory;
in {
  options.services.actual = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.actual.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.actual.impermanence.enable {
    assertions = [
      {
        assertion = config.services.actual.settings.dataDir == dataDirectory;
        message = "actual data location does not match persistence\nconfig directory: ${config.services.actual.settings.dataDir}\npersistence directory: ${dataDirectory}";
      }
      {
        assertion = config.systemd.services.actual.serviceConfig.DynamicUser or false;
        message = "actual systemd service must have DynamicUser enabled to use private directory";
      }
    ];
    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = dataDirectory;
          user = "actual";
          group = "actual";
        }
      ];
    };
  };
}

modules/nixos-modules/server/actual/storage.nix (new file, 41 lines)
@@ -0,0 +1,41 @@
{
  lib,
  config,
  ...
}: let
  const = import ./const.nix;
  dataDirectory = const.dataDirectory;
in {
  options.services.actual.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.actual.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.actual.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.actual.settings.dataDir == dataDirectory;
            message = "actual data location does not match persistence\nconfig directory: ${config.services.actual.settings.dataDir}\npersistence directory: ${dataDirectory}";
          }
          {
            assertion = config.systemd.services.actual.serviceConfig.DynamicUser or false;
            message = "actual systemd service must have DynamicUser enabled to use private directory";
          }
        ];
      }
      (lib.mkIf (!config.services.actual.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.actual.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${dataDirectory}" = {
            owner.name = "actual";
            group.name = "actual";
          };
        };
      })
    ]))
  ]);
}
@@ -1,5 +1,5 @@
{...}: {
  imports = [
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,33 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  bazarr_data_directory = "/var/lib/bazarr";
in {
  options.services.bazarr = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.bazarr.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.bazarr.impermanence.enable {
    assertions = [
      {
        assertion = config.services.bazarr.dataDir == bazarr_data_directory;
        message = "bazarr data directory does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = bazarr_data_directory;
          user = "bazarr";
          group = "bazarr";
        }
      ];
    };
  };
}

modules/nixos-modules/server/bazarr/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  bazarr_data_directory = "/var/lib/bazarr";
in {
  options.services.bazarr.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.bazarr.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.bazarr.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.bazarr.dataDir == bazarr_data_directory;
            message = "bazarr data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.bazarr.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.bazarr.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${bazarr_data_directory}" = {
            owner.name = "bazarr";
            group.name = "bazarr";
          };
        };
      })
    ]))
  ]);
}
@@ -1,6 +1,6 @@
{...}: {
  imports = [
    ./crab-hole.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,33 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  workingDirectory = "/var/lib/private/crab-hole";
in {
  options.services.crab-hole = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.crab-hole.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.crab-hole.impermanence.enable {
    assertions = [
      {
        assertion =
          config.systemd.services.crab-hole.serviceConfig.WorkingDirectory == (builtins.replaceStrings ["/private"] [""] workingDirectory);
        message = "crab-hole working directory does not match persistence";
      }
    ];
    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = workingDirectory;
          user = "crab-hole";
          group = "crab-hole";
        }
      ];
    };
  };
}

modules/nixos-modules/server/crab-hole/storage.nix (new file, 37 lines)
@@ -0,0 +1,37 @@
{
  lib,
  config,
  ...
}: let
  workingDirectory = "/var/lib/private/crab-hole";
in {
  options.services.crab-hole.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.crab-hole.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.crab-hole.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion =
              config.systemd.services.crab-hole.serviceConfig.WorkingDirectory == (builtins.replaceStrings ["/private"] [""] workingDirectory);
            message = "crab-hole working directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.crab-hole.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.crab-hole.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${workingDirectory}" = {
            owner.name = "crab-hole";
            group.name = "crab-hole";
          };
        };
      })
    ]))
  ]);
}
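crab-hole runs under systemd's DynamicUser, so its state physically lives in /var/lib/private/crab-hole while the unit sees it through the /var/lib/crab-hole symlink; the assertion strips the /private segment before comparing against WorkingDirectory. The string surgery in isolation:

    builtins.replaceStrings ["/private"] [""] "/var/lib/private/crab-hole"
    # evaluates to "/var/lib/crab-hole"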
@@ -1,6 +1,6 @@
{...}: {
  imports = [
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,34 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  dataFolder = "/var/lib/fail2ban";
  dataFile = "fail2ban.sqlite3";
in {
  options.services.fail2ban = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.fail2ban.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.fail2ban.impermanence.enable {
    assertions = [
      {
        assertion = config.services.fail2ban.daemonSettings.Definition.dbfile == "${dataFolder}/${dataFile}";
        message = "fail2ban data file does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = dataFolder;
          user = "fail2ban";
          group = "fail2ban";
        }
      ];
    };
  };
}

modules/nixos-modules/server/fail2ban/storage.nix (new file, 37 lines)
@@ -0,0 +1,37 @@
{
  lib,
  config,
  ...
}: let
  dataFolder = "/var/lib/fail2ban";
  dataFile = "fail2ban.sqlite3";
in {
  options.services.fail2ban.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.fail2ban.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.fail2ban.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.fail2ban.daemonSettings.Definition.dbfile == "${dataFolder}/${dataFile}";
            message = "fail2ban data file does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.fail2ban.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.fail2ban.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${dataFolder}" = {
            owner.name = "fail2ban";
            group.name = "fail2ban";
          };
        };
      })
    ]))
  ]);
}
@@ -1,5 +1,5 @@
{...}: {
  imports = [
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,26 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: {
  options.services.flaresolverr = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.flaresolverr.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.flaresolverr.impermanence.enable {
    # FlareSolverr typically doesn't need persistent storage as it's a proxy service
    # but we'll add basic structure in case it's needed for logs or configuration
    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = "/var/lib/flaresolverr";
          user = "flaresolverr";
          group = "flaresolverr";
        }
      ];
    };
  };
}

modules/nixos-modules/server/flaresolverr/storage.nix (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  lib,
  config,
  ...
}: {
  options.services.flaresolverr.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.flaresolverr.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.flaresolverr.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      (lib.mkIf (!config.services.flaresolverr.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.flaresolverr.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."/var/lib/flaresolverr" = {
            owner.name = "flaresolverr";
            group.name = "flaresolverr";
          };
        };
      })
    ]))
  ]);
}
@@ -4,6 +4,6 @@
    ./proxy.nix
    ./database.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,35 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  stateDir = "/var/lib/forgejo";
in {
  options.services.forgejo = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.forgejo.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.forgejo.impermanence.enable {
    assertions = [
      {
        assertion = config.services.forgejo.stateDir == stateDir;
        message = "forgejo state directory does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      enable = true;
      hideMounts = true;
      directories = [
        {
          directory = stateDir;
          user = "forgejo";
          group = "forgejo";
        }
      ];
    };
  };
}

modules/nixos-modules/server/forgejo/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  stateDir = "/var/lib/forgejo";
in {
  options.services.forgejo.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.forgejo.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.forgejo.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.forgejo.stateDir == stateDir;
            message = "forgejo state directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.forgejo.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.forgejo.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${stateDir}" = {
            owner.name = "forgejo";
            group.name = "forgejo";
          };
        };
      })
    ]))
  ]);
}
@@ -4,7 +4,7 @@
    ./proxy.nix
    ./database.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
    ./extensions
  ];
}

@@ -1,26 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  configDir = "/var/lib/hass";
in
  lib.mkIf (config.host.impermanence.enable && config.services.home-assistant.enable) {
    assertions = [
      {
        assertion = config.services.home-assistant.configDir == configDir;
        message = "home assistant config directory does not match persistence";
      }
    ];
    environment.persistence."/persist/system/root" = {
      enable = true;
      hideMounts = true;
      directories = [
        {
          directory = configDir;
          user = "hass";
          group = "hass";
        }
      ];
    };
  }

modules/nixos-modules/server/home-assistant/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  configDir = "/var/lib/hass";
in {
  options.services.home-assistant.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.home-assistant.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.home-assistant.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.home-assistant.configDir == configDir;
            message = "home assistant config directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.home-assistant.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.home-assistant.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${configDir}" = {
            owner.name = "hass";
            group.name = "hass";
          };
        };
      })
    ]))
  ]);
}
@@ -3,7 +3,7 @@
    ./proxy.nix
    ./database.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];

  # NOTE: This shouldn't be needed now that we are out of testing

@@ -1,32 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  mediaLocation = "/var/lib/immich";
in {
  options.services.immich = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.immich.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.immich.impermanence.enable {
    assertions = [
      {
        assertion = config.services.immich.mediaLocation == mediaLocation;
        message = "immich media location does not match persistence";
      }
    ];
    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = mediaLocation;
          user = "immich";
          group = "immich";
        }
      ];
    };
  };
}

modules/nixos-modules/server/immich/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  mediaLocation = "/var/lib/immich";
in {
  options.services.immich.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.immich.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.immich.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.immich.mediaLocation == mediaLocation;
            message = "immich media location does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.immich.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.immich.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${mediaLocation}" = {
            owner.name = "immich";
            group.name = "immich";
          };
        };
      })
    ]))
  ]);
}
@@ -1,5 +1,5 @@
{...}: {
  imports = [
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,33 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  jackett_data_directory = "/var/lib/jackett/.config/Jackett";
in {
  options.services.jackett = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.jackett.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.jackett.impermanence.enable {
    assertions = [
      {
        assertion = config.services.jackett.dataDir == jackett_data_directory;
        message = "jackett data directory does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = jackett_data_directory;
          user = "jackett";
          group = "jackett";
        }
      ];
    };
  };
}

modules/nixos-modules/server/jackett/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  jackett_data_directory = "/var/lib/jackett/.config/Jackett";
in {
  options.services.jackett.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.jackett.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.jackett.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.jackett.dataDir == jackett_data_directory;
            message = "jackett data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.jackett.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.jackett.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${jackett_data_directory}" = {
            owner.name = "jackett";
            group.name = "jackett";
          };
        };
      })
    ]))
  ]);
}
@@ -3,6 +3,6 @@
    ./jellyfin.nix
    ./proxy.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,73 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  jellyfin_data_directory = "/var/lib/jellyfin";
  jellyfin_cache_directory = "/var/cache/jellyfin";
in {
  options.services.jellyfin = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.jellyfin.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.jellyfin.impermanence.enable {
    fileSystems."/persist/system/jellyfin".neededForBoot = true;

    host.storage.pool.extraDatasets = {
      # sops age key needs to be available to pre persist for user generation
      "persist/system/jellyfin" = {
        type = "zfs_fs";
        mountpoint = "/persist/system/jellyfin";
        options = {
          atime = "off";
          relatime = "off";
          canmount = "on";
        };
      };
    };

    assertions = [
      {
        assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
        message = "jellyfin data directory does not match persistence";
      }
      {
        assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
        message = "jellyfin cache directory does not match persistence";
      }
    ];

    environment.persistence = {
      "/persist/system/root" = {
        directories = [
          {
            directory = jellyfin_data_directory;
            user = "jellyfin";
            group = "jellyfin";
          }
          {
            directory = jellyfin_cache_directory;
            user = "jellyfin";
            group = "jellyfin";
          }
        ];
      };

      "/persist/system/jellyfin" = {
        enable = true;
        hideMounts = true;
        directories = [
          {
            directory = config.services.jellyfin.media_directory;
            user = "jellyfin";
            group = "jellyfin_media";
            mode = "1770";
          }
        ];
      };
    };
  };
}

modules/nixos-modules/server/jellyfin/storage.nix (new file, 76 lines)
@@ -0,0 +1,76 @@
{
  lib,
  config,
  ...
}: let
  jellyfin_data_directory = "/var/lib/jellyfin";
  jellyfin_cache_directory = "/var/cache/jellyfin";
in {
  options.services.jellyfin.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.jellyfin.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.jellyfin.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
            message = "jellyfin data directory does not match persistence";
          }
          {
            assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
            message = "jellyfin cache directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.jellyfin.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.jellyfin.impermanence.enable {
        storage.impermanence.datasets = {
          "persist/system/root" = {
            directories = {
              "${jellyfin_data_directory}" = {
                enable = true;
                owner.name = "jellyfin";
                group.name = "jellyfin";
              };
              "${jellyfin_cache_directory}" = {
                enable = true;
                owner.name = "jellyfin";
                group.name = "jellyfin";
              };
            };
          };
          "persist/system/jellyfin" = {
            atime = "off";
            relatime = "off";

            directories."${config.services.jellyfin.media_directory}" = {
              enable = true;
              owner.name = "jellyfin";
              group.name = "jellyfin_media";
              owner.permissions = {
                read = true;
                write = true;
                execute = true;
              };
              group.permissions = {
                read = true;
                write = true;
                execute = true;
              };
              other.permissions = {
                read = false;
                write = false;
                execute = false;
              };
            };
          };
        };
      })
    ]))
  ]);
}
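The old jellyfin module expressed the media directory's mode as the octal string "1770"; the new schema spells out read/write/execute bits per owner, group, and other. A hypothetical helper (not part of the PR) showing how the structured form maps back to an octal triple:

    let
      bit = p: (if p.read then 4 else 0) + (if p.write then 2 else 0) + (if p.execute then 1 else 0);
    in
      builtins.concatStringsSep "" (map (p: toString (bit p)) [
        {read = true; write = true; execute = true;} # owner: 7
        {read = true; write = true; execute = true;} # group: 7
        {read = false; write = false; execute = false;} # other: 0
      ])
    # evaluates to "770"; the sticky bit from the old "1770" has no counterpart in the fields shown here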
@@ -1,5 +1,5 @@
{...}: {
  imports = [
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,33 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  lidarr_data_directory = "/var/lib/lidarr/.config/Lidarr";
in {
  options.services.lidarr = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.lidarr.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.lidarr.impermanence.enable {
    assertions = [
      {
        assertion = config.services.lidarr.dataDir == lidarr_data_directory;
        message = "lidarr data directory does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = lidarr_data_directory;
          user = "lidarr";
          group = "lidarr";
        }
      ];
    };
  };
}

modules/nixos-modules/server/lidarr/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  lidarr_data_directory = "/var/lib/lidarr/.config/Lidarr";
in {
  options.services.lidarr.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.lidarr.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.lidarr.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.lidarr.dataDir == lidarr_data_directory;
            message = "lidarr data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.lidarr.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.lidarr.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${lidarr_data_directory}" = {
            owner.name = "lidarr";
            group.name = "lidarr";
          };
        };
      })
    ]))
  ]);
}
@@ -2,7 +2,7 @@
  imports = [
    ./proxy.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
    ./panoramax.nix
    ./database.nix
  ];

@@ -1,20 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: {
  options.services.panoramax = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.panoramax.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.panoramax.impermanence.enable {
    # TODO: configure impermanence for panoramax data
    # This would typically include directories like:
    # - /var/lib/panoramax
    # - panoramax storage directories
    # - any cache or temporary directories that need to persist
  };
}

modules/nixos-modules/server/panoramax/storage.nix (new file, 33 lines)
@@ -0,0 +1,33 @@
{
  lib,
  config,
  ...
}: {
  options.services.panoramax.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.panoramax.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.panoramax.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        # TODO: configure impermanence for panoramax data
        # This would typically include directories like:
        # - /var/lib/panoramax
        # - panoramax storage directories
        # - any cache or temporary directories that need to persist
      }
      (lib.mkIf (!config.services.panoramax.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.panoramax.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."/var/lib/panoramax" = {
            owner.name = "panoramax";
            group.name = "panoramax";
          };
        };
      })
    ]))
  ]);
}
@@ -4,6 +4,6 @@
    ./proxy.nix
    ./database.nix
    ./fail2ban.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,32 +0,0 @@ (file deleted)
{
  config,
  lib,
  ...
}: let
  dataDir = "/var/lib/paperless";
in {
  options.services.paperless = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.paperless.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.paperless.impermanence.enable {
    assertions = [
      {
        assertion = config.services.paperless.dataDir == dataDir;
        message = "paperless data location does not match persistence";
      }
    ];
    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = dataDir;
          user = "paperless";
          group = "paperless";
        }
      ];
    };
  };
}

modules/nixos-modules/server/paperless/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  config,
  lib,
  ...
}: let
  dataDir = "/var/lib/paperless";
in {
  options.services.paperless.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.paperless.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.paperless.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.paperless.dataDir == dataDir;
            message = "paperless data location does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.paperless.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.paperless.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${dataDir}" = {
            owner.name = "paperless";
            group.name = "paperless";
          };
        };
      })
    ]))
  ]);
}
@@ -1,6 +1,6 @@
{...}: {
  imports = [
    ./postgres.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,27 +0,0 @@ (file deleted)
{
  config,
  lib,
  ...
}: let
  dataDir = "/var/lib/postgresql/16";
in {
  config = lib.mkIf (config.services.postgresql.enable && config.host.impermanence.enable) {
    assertions = [
      {
        assertion = config.services.postgresql.dataDir == dataDir;
        message = "postgres data directory does not match persistence";
      }
    ];
    environment.persistence."/persist/system/root" = {
      enable = true;
      hideMounts = true;
      directories = [
        {
          directory = dataDir;
          user = "postgres";
          group = "postgres";
        }
      ];
    };
  };
}

modules/nixos-modules/server/postgres/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  config,
  lib,
  ...
}: let
  dataDir = "/var/lib/postgresql/16";
in {
  options.services.postgresql.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.postgresql.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.postgresql.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.postgresql.dataDir == dataDir;
            message = "postgres data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.postgresql.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.postgresql.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${dataDir}" = {
            owner.name = "postgres";
            group.name = "postgres";
          };
        };
      })
    ]))
  ]);
}
@@ -1,6 +1,6 @@
{...}: {
  imports = [
    ./qbittorent.nix
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,61 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  qbittorent_profile_directory = "/var/lib/qBittorrent/";
in {
  options.services.qbittorrent = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.qbittorrent.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.qbittorrent.impermanence.enable {
    fileSystems."/persist/system/qbittorrent".neededForBoot = true;

    host.storage.pool.extraDatasets = {
      # sops age key needs to be available to pre persist for user generation
      "persist/system/qbittorrent" = {
        type = "zfs_fs";
        mountpoint = "/persist/system/qbittorrent";
        options = {
          canmount = "on";
        };
      };
    };

    assertions = [
      {
        assertion = config.services.qbittorrent.profileDir == qbittorent_profile_directory;
        message = "qbittorrent data directory does not match persistence";
      }
    ];

    environment.persistence = {
      "/persist/system/root" = {
        directories = [
          {
            directory = qbittorent_profile_directory;
            user = "qbittorrent";
            group = "qbittorrent";
          }
        ];
      };

      "/persist/system/qbittorrent" = {
        enable = true;
        hideMounts = true;
        directories = [
          {
            directory = config.services.qbittorrent.mediaDir;
            user = "qbittorrent";
            group = "qbittorrent";
            mode = "1775";
          }
        ];
      };
    };
  };
}

modules/nixos-modules/server/qbittorent/storage.nix (new file, 62 lines)
@@ -0,0 +1,62 @@
{
  lib,
  config,
  ...
}: let
  qbittorent_profile_directory = "/var/lib/qBittorrent/";
in {
  options.services.qbittorrent.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.qbittorrent.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.qbittorrent.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.qbittorrent.profileDir == qbittorent_profile_directory;
            message = "qbittorrent data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.qbittorrent.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (
        lib.mkIf config.services.qbittorrent.impermanence.enable
        {
          storage.impermanence.datasets = {
            "persist/system/root" = {
              directories."${qbittorent_profile_directory}" = {
                owner.name = "qbittorrent";
                group.name = "qbittorrent";
              };
            };
            "persist/system/qbittorrent" = {
              directories."${config.services.qbittorrent.mediaDir}" = {
                owner.name = "qbittorrent";
                group.name = "qbittorrent";
                owner.permissions = {
                  read = true;
                  write = true;
                  execute = true;
                };
                group.permissions = {
                  read = true;
                  write = true;
                  execute = true;
                };
                other.permissions = {
                  read = true;
                  write = false;
                  execute = true;
                };
              };
            };
          };
        }
      )
    ]))
  ]);
}
@@ -1,5 +1,5 @@
{...}: {
  imports = [
    ./impermanence.nix
    ./storage.nix
  ];
}

@@ -1,33 +0,0 @@ (file deleted)
{
  lib,
  config,
  ...
}: let
  radarr_data_directory = "/var/lib/radarr/.config/Radarr";
in {
  options.services.radarr = {
    impermanence.enable = lib.mkOption {
      type = lib.types.bool;
      default = config.services.radarr.enable && config.host.impermanence.enable;
    };
  };

  config = lib.mkIf config.services.radarr.impermanence.enable {
    assertions = [
      {
        assertion = config.services.radarr.dataDir == radarr_data_directory;
        message = "radarr data directory does not match persistence";
      }
    ];

    environment.persistence."/persist/system/root" = {
      directories = [
        {
          directory = radarr_data_directory;
          user = "radarr";
          group = "radarr";
        }
      ];
    };
  };
}

modules/nixos-modules/server/radarr/storage.nix (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  lib,
  config,
  ...
}: let
  radarr_data_directory = "/var/lib/radarr/.config/Radarr";
in {
  options.services.radarr.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.radarr.enable && config.storage.impermanence.enable;
  };

  config = lib.mkIf config.services.radarr.enable (lib.mkMerge [
    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
      {
        assertions = [
          {
            assertion = config.services.radarr.dataDir == radarr_data_directory;
            message = "radarr data directory does not match persistence";
          }
        ];
      }
      (lib.mkIf (!config.services.radarr.impermanence.enable) {
        # TODO: placeholder to configure a unique dataset for this service
      })
      (lib.mkIf config.services.radarr.impermanence.enable {
        storage.impermanence.datasets."persist/system/root" = {
          directories."${radarr_data_directory}" = {
            owner.name = "radarr";
            group.name = "radarr";
          };
        };
      })
    ]))
  ]);
}
@ -1,6 +1,6 @@
|
|||
{...}: {
|
||||
imports = [
|
||||
./reverseProxy.nix
|
||||
./impermanence.nix
|
||||
./storage.nix
|
||||
];
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,21 +0,0 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
dataDir = "/var/lib/acme";
|
||||
in {
|
||||
config = lib.mkIf (config.host.impermanence.enable && config.services.reverseProxy.enable) {
|
||||
environment.persistence."/persist/system/root" = {
|
||||
enable = true;
|
||||
hideMounts = true;
|
||||
directories = [
|
||||
{
|
||||
directory = dataDir;
|
||||
user = "acme";
|
||||
group = "acme";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||

28  modules/nixos-modules/server/reverseProxy/storage.nix  Normal file
@@ -0,0 +1,28 @@
+{
+  lib,
+  config,
+  ...
+}: let
+  dataDir = "/var/lib/acme";
+in {
+  options.services.reverseProxy.impermanence.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = config.services.reverseProxy.enable && config.storage.impermanence.enable;
+  };
+
+  config = lib.mkIf config.services.reverseProxy.enable (lib.mkMerge [
+    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
+      (lib.mkIf (!config.services.reverseProxy.impermanence.enable) {
+        # TODO: placeholder to configure a unique dataset for this service
+      })
+      (lib.mkIf config.services.reverseProxy.impermanence.enable {
+        storage.impermanence.datasets."persist/system/root" = {
+          directories."${dataDir}" = {
+            owner.name = "acme";
+            group.name = "acme";
+          };
+        };
+      })
+    ]))
+  ]);
+}

@@ -1,5 +1,5 @@
 {...}: {
   imports = [
-    ./impermanence.nix
+    ./storage.nix
   ];
 }

@@ -1,33 +0,0 @@
-{
-  lib,
-  config,
-  ...
-}: let
-  sonarr_data_directory = "/var/lib/sonarr/.config/NzbDrone";
-in {
-  options.services.sonarr = {
-    impermanence.enable = lib.mkOption {
-      type = lib.types.bool;
-      default = config.services.sonarr.enable && config.host.impermanence.enable;
-    };
-  };
-
-  config = lib.mkIf config.services.sonarr.impermanence.enable {
-    assertions = [
-      {
-        assertion = config.services.sonarr.dataDir == sonarr_data_directory;
-        message = "sonarr data directory does not match persistence";
-      }
-    ];
-
-    environment.persistence."/persist/system/root" = {
-      directories = [
-        {
-          directory = sonarr_data_directory;
-          user = "sonarr";
-          group = "sonarr";
-        }
-      ];
-    };
-  };
-}

36  modules/nixos-modules/server/sonarr/storage.nix  Normal file
@@ -0,0 +1,36 @@
+{
+  lib,
+  config,
+  ...
+}: let
+  sonarr_data_directory = "/var/lib/sonarr/.config/NzbDrone";
+in {
+  options.services.sonarr.impermanence.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = config.services.sonarr.enable && config.storage.impermanence.enable;
+  };
+
+  config = lib.mkIf config.services.sonarr.enable (lib.mkMerge [
+    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
+      {
+        assertions = [
+          {
+            assertion = config.services.sonarr.dataDir == sonarr_data_directory;
+            message = "sonarr data directory does not match persistence";
+          }
+        ];
+      }
+      (lib.mkIf (!config.services.sonarr.impermanence.enable) {
+        # TODO: placeholder to configure a unique dataset for this service
+      })
+      (lib.mkIf config.services.sonarr.impermanence.enable {
+        storage.impermanence.datasets."persist/system/root" = {
+          directories."${sonarr_data_directory}" = {
+            owner.name = "sonarr";
+            group.name = "sonarr";
+          };
+        };
+      })
+    ]))
+  ]);
+}

@@ -3,6 +3,13 @@
   config,
   ...
 }: {
+  options = {
+    services.openssh.impermanence.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = config.services.openssh.enable && config.storage.impermanence.enable;
+    };
+  };
+
   config = lib.mkMerge [
     {
       services = {
@@ -17,12 +24,32 @@
         };
       };
     }
-    (lib.mkIf config.host.impermanence.enable {
-      environment.persistence."/persist/system/root" = {
-        files = lib.lists.flatten (
-          builtins.map (hostKey: [hostKey.path "${hostKey.path}.pub"]) config.services.openssh.hostKeys
-        );
-      };
-    })
+    (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
+      {
+        # SSH host keys need to be persisted to maintain server identity
+      }
+      (lib.mkIf (!config.services.openssh.impermanence.enable) {
+        # TODO: placeholder to configure a unique dataset for this service
+      })
+      (lib.mkIf config.services.openssh.impermanence.enable {
+        storage.impermanence.datasets."persist/system/root" = {
+          files = builtins.listToAttrs (
+            lib.lists.flatten (
+              builtins.map (hostKey: [
+                {
+                  name = hostKey.path;
+                  value = {enable = true;};
+                }
+                {
+                  name = "${hostKey.path}.pub";
+                  value = {enable = true;};
+                }
+              ])
+              config.services.openssh.hostKeys
+            )
+          );
+        };
+      })
+    ]))
   ];
 }
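The listToAttrs-over-flatten pipeline is easiest to check against concrete data. A sketch of its result, assuming the stock NixOS services.openssh.hostKeys default of one ed25519 and one RSA key; hosts that override hostKeys get attribute names derived from their own key paths instead:

{
  "/etc/ssh/ssh_host_ed25519_key" = {enable = true;};
  "/etc/ssh/ssh_host_ed25519_key.pub" = {enable = true;};
  "/etc/ssh/ssh_host_rsa_key" = {enable = true;};
  "/etc/ssh/ssh_host_rsa_key.pub" = {enable = true;};
}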

@@ -66,10 +66,25 @@ in {
         }
       ];
+
+      # fixes issues with /var/lib/private not having the correct permissions https://github.com/nix-community/impermanence/issues/254
+      system.activationScripts."createPersistentStorageDirs".deps = ["var-lib-private-permissions" "users" "groups"];
+      system.activationScripts = {
+        "var-lib-private-permissions" = {
+          deps = ["specialfs"];
+          text = ''
+            mkdir -p /persist/system/root/var/lib/private
+            chmod 0700 /persist/system/root/var/lib/private
+          '';
+        };
+      };
+
+      programs.fuse.userAllowOther = true;
+
       environment.persistence =
         lib.mapAttrs (datasetName: dataset: {
           enable = true;
           hideMounts = true;
           persistentStoragePath = "/${datasetName}";
           directories = lib.mapAttrsToList (path: dirConfig: {
             directory = path;
             user = dirConfig.owner.name;
@@ -78,18 +93,17 @@ in {
          }) (lib.filterAttrs (_: dirConfig: dirConfig.enable) dataset.directories);
          files = lib.mapAttrsToList (path: fileConfig: {
            file = path;
            user = fileConfig.owner.name;
            group = fileConfig.group.name;
            mode = permissionsToMode fileConfig;
            parentDirectory = {
              user = fileConfig.owner.name;
              group = fileConfig.group.name;
              mode = permissionsToMode fileConfig;
            };
          }) (lib.filterAttrs (_: fileConfig: fileConfig.enable) dataset.files);
        })
        config.storage.impermanence.datasets;
      # TODO: need for boot on filesystems
    }
    (lib.mkIf config.storage.zfs.enable {
      # TODO: activationScripts config for private folders
      # TODO: rollback post resume
      # TODO: fuse userAllowOther
      storage.zfs.datasets =
        lib.mapAttrs (
          datasetName: dataset:
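This mapAttrs is the bridge from the new storage.impermanence.datasets schema to the upstream impermanence module's native schema. A sketch of one translation, using the syncthing directory declared later in this PR; the mode and parentDirectory fields derived from permissionsToMode are omitted since that helper sits outside this hunk:

# storage.impermanence.datasets."persist/system/root"
#   .directories."/mnt/sync" = { owner.name = "syncthing"; group.name = "syncthing"; };
# becomes:
environment.persistence."persist/system/root" = {
  enable = true;
  hideMounts = true;
  persistentStoragePath = "/persist/system/root";
  directories = [
    {
      directory = "/mnt/sync";
      user = "syncthing";
      group = "syncthing";
    }
  ];
};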

@@ -32,13 +32,6 @@
           autoSnapshot = false;
         };
       };
-      "persist/system/root" = {
-        type = "zfs_fs";
-        mount = {
-          enable = true;
-          mountPoint = "/";
-        };
-      };
     };
   }
   (lib.mkIf (!config.storage.impermanence.enable) {
@@ -46,6 +39,10 @@
       storage.zfs.datasets = {
         "persist/system/root" = {
           type = "zfs_fs";
+          mount = {
+            enable = false;
+            mountPoint = "/";
+          };
           snapshot = {
             autoSnapshot = true;
           };
@@ -53,17 +50,10 @@
       };
     })
     (lib.mkIf config.storage.impermanence.enable {
-      storage.impermanence.datasets = {
-        "persist/system/root" = {
-          directories = {
-            "/var/lib/nixos".enable = true;
-            "/var/lib/systemd/coredump".enable = true;
-          };
-          files = {
-            "/etc/machine-id".enable = true;
-          };
-        };
-      };
+      boot.initrd.postResumeCommands = lib.mkAfter ''
+        zfs rollback -r rpool/local/system/root@blank
+      '';
+
       storage.zfs.datasets = {
         "local/system/root" = {
           type = "zfs_fs";
@@ -77,6 +67,22 @@
         };
       };
+
+      storage.impermanence.datasets = {
+        "persist/system/root" = {
+          mount = {
+            enable = false;
+            mountPoint = "/";
+          };
+          directories = {
+            "/var/lib/nixos".enable = true;
+            "/var/lib/systemd/coredump".enable = true;
+          };
+          files = {
+            "/etc/machine-id".enable = true;
+          };
+        };
+      };

       # TODO: home-manager.users.<user>.storage.impermanence.enable
       # is false then persist the entire directory of the user
       # if true persist home-manager.users.<user>.storage.impermanence.datasets

@@ -44,12 +44,12 @@

     mount = {
       enable = lib.mkOption {
-        type = lib.types.nullOr (lib.types.either lib.types.bool (lib.types.enum ["on" "off" "noauto"]));
-        default = null;
+        type = lib.types.either lib.types.bool (lib.types.enum ["on" "off" "noauto"]);
+        default = true;
+        description = "Whether and how the dataset should be mounted";
       };
       mountPoint = lib.mkOption {
-        type = lib.types.nullOr lib.types.str;
-        default = null;
+        type = lib.types.str;
         description = "Controls the mount point used for this file system";
       };
     };
@@ -57,18 +57,15 @@
     encryption = {
       enable = lib.mkEnableOption "should encryption be enabled";
       type = lib.mkOption {
-        type = lib.types.nullOr (lib.types.enum ["aes-128-ccm" "aes-192-ccm" "aes-256-ccm" "aes-128-gcm" "aes-192-gcm" "aes-256-gcm"]);
-        default = null;
+        type = lib.types.enum ["aes-128-ccm" "aes-192-ccm" "aes-256-ccm" "aes-128-gcm" "aes-192-gcm" "aes-256-gcm"];
         description = "What encryption type to use";
       };
       keyformat = lib.mkOption {
-        type = lib.types.nullOr (lib.types.enum ["raw" "hex" "passphrase"]);
-        default = null;
+        type = lib.types.enum ["raw" "hex" "passphrase"];
         description = "Format of the encryption key";
       };
       keylocation = lib.mkOption {
-        type = lib.types.nullOr lib.types.str;
-        default = null;
+        type = lib.types.str;
         description = "Location of the encryption key";
       };
     };
@@ -77,14 +74,11 @@
       # This option should set this option flag
       # "com.sun:auto-snapshot" = "false";
       autoSnapshot = lib.mkOption {
-        type = lib.types.nullOr lib.types.bool;
-        default = null;
+        type = lib.types.bool;
+        default = false;
         description = "Enable automatic snapshots for this dataset";
       };
-      # TODO: this is what blank snapshot should set
-      # postCreateHook = ''
-      #   zfs snapshot rpool/local/system/root@blank
-      # '';
+      # Creates a blank snapshot in the post create hook for rollback purposes
       blankSnapshot = lib.mkEnableOption "Should a blank snapshot be auto created in the post create hook";
     };


@@ -49,6 +49,7 @@ in {
   config = {
     mount = {
       mountPoint = lib.mkDefault "/${name}";
+      enable = lib.mkDefault true;
     };
   };
 }
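With enable and mountPoint both seeded through mkDefault, a dataset declaration can shrink to its type alone. A minimal sketch (the dataset name is hypothetical):

storage.zfs.datasets."persist/media" = {
  type = "zfs_fs";
  # mount.mountPoint falls back to "/persist/media" via mkDefault "/${name}",
  # mount.enable to true, and snapshot.autoSnapshot to false.
};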

@@ -5,6 +5,98 @@ args @ {
   ...
 }: let
   datasetSubmodule = (import ./submodules/dataset.nix) args;
+
+  # Hash function for disk names (max 27 chars to fit GPT limitations)
+  hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));
+
+  # Helper to flatten vdevs into list of devices with names
+  allVdevDevices = lib.lists.flatten (builtins.map (
+      vdev:
+        builtins.map (
+          device:
+            lib.attrsets.nameValuePair (hashDisk device.device) device
+        )
+        vdev
+    )
+    config.storage.zfs.pool.vdevs);
+
+  # Cache devices with names
+  allCacheDevices = builtins.map (
+    device:
+      lib.attrsets.nameValuePair (hashDisk device.device) device
+  ) (config.storage.zfs.pool.cache);
+
+  # All devices (vdevs + cache)
+  allDevices = allVdevDevices ++ allCacheDevices;
+
+  # Boot devices - filter devices that have boot = true
+  bootDevices = builtins.filter (device: device.value.boot) allDevices;
+
+  # Helper function to convert dataset options to ZFS properties
+  datasetToZfsOptions = dataset: let
+    baseOptions =
+      (lib.attrsets.optionalAttrs (dataset.acltype != null) {acltype = dataset.acltype;})
+      // (lib.attrsets.optionalAttrs (dataset.relatime != null) {relatime = dataset.relatime;})
+      // (lib.attrsets.optionalAttrs (dataset.atime != null) {atime = dataset.atime;})
+      // (lib.attrsets.optionalAttrs (dataset.xattr != null) {xattr = dataset.xattr;})
+      // (lib.attrsets.optionalAttrs (dataset.compression != null) {compression = dataset.compression;})
+      // (lib.attrsets.optionalAttrs (dataset.sync != null) {sync = dataset.sync;})
+      // (lib.attrsets.optionalAttrs (dataset.recordSize != null) {recordSize = dataset.recordSize;});
+
+    encryptionOptions = lib.attrsets.optionalAttrs (dataset.encryption.enable) (
+      (lib.attrsets.optionalAttrs (dataset.encryption ? type) {encryption = dataset.encryption.type;})
+      // (lib.attrsets.optionalAttrs (dataset.encryption ? keyformat) {keyformat = dataset.encryption.keyformat;})
+      // (lib.attrsets.optionalAttrs (dataset.encryption ? keylocation) {keylocation = dataset.encryption.keylocation;})
+    );
+
+    mountOptions = lib.attrsets.optionalAttrs (dataset ? mount && dataset.mount ? enable) (
+      if builtins.isBool dataset.mount.enable
+      then {
+        canmount =
+          if dataset.mount.enable
+          then "on"
+          else "off";
+      }
+      else {canmount = dataset.mount.enable;}
+    );
+
+    snapshotOptions = lib.attrsets.optionalAttrs (dataset ? snapshot && dataset.snapshot ? autoSnapshot) {
+      "com.sun:auto-snapshot" =
+        if dataset.snapshot.autoSnapshot
+        then "true"
+        else "false";
+    };
+  in
+    baseOptions // encryptionOptions // mountOptions // snapshotOptions;
+
+  # Helper to generate post create hooks
+  generatePostCreateHook = name: dataset:
+    dataset.postCreateHook
+    + (lib.optionalString dataset.snapshot.blankSnapshot ''
+      zfs snapshot rpool/${name}@blank
+    '');
+
+  # Convert datasets to disko format
+  convertedDatasets = builtins.listToAttrs (
+    (lib.attrsets.mapAttrsToList (
+        name: dataset:
+          lib.attrsets.nameValuePair name {
+            type = dataset.type;
+            options = datasetToZfsOptions dataset;
+            mountpoint = dataset.mount.mountPoint or null;
+            postCreateHook = generatePostCreateHook name dataset;
+          }
+      )
+      config.storage.zfs.datasets)
+    ++ (lib.optional (config.storage.zfs.rootDataset != null) (
+      lib.attrsets.nameValuePair "" {
+        type = config.storage.zfs.rootDataset.type;
+        options = datasetToZfsOptions config.storage.zfs.rootDataset;
+        mountpoint = config.storage.zfs.rootDataset.mount.mountPoint or null;
+        postCreateHook = generatePostCreateHook "" config.storage.zfs.rootDataset;
+      }
+    ))
+  );
 in {
   options.storage = {
     zfs = {
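Two of these helpers are clearest on worked values. In the sketch below the 27-character digest is invented (only its length is real), and the input attrset mirrors the SOPS dataset declared in the users module later in this diff:

# hashDisk "wwn-0xdeadbeef00000000"      -- hypothetical drive id
#   => "3f9c2a71e0d94b85c6a1f02d7e4"     -- illustrative 27-char digest
#
# datasetToZfsOptions {
#   atime = "off"; relatime = "off";
#   mount.enable = true;
#   snapshot.autoSnapshot = false;
#   encryption.enable = false; ...
# }
#   => { atime = "off"; relatime = "off"; canmount = "on";
#        "com.sun:auto-snapshot" = "false"; }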
@@ -39,12 +131,14 @@ in {
       lib.types.coercedTo lib.types.str (device: {
         device = device;
         boot = false;
-      }) {
-        device = lib.mkOption {
-          type = lib.types.str;
-        };
-        boot = lib.mkEnableOption "should this device be a boot device";
-      };
+      }) (lib.types.submodule {
+        options = {
+          device = lib.mkOption {
+            type = lib.types.str;
+          };
+          boot = lib.mkEnableOption "should this device be a boot device";
+        };
+      });
   in {
     encryption = {
       enable = lib.mkEnableOption "Should encryption be enabled on this pool.";
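The coercedTo wrapper keeps the old string-only spelling valid alongside the new submodule form. Both entries below describe the same device (drive id hypothetical); only the second spelling can mark it as a boot device:

vdevs = [["wwn-0xdeadbeef00000000"]];                              # coerced to { device = ...; boot = false; }
vdevs = [[{ device = "wwn-0xdeadbeef00000000"; boot = true; }]];   # explicit form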
@@ -75,15 +169,15 @@ in {
         description = "List of vdevs, where each vdev is a list of devices";
       };
       cache = lib.mkOption {
-        type = lib.types.attrsOf deviceType;
-        default = {};
+        type = lib.types.listOf deviceType;
+        default = [];
       };
     };

     rootDataset = lib.mkOption {
-      type = lib.types.submodule datasetSubmodule;
+      type = lib.types.nullOr (lib.types.submodule datasetSubmodule);
       description = "Root ZFS dataset to create";
-      default = {};
+      default = null;
     };

     datasets = lib.mkOption {
@@ -96,15 +190,109 @@

   config = lib.mkIf config.storage.zfs.enable (lib.mkMerge [
     {
-      services.zfs = {
-        autoScrub.enable = true;
-        autoSnapshot.enable = true;
-      };
+      # Assertion that we have at least one boot device
+      assertions = [
+        {
+          assertion = (builtins.length bootDevices) > 0;
+          message = "ZFS configuration requires at least one boot device. Set boot = true for at least one device in your vdevs or cache.";
+        }
+      ];

-      # TODO: configure disko
-      # TODO: assertion that we have a boot device
-      # TODO: check that disks on system match configuration and warn user if they don't
-      # TODO: check that datasets on system match configuration and warn user if they don't
+      # # Warning about disk/dataset mismatches - these would be runtime checks
+      # warnings = let
+      #   configuredDisks = builtins.map (device: device.device) (builtins.map (dev: dev.value) allDevices);
+      #   diskWarnings =
+      #     lib.optional (config.storage.zfs.enable)
+      #     "ZFS: Please ensure the following disks are available on your system: ${builtins.concatStringsSep ", " configuredDisks}";
+
+      #   configuredDatasets = builtins.attrNames config.storage.zfs.datasets;
+      #   datasetWarnings =
+      #     lib.optional (config.storage.zfs.enable && (builtins.length configuredDatasets) > 0)
+      #     "ZFS: Configured datasets: ${builtins.concatStringsSep ", " configuredDatasets}. Ensure these match your intended ZFS layout.";
+      # in
+      #   diskWarnings ++ datasetWarnings;
+
+      # services.zfs = {
+      #   autoScrub.enable = true;
+      #   autoSnapshot.enable = true;
+      # };
+
+      # Configure disko for ZFS setup
+      disko.devices = {
+        disk = builtins.listToAttrs (
+          builtins.map (
+            drive:
+              lib.attrsets.nameValuePair (drive.name) {
+                type = "disk";
+                device = "/dev/disk/by-id/${drive.value.device}";
+                content = {
+                  type = "gpt";
+                  partitions = {
+                    ESP = lib.mkIf drive.value.boot {
+                      size = config.storage.zfs.pool.bootPartitionSize;
+                      type = "EF00";
+                      content = {
+                        type = "filesystem";
+                        format = "vfat";
+                        mountpoint = "/boot";
+                        mountOptions = ["umask=0077"];
+                      };
+                    };
+                    zfs = {
+                      size = "100%";
+                      content = {
+                        type = "zfs";
+                        pool = "rpool";
+                      };
+                    };
+                  };
+                };
+              }
+          )
+          allDevices
+        );
+
+        zpool = {
+          rpool = {
+            type = "zpool";
+            mode = {
+              topology = {
+                type = "topology";
+                vdev =
+                  builtins.map (vdev: {
+                    mode = config.storage.zfs.pool.mode;
+                    members = builtins.map (device: hashDisk device.device) vdev;
+                  })
+                  config.storage.zfs.pool.vdevs;
+                cache = builtins.map (device: hashDisk device.device) config.storage.zfs.pool.cache;
+              };
+            };
+
+            options = {
+              ashift = "12";
+              autotrim = "on";
+            };
+
+            rootFsOptions =
+              {
+                canmount = "off";
+                mountpoint = "none";
+                xattr = "sa";
+                acltype = "posixacl";
+                relatime = "on";
+                compression = "lz4";
+                "com.sun:auto-snapshot" = "false";
+              }
+              // (lib.attrsets.optionalAttrs config.storage.zfs.pool.encryption.enable {
+                encryption = "on";
+                keyformat = config.storage.zfs.pool.encryption.keyformat;
+                keylocation = config.storage.zfs.pool.encryption.keylocation;
+              });
+
+            datasets = convertedDatasets;
+          };
+        };
+      };
     }
     (lib.mkIf config.storage.zfs.notifications.enable {
       programs.msmtp = {
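Putting the pool options and the disko wiring together, a two-disk mirror could be declared as sketched below (drive ids hypothetical; bootPartitionSize is assumed to carry a default elsewhere in this module). disko then creates a GPT layout per disk, adds an ESP only where boot = true, and feeds every ZFS partition into rpool:

storage.zfs = {
  enable = true;
  pool = {
    mode = "mirror";
    vdevs = [
      [
        { device = "wwn-0xdeadbeef00000001"; boot = true; }
        "wwn-0xdeadbeef00000002" # string form, coerced to boot = false
      ]
    ];
    cache = [];
  };
};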

@@ -1,69 +0,0 @@
-{
-  config,
-  lib,
-  syncthingConfiguration,
-  ...
-}: let
-  mountDir = "/mnt/sync";
-  configDir = "/etc/syncthing";
-in {
-  config = lib.mkMerge [
-    {
-      systemd = lib.mkIf config.services.syncthing.enable {
-        tmpfiles.rules = [
-          "A ${mountDir} - - - - u:syncthing:rwX,g:syncthing:rwX,o::-"
-          "d ${mountDir} 2755 syncthing syncthing -"
-          "d ${config.services.syncthing.dataDir} 775 syncthing syncthing -"
-          "d ${config.services.syncthing.configDir} 755 syncthing syncthing -"
-        ];
-      };
-    }
-    (lib.mkIf config.services.syncthing.enable (lib.mkMerge [
-      {
-        services.syncthing = {
-          user = "syncthing";
-          group = "syncthing";
-          dataDir = "${mountDir}/default";
-          configDir = configDir;
-          overrideDevices = true;
-          overrideFolders = true;
-          configuration = syncthingConfiguration;
-          deviceName = config.networking.hostName;
-        };
-      }
-
-      (lib.mkIf config.host.impermanence.enable {
-        assertions =
-          [
-            {
-              assertion = config.services.syncthing.configDir == configDir;
-              message = "syncthing config dir does not match persistence";
-            }
-          ]
-          ++ lib.attrsets.mapAttrsToList (_: folder: {
-            assertion = lib.strings.hasPrefix mountDir folder.path;
-            message = "syncthing folder ${folder.label} is stored at ${folder.path} which not under the persisted path of ${mountDir}";
-          })
-          config.services.syncthing.settings.folders;
-        environment.persistence = {
-          "/persist/system/root" = {
-            enable = true;
-            hideMounts = true;
-            directories = [
-              {
-                directory = mountDir;
-                user = "syncthing";
-                group = "syncthing";
-              }
-              {
-                directory = configDir;
-                user = "syncthing";
-                group = "syncthing";
-              }
-            ];
-          };
-        };
-      })
-    ]))
-  ];
-}

6  modules/nixos-modules/sync/default.nix  Normal file
@@ -0,0 +1,6 @@
+{...}: {
+  imports = [
+    ./sync.nix
+    ./storage.nix
+  ];
+}

57  modules/nixos-modules/sync/storage.nix  Normal file
@@ -0,0 +1,57 @@
+{
+  config,
+  lib,
+  ...
+}: let
+  mountDir = "/mnt/sync";
+  configDir = "/etc/syncthing";
+in {
+  options = {
+    services.syncthing.impermanence.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = config.services.syncthing.enable && config.storage.impermanence.enable;
+    };
+  };
+
+  config = lib.mkIf config.services.syncthing.enable (
+    lib.mkMerge [
+      (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
+        {
+          # Syncthing needs persistent storage for configuration and data
+        }
+        (lib.mkIf (!config.services.syncthing.impermanence.enable) {
+          # TODO: placeholder to configure a unique dataset for this service
+        })
+        (lib.mkIf config.services.syncthing.impermanence.enable {
+          assertions =
+            [
+              {
+                assertion = config.services.syncthing.configDir == configDir;
+                message = "syncthing config dir does not match persistence";
+              }
+            ]
+            ++ lib.attrsets.mapAttrsToList (_: folder: {
+              assertion = lib.strings.hasPrefix mountDir folder.path;
+              message = "syncthing folder ${folder.label} is stored at ${folder.path} which is not under the persisted path of ${mountDir}";
+            })
+            config.services.syncthing.settings.folders;
+
+          storage.impermanence.datasets."persist/system/root" = {
+            directories = {
+              "${mountDir}" = {
+                enable = true;
+                owner.name = "syncthing";
+                group.name = "syncthing";
+              };
+              "${configDir}" = {
+                enable = true;
+                owner.name = "syncthing";
+                group.name = "syncthing";
+              };
+            };
+          };
+        })
+      ]))
+    ]
+  );
+}

36  modules/nixos-modules/sync/sync.nix  Normal file
@@ -0,0 +1,36 @@
+{
+  config,
+  lib,
+  syncthingConfiguration,
+  ...
+}: let
+  mountDir = "/mnt/sync";
+  configDir = "/etc/syncthing";
+in {
+  config = lib.mkMerge [
+    {
+      systemd = lib.mkIf config.services.syncthing.enable {
+        tmpfiles.rules = [
+          "A ${mountDir} - - - - u:syncthing:rwX,g:syncthing:rwX,o::-"
+          "d ${mountDir} 2755 syncthing syncthing -"
+          "d ${config.services.syncthing.dataDir} 775 syncthing syncthing -"
+          "d ${config.services.syncthing.configDir} 755 syncthing syncthing -"
+        ];
+      };
+    }
+    (lib.mkIf config.services.syncthing.enable (lib.mkMerge [
+      {
+        services.syncthing = {
+          user = "syncthing";
+          group = "syncthing";
+          dataDir = "${mountDir}/default";
+          configDir = configDir;
+          overrideDevices = true;
+          overrideFolders = true;
+          configuration = syncthingConfiguration;
+          deviceName = config.networking.hostName;
+        };
+      }
+    ]))
+  ];
+}
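Because mountDir, configDir, and the derived dataDir are fixed strings in this module, the four tmpfiles rules expand deterministically; straight substitution of the let bindings gives:

# A /mnt/sync - - - - u:syncthing:rwX,g:syncthing:rwX,o::-
# d /mnt/sync 2755 syncthing syncthing -
# d /mnt/sync/default 775 syncthing syncthing -
# d /etc/syncthing 755 syncthing syncthing -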

@@ -1,34 +0,0 @@
-{
-  config,
-  lib,
-  ...
-}: let
-  tailscale_data_directory = "/var/lib/tailscale";
-in {
-  options.host.tailscale = {
-    enable = lib.mkEnableOption "should tailscale be enabled on this computer";
-  };
-
-  config = lib.mkIf config.services.tailscale.enable (
-    lib.mkMerge [
-      {
-        # any configs we want shared between all machines
-      }
-      (lib.mkIf config.host.impermanence.enable {
-        environment.persistence = {
-          "/persist/system/root" = {
-            enable = true;
-            hideMounts = true;
-            directories = [
-              {
-                directory = tailscale_data_directory;
-                user = "root";
-                group = "root";
-              }
-            ];
-          };
-        };
-      })
-    ]
-  );
-}

6  modules/nixos-modules/tailscale/default.nix  Normal file
@@ -0,0 +1,6 @@
+{...}: {
+  imports = [
+    ./tailscale.nix
+    ./storage.nix
+  ];
+}

36  modules/nixos-modules/tailscale/storage.nix  Normal file
@@ -0,0 +1,36 @@
+{
+  config,
+  lib,
+  ...
+}: let
+  tailscale_data_directory = "/var/lib/tailscale";
+in {
+  options = {
+    services.tailscale.impermanence.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = config.services.tailscale.enable && config.storage.impermanence.enable;
+    };
+  };
+
+  config = lib.mkIf config.services.tailscale.enable (
+    lib.mkMerge [
+      (lib.mkIf config.storage.zfs.enable (lib.mkMerge [
+        {
+          # Tailscale needs persistent storage for keys and configuration
+        }
+        (lib.mkIf (!config.services.tailscale.impermanence.enable) {
+          # TODO: placeholder to configure a unique dataset for this service
+        })
+        (lib.mkIf config.services.tailscale.impermanence.enable {
+          storage.impermanence.datasets."persist/system/root" = {
+            directories."${tailscale_data_directory}" = {
+              enable = true;
+              owner.name = "root";
+              group.name = "root";
+            };
+          };
+        })
+      ]))
+    ]
+  );
+}

19  modules/nixos-modules/tailscale/tailscale.nix  Normal file
@@ -0,0 +1,19 @@
+{
+  config,
+  lib,
+  ...
+}: {
+  options = {
+    host.tailscale = {
+      enable = lib.mkEnableOption "should tailscale be enabled on this computer";
+    };
+  };
+
+  config = lib.mkIf config.services.tailscale.enable (
+    lib.mkMerge [
+      {
+        # any configs we want shared between all machines
+      }
+    ]
+  );
+}

@@ -399,79 +399,75 @@ in {
         };
       };
     }
-    (lib.mkIf config.host.impermanence.enable {
-      boot.initrd.postResumeCommands = lib.mkAfter (
-        lib.strings.concatLines (builtins.map (user: "zfs rollback -r rpool/local/home/${user.name}@blank")
-        normalUsers)
-      );
+    (lib.mkIf config.storage.impermanence.enable (lib.mkMerge [
+      (lib.mkIf config.storage.zfs.enable {
+        storage.zfs.datasets."persist/system/sops" = {
+          type = "zfs_fs";
+          mount = {
+            enable = true;
+            mountPoint = SOPS_AGE_KEY_DIRECTORY;
+          };
+          atime = "off";
+          relatime = "off";
+        };
+      })
+    ]))
+    # (lib.mkIf config.host.impermanence.enable {
+    #   boot.initrd.postResumeCommands = lib.mkAfter (
+    #     lib.strings.concatLines (builtins.map (user: "zfs rollback -r rpool/local/home/${user.name}@blank")
+    #     normalUsers)
+    #   );

-      systemd = {
-        tmpfiles.rules =
-          builtins.map (
-            user: "d /persist/home/${user.name} 700 ${user.name} ${user.name} -"
-          )
-          normalUsers;
-      };
+    # systemd = {
+    #   tmpfiles.rules =
+    #     builtins.map (
+    #       user: "d /persist/home/${user.name} 700 ${user.name} ${user.name} -"
+    #     )
+    #     normalUsers;
+    # };

-      fileSystems = lib.mkMerge [
-        {
-          ${SOPS_AGE_KEY_DIRECTORY}.neededForBoot = true;
-        }
-        (
-          builtins.listToAttrs (
-            builtins.map (user:
-              lib.attrsets.nameValuePair "/persist/home/${user.name}" {
-                neededForBoot = true;
-              })
-            normalUsers
-          )
-        )
-        (
-          builtins.listToAttrs (
-            builtins.map (user:
-              lib.attrsets.nameValuePair "/home/${user.name}" {
-                neededForBoot = true;
-              })
-            normalUsers
-          )
-        )
-      ];
+    # fileSystems = lib.mkMerge [
+    #   (
+    #     builtins.listToAttrs (
+    #       builtins.map (user:
+    #         lib.attrsets.nameValuePair "/persist/home/${user.name}" {
+    #           neededForBoot = true;
+    #         })
+    #       normalUsers
+    #     )
+    #   )
+    #   (
+    #     builtins.listToAttrs (
+    #       builtins.map (user:
+    #         lib.attrsets.nameValuePair "/home/${user.name}" {
+    #           neededForBoot = true;
+    #         })
+    #       normalUsers
+    #     )
+    #   )
+    # ];

-      host.storage.pool.extraDatasets = lib.mkMerge (
-        [
-          {
-            # sops age key needs to be available to pre persist for user generation
-            "local/system/sops" = {
-              type = "zfs_fs";
-              mountpoint = SOPS_AGE_KEY_DIRECTORY;
-              options = {
-                atime = "off";
-                relatime = "off";
-                canmount = "on";
-              };
-            };
-          }
-        ]
-        ++ (
-          builtins.map (user: {
-            "local/home/${user.name}" = {
-              type = "zfs_fs";
-              mountpoint = "/home/${user.name}";
-              options = {
-                canmount = "on";
-              };
-              postCreateHook = ''
-                zfs snapshot rpool/local/home/${user.name}@blank
-              '';
-            };
-            "persist/home/${user.name}" = {
-              type = "zfs_fs";
-              mountpoint = "/persist/home/${user.name}";
-            };
-          })
-          normalUsers
-        )
-      );
-    })
+    # host.storage.pool.extraDatasets = lib.mkMerge (
+    #   (
+    #     builtins.map (user: {
+    #       "local/home/${user.name}" = {
+    #         type = "zfs_fs";
+    #         mountpoint = "/home/${user.name}";
+    #         options = {
+    #           canmount = "on";
+    #         };
+    #         postCreateHook = ''
+    #           zfs snapshot rpool/local/home/${user.name}@blank
+    #         '';
+    #       };
+    #       "persist/home/${user.name}" = {
+    #         type = "zfs_fs";
+    #         mountpoint = "/persist/home/${user.name}";
+    #       };
+    #     })
+    #     normalUsers
+    #   )
+    # );
+    # })
   ];
 }
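For reference while the rollback block above stays commented out: with two users, the concatLines call would emit the following initrd commands (user names hypothetical):

# zfs rollback -r rpool/local/home/alice@blank
# zfs rollback -r rpool/local/home/bob@blank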