made disko/impermanence config into a module

Leyla Becker 2024-12-10 23:43:53 -06:00
parent c28731a1a7
commit 30ad3c91b9
10 changed files with 571 additions and 466 deletions
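In short: the hand-written disko-config.nix and impermanence.nix that lived next to the nas configuration are replaced by two reusable NixOS modules under modules/nixos-modules, which expose host.storage and host.impermanence options; the nas host now only declares its drives, cache device, and hostId. A minimal sketch of the new interface as a host consumes it (option names are taken from this diff; wrapping them in a host = { ... } block is an inference from the options.host.* definitions below, and the drive names here are placeholders):

    { ... }: {
      host = {
        impermanence.enable = true;
        storage = {
          enable = true;
          encryption = true;
          pool = {
            # hypothetical /dev/disk/by-id names, stand-ins for the real drives listed below
            drives = ["ata-EXAMPLE_DRIVE_A" "ata-EXAMPLE_DRIVE_B"];
            cache = ["nvme-EXAMPLE_CACHE"];
          };
        };
      };
      networking.hostId = "c51763d6";
    }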


@@ -1,11 +1,6 @@
 # server nas
-{
-  inputs,
-  pkgs,
-  ...
-}: {
+{pkgs, ...}: {
   imports = [
-    inputs.disko.nixosModules.disko
     # ./services.nix
   ];
@@ -21,6 +16,33 @@
       ester.isNormalUser = false;
       eve.isNormalUser = false;
     };
+    impermanence.enable = true;
+    storage = {
+      enable = true;
+      encryption = true;
+      pool = {
+        drives = [
+          "ata-ST18000NE000-3G6101_ZVTCXVEB"
+          "ata-ST18000NE000-3G6101_ZVTCXWSC"
+          "ata-ST18000NE000-3G6101_ZVTD10EH"
+          "ata-ST18000NT001-3NF101_ZVTE0S3Q"
+          "ata-ST18000NT001-3NF101_ZVTEF27J"
+          "ata-ST18000NT001-3NF101_ZVTEZACV"
+        ];
+        cache = [
+          "nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F"
+        ];
+        # extraDatasets = {
+        #   "persist/system/var/lib/jellyfin/media" = {
+        #     type = "zfs_fs";
+        #     mountpoint = "/persist/system/var/lib/jellyfin/media";
+        #   };
+        # };
+      };
+    };
+  };
+  networking = {
+    hostId = "c51763d6";
   };
   # apps = {


@@ -1,9 +1,7 @@
 # server nas
 {...}: {
   imports = [
-    ./disko-config.nix
     ./hardware-configuration.nix
-    ./impermanence.nix
     ./configuration.nix
   ];
 }


@@ -1,181 +0,0 @@
{lib, ...}: let
zfsDisk = devicePath: {
type = "disk";
device = devicePath;
content = {
type = "gpt";
partitions = {
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "rpool";
};
};
};
};
};
cacheDisk = devicePath: {
type = "disk";
device = devicePath;
content = {
type = "gpt";
partitions = {
# We are having to boot off of the nvm cache drive because I cant figure out how to boot via the HBA
ESP = {
size = "64M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = ["umask=0077"];
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "rpool";
};
};
};
};
};
in {
disko.devices = {
disk = {
hd_18_tb_a = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTCXVEB";
hd_18_tb_b = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTCXWSC";
hd_18_tb_c = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTD10EH";
hd_18_tb_d = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTE0S3Q";
hd_18_tb_e = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTEF27J";
hd_18_tb_f = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTEZACV";
ssd_4_tb_a = cacheDisk "/dev/disk/by-id/nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F";
};
zpool = {
rpool = {
type = "zpool";
mode = {
topology = {
type = "topology";
vdev = [
{
mode = "raidz2";
members = [
"hd_18_tb_a"
"hd_18_tb_b"
"hd_18_tb_c"
"hd_18_tb_d"
"hd_18_tb_e"
"hd_18_tb_f"
];
}
];
cache = ["ssd_4_tb_a"];
};
};
options = {
ashift = "12";
autotrim = "on";
};
rootFsOptions =
{
canmount = "off";
mountpoint = "none";
xattr = "sa";
acltype = "posixacl";
relatime = "on";
compression = "lz4";
"com.sun:auto-snapshot" = "false";
}
# TODO: have an option to enable encryption
// lib.attrsets.optionalAttrs false {
encryption = "on";
keyformat = "hex";
keylocation = "prompt";
};
datasets = {
# local datasets are for data that should be considered ephemeral
"local" = {
type = "zfs_fs";
options.canmount = "off";
};
# the nix directory is local because its all generable from our configuration
"local/system/nix" = {
type = "zfs_fs";
mountpoint = "/nix";
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
"local/system/sops" = {
type = "zfs_fs";
mountpoint = import ../../../const/sops_age_key_directory.nix;
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
"local/system/root" = {
type = "zfs_fs";
mountpoint = "/";
options = {
canmount = "on";
};
postCreateHook = ''
zfs snapshot rpool/local/system/root@blank
'';
};
"local/home/leyla" = {
type = "zfs_fs";
mountpoint = "/home/leyla";
options = {
canmount = "on";
};
postCreateHook = ''
zfs snapshot rpool/local/home/leyla@blank
'';
};
# persist datasets are datasets that contain information that we would like to keep around
"persist" = {
type = "zfs_fs";
options.canmount = "off";
};
"persist/system/root" = {
type = "zfs_fs";
mountpoint = "/persist/system/root";
options = {
"com.sun:auto-snapshot" = "true";
mountpoint = "/persist/system/root";
};
};
"persist/home/leyla" = {
type = "zfs_fs";
mountpoint = "/persist/home/leyla";
options = {
"com.sun:auto-snapshot" = "true";
mountpoint = "/persist/home/leyla";
};
};
# TODO: separate dataset for logs that wont participate in snapshots and rollbacks with the rest of the system
};
};
};
};
networking = {
hostId = "c51763d6";
};
}


@@ -1,83 +1,4 @@
-{lib, ...}: {
-  boot.initrd.postResumeCommands = lib.mkAfter ''
-    zfs rollback -r rpool/local/system/root@blank
-    zfs rollback -r rpool/local/home/leyla@blank
-  '';
+{...}: {
+  # fileSystems."/home/leyla".neededForBoot = true;
+  # fileSystems."/persist/home/leyla".neededForBoot = true;
-  # systemd.services = {
-  #   # https://github.com/openzfs/zfs/issues/10891
-  #   systemd-udev-settle.enable = false;
-  #   # Snapshots are not accessible on boot for some reason this should fix it
-  #   # https://github.com/NixOS/nixpkgs/issues/257505
-  #   zfs-mount = {
-  #     serviceConfig = {
-  #       ExecStart = ["zfs mount -a -o remount"];
-  #       # ExecStart = [
-  #       #   "${lib.getExe' pkgs.util-linux "mount"} -t zfs rpool/local -o remount"
-  #       #   "${lib.getExe' pkgs.util-linux "mount"} -t zfs rpool/persistent -o remount"
-  #       # ];
-  #     };
-  #   };
-  # };
-  # boot.initrd.systemd.services.rollback = {
-  #   description = "Rollback filesystem to a pristine state on boot";
-  #   wantedBy = [
-  #     "initrd.target"
-  #   ];
-  #   after = [
-  #     "zfs-import-rpool.service"
-  #   ];
-  #   before = [
-  #     "sysroot.mount"
-  #   ];
-  #   requiredBy = [
-  #     "sysroot.mount"
-  #   ];
-  #   serviceConfig = {
-  #     Type = "oneshot";
-  #     ExecStart = ''
-  #       zfs rollback -r rpool/local/system/root@blank
-  #       zfs rollback -r rpool/local/home@blank
-  #     '';
-  #   };
-  # };
-  fileSystems."/".neededForBoot = true;
-  fileSystems."/home/leyla".neededForBoot = true;
-  fileSystems."/persist/system/root".neededForBoot = true;
-  fileSystems."/persist/home/leyla".neededForBoot = true;
-  fileSystems.${import ../../../const/sops_age_key_directory.nix}.neededForBoot = true;
-  environment.persistence."/persist/system/root" = {
-    enable = true;
-    hideMounts = true;
-    directories = [
-      "/run/secrets"
-      "/etc/ssh"
-      "/var/log"
-      "/var/lib/nixos"
-      "/var/lib/systemd/coredump"
-      # config.apps.pihole.directory.root
-      # config.apps.jellyfin.mediaDirectory
-      # config.services.jellyfin.configDir
-      # config.services.jellyfin.cacheDir
-      # config.services.jellyfin.dataDir
-      # "/var/hass" # config.users.users.hass.home
-      # "/var/postgresql" # config.users.users.postgresql.home
-      # "/var/forgejo" # config.users.users.forgejo.home
-      # "/var/nextcloud" # config.users.users.nextcloud.home
-      # "/var/headscale" # config.users.users.headscale.home
-    ];
-    files = [
-      "/etc/machine-id"
-    ];
-  };
-  security.sudo.extraConfig = "Defaults lecture=never";
 }


@@ -8,5 +8,7 @@
     ./desktop.nix
     ./ssh.nix
     ./i18n.nix
+    ./impermanence.nix
+    ./disko.nix
   ];
 }


@@ -0,0 +1,168 @@
{
lib,
config,
inputs,
...
}: let
# there is currently a bug in disko that causes long disk names to generate improper GPT labels; this hash function works around it when used for disk names instead of what we default to
# the max GPT label length is 36 and disko formats it like disk-xxxx-zfs, which means we need to be 9 characters under that
hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));
vdevs =
builtins.map (
disks:
builtins.map (disk: lib.attrsets.nameValuePair (hashDisk disk) disk) disks
)
config.host.storage.pool.vdevs;
cache =
builtins.map (
disk: lib.attrsets.nameValuePair (hashDisk disk) disk
)
config.host.storage.pool.cache;
in {
options.host.storage = {
enable = lib.mkEnableOption "are we going to create zfs disks with disko on this device";
encryption = lib.mkEnableOption "is the vdev going to be encrypted";
pool = {
vdevs = lib.mkOption {
type = lib.types.listOf (lib.types.listOf lib.types.str);
description = "list of disks that are going to be in";
default = [config.host.storage.pool.drives];
};
drives = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "list of drives that are going to be in the vdev";
default = [];
};
cache = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "list of drives that are going to be used as cache";
default = [];
};
extraDatasets = lib.mkOption {
type = lib.types.attrsOf (inputs.disko.lib.subType {
types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
});
description = "List of datasets to define";
default = {};
};
};
};
config = lib.mkIf config.host.storage.enable {
disko.devices = {
disk = (
builtins.listToAttrs (
(
builtins.map
(drive:
lib.attrsets.nameValuePair (drive.name) {
type = "disk";
device = "/dev/disk/by-id/${drive.value}";
content = {
type = "gpt";
partitions = {
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "rpool";
};
};
};
};
})
(lib.lists.flatten vdevs)
)
++ (
builtins.map
(drive:
lib.attrsets.nameValuePair (drive.name) {
type = "disk";
device = "/dev/disk/by-id/${drive.value}";
content = {
type = "gpt";
partitions = {
# We have to boot off of the NVMe cache drive because I can't figure out how to boot via the HBA
ESP = {
size = "64M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = ["umask=0077"];
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "rpool";
};
};
};
};
})
cache
)
)
);
zpool = {
rpool = {
type = "zpool";
mode = {
topology = {
type = "topology";
vdev = (
builtins.map (disks: {
mode = "raidz2";
members =
builtins.map (disk: disk.name) disks;
})
vdevs
);
cache = builtins.map (disk: disk.name) cache;
};
};
options = {
ashift = "12";
autotrim = "on";
};
rootFsOptions =
{
canmount = "off";
mountpoint = "none";
xattr = "sa";
acltype = "posixacl";
relatime = "on";
compression = "lz4";
"com.sun:auto-snapshot" = "false";
}
// (
lib.attrsets.optionalAttrs config.host.storage.encryption {
encryption = "on";
keyformat = "hex";
keylocation = "prompt";
}
);
datasets = lib.mkMerge [
(lib.attrsets.mapAttrs (name: value: {
type = value.type;
options = value.options;
mountpoint = value.mountpoint;
postCreateHook = value.postCreateHook;
})
config.host.storage.pool.extraDatasets)
];
};
};
};
};
}
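A note on the hashDisk helper above: GPT partition labels are limited to 36 characters and disko names the zfs partition disk-<name>-zfs, so the disk name itself may be at most 36 - 5 - 4 = 27 characters; hashing the by-id name and truncating keeps it under that limit. An illustrative check in nix repl (both builtins exist as used above; the printed value is just a placeholder):

    nix-repl> builtins.substring 0 27 (builtins.hashString "sha256" "ata-ST18000NE000-3G6101_ZVTCXVEB")
    "…"  # a 27-character hex prefix, so disk-<hash>-zfs stays within the 36-character limit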


@@ -0,0 +1,110 @@
{
config,
lib,
...
}: {
options.host.impermanence.enable = lib.mkEnableOption "are we going to use impermanence on this device";
# TODO: validate that config.host.storage.enable is enabled
config = lib.mkMerge [
{
assertions = [
{
assertion = !(config.host.impermanence.enable && !config.host.storage.enable);
message = ''
Disko storage must be enabled to use impermanence.
'';
}
];
}
(
lib.mkIf config.host.impermanence.enable {
boot.initrd.postResumeCommands = lib.mkAfter ''
zfs rollback -r rpool/local/system/root@blank
'';
fileSystems = {
"/".neededForBoot = true;
"/persist/system/root".neededForBoot = true;
};
host.storage.pool.extraDatasets = {
# local datasets are for data that should be considered ephemeral
"local" = {
type = "zfs_fs";
options.canmount = "off";
};
# the nix directory needs to be available before persistence is set up and doesn't need to be snapshotted or backed up
"local/system/nix" = {
type = "zfs_fs";
mountpoint = "/nix";
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
# dataset for root that gets rolled back on every boot
"local/system/root" = {
type = "zfs_fs";
mountpoint = "/";
options = {
canmount = "on";
};
postCreateHook = ''
zfs snapshot rpool/local/system/root@blank
'';
};
# persist datasets are datasets that contain information that we would like to keep around
"persist" = {
type = "zfs_fs";
options.canmount = "off";
};
# this is where root data actually lives
"persist/system/root" = {
type = "zfs_fs";
mountpoint = "/persist/system/root";
options = {
"com.sun:auto-snapshot" = "true";
};
};
"persist/system/var/log" = {
type = "zfs_fs";
mountpoint = "/persist/system/var/log";
};
};
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
"/etc/ssh"
"/var/log"
"/var/lib/nixos"
"/var/lib/systemd/coredump"
# config.apps.pihole.directory.root
# config.apps.jellyfin.mediaDirectory
# config.services.jellyfin.configDir
# config.services.jellyfin.cacheDir
# config.services.jellyfin.dataDir
# "/var/hass" # config.users.users.hass.home
# "/var/postgresql" # config.users.users.postgresql.home
# "/var/forgejo" # config.users.users.forgejo.home
# "/var/nextcloud" # config.users.users.nextcloud.home
# "/var/headscale" # config.users.users.headscale.home
];
files = [
"/etc/machine-id"
];
};
security.sudo.extraConfig = "Defaults lecture=never";
}
)
];
}
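Because the datasets are now plain option values, other modules can extend the same pool instead of editing a per-host disko file. A hedged sketch of what the commented-out jellyfin dataset from the nas configuration would look like as its own module (the module boundaries and the mkIf condition are assumptions, not part of this commit):

    {config, lib, ...}: {
      config = lib.mkIf config.host.impermanence.enable {
        host.storage.pool.extraDatasets = {
          # mirrors the commented-out example in the nas configuration.nix above
          "persist/system/var/lib/jellyfin/media" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/var/lib/jellyfin/media";
          };
        };
      };
    }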


@@ -10,7 +10,7 @@
   principleUsers = host.principleUsers;
   terminalUsers = host.terminalUsers;
-  # normalUsers = host.normalUsers;
+  normalUsers = host.normalUsers;
   uids = {
     leyla = 1000;
@@ -43,7 +43,8 @@
   ester = users.ester.name;
   eve = users.eve.name;
 in {
-  config = {
+  config = lib.mkMerge [
+    {
       # principle users are by definition trusted
       nix.settings.trusted-users = builtins.map (user: user.name) principleUsers;
@@ -251,5 +252,64 @@ in {
           };
         };
       };
+    }
+    (lib.mkIf config.host.impermanence.enable {
+      boot.initrd.postResumeCommands = lib.mkAfter (
+        lib.strings.concatStrings (builtins.map (user: ''
+            zfs rollback -r rpool/local/home/${user.name}@blank
+          '')
+          normalUsers)
+      );
+      fileSystems.${SOPS_AGE_KEY_DIRECTORY}.neededForBoot = true;
+      environment.persistence."/persist/system/root" = {
+        enable = true;
+        hideMounts = true;
+        directories = [
+          "/run/secrets"
+        ];
+      };
+      host.storage.pool.extraDatasets = lib.mkMerge [
+        {
+          # sops age key needs to be available to pre persist for user generation
+          "local/system/sops" = {
+            type = "zfs_fs";
+            mountpoint = SOPS_AGE_KEY_DIRECTORY;
+            options = {
+              atime = "off";
+              relatime = "off";
+              canmount = "on";
+            };
+          };
         }
+        (
+          lib.mkMerge
+          (
+            builtins.map (user: {
+              "local/home/${user.name}" = {
+                type = "zfs_fs";
+                mountpoint = "/home/${user.name}";
+                options = {
+                  canmount = "on";
+                };
+                postCreateHook = ''
+                  zfs snapshot rpool/local/home/${user.name}@blank
+                '';
+              };
+              "persist/home/${user.name}" = {
+                type = "zfs_fs";
+                mountpoint = "/persist/home/${user.name}";
+                options = {
+                  "com.sun:auto-snapshot" = "true";
+                };
+              };
+            })
+            normalUsers
+          )
+        )
+      ];
+    })
+  ];
+}
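With this hunk every user in normalUsers gets a rollback-on-boot home dataset plus a persistent counterpart; for the nas host's leyla the generated entries should be equivalent to the ones previously hard-coded in disko-config.nix (a sketch derived from the map above, condensed to single-line strings):

    "local/home/leyla" = {
      type = "zfs_fs";
      mountpoint = "/home/leyla";
      options.canmount = "on";
      postCreateHook = "zfs snapshot rpool/local/home/leyla@blank";
    };
    "persist/home/leyla" = {
      type = "zfs_fs";
      mountpoint = "/persist/home/leyla";
      options."com.sun:auto-snapshot" = "true";
    };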


@@ -67,10 +67,13 @@ in {
       default = lib.lists.filter (user: user.isPrincipleUser) hostUsers;
     };
     normalUsers = lib.mkOption {
-      default = lib.lists.filter (user: user.isTerminalUser) hostUsers;
+      default = lib.lists.filter (user: user.isNormalUser) hostUsers;
+    };
+    desktopUsers = lib.mkOption {
+      default = lib.lists.filter (user: user.isDesktopUser) hostUsers;
     };
     terminalUsers = lib.mkOption {
-      default = lib.lists.filter (user: user.isNormalUser) hostUsers;
+      default = lib.lists.filter (user: user.isTerminalUser) hostUsers;
     };
   };


@@ -7,6 +7,7 @@
   home-manager = inputs.home-manager;
   nix-darwin = inputs.nix-darwin;
   sops-nix = inputs.sops-nix;
+  disko = inputs.disko;
   impermanence = inputs.impermanence;
   systems = [
@@ -74,6 +75,7 @@ in {
       sops-nix.nixosModules.sops
       impermanence.nixosModules.impermanence
       home-manager.nixosModules.home-manager
+      disko.nixosModules.disko
       ../modules/nixos-modules
       ../configurations/nixos/${host}
     ];