made disko/impermanence config into a module
parent c28731a1a7
commit 30ad3c91b9
@@ -1,11 +1,6 @@
# server nas
{
  inputs,
  pkgs,
  ...
}: {
{pkgs, ...}: {
  imports = [
    inputs.disko.nixosModules.disko
    # ./services.nix
  ];

@@ -21,6 +16,33 @@
      ester.isNormalUser = false;
      eve.isNormalUser = false;
    };
    impermanence.enable = true;
    storage = {
      enable = true;
      encryption = true;
      pool = {
        drives = [
          "ata-ST18000NE000-3G6101_ZVTCXVEB"
          "ata-ST18000NE000-3G6101_ZVTCXWSC"
          "ata-ST18000NE000-3G6101_ZVTD10EH"
          "ata-ST18000NT001-3NF101_ZVTE0S3Q"
          "ata-ST18000NT001-3NF101_ZVTEF27J"
          "ata-ST18000NT001-3NF101_ZVTEZACV"
        ];
        cache = [
          "nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F"
        ];
        # extraDatasets = {
        #   "persist/system/var/lib/jellyfin/media" = {
        #     type = "zfs_fs";
        #     mountpoint = "/persist/system/var/lib/jellyfin/media";
        #   };
        # };
      };
    };
  };
  networking = {
    hostId = "c51763d6";
  };

  # apps = {
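
The commented extraDatasets block above hints at how host-specific datasets would be declared through the new storage module once needed; uncommented, it would presumably read as follows (a sketch derived from the commented lines, not something this commit turns on):

  extraDatasets = {
    "persist/system/var/lib/jellyfin/media" = {
      type = "zfs_fs";
      mountpoint = "/persist/system/var/lib/jellyfin/media";
    };
  };
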
@@ -1,9 +1,7 @@
# server nas
{...}: {
  imports = [
    ./disko-config.nix
    ./hardware-configuration.nix
    ./impermanence.nix
    ./configuration.nix
  ];
}
@@ -1,181 +0,0 @@
{lib, ...}: let
  zfsDisk = devicePath: {
    type = "disk";
    device = devicePath;
    content = {
      type = "gpt";
      partitions = {
        zfs = {
          size = "100%";
          content = {
            type = "zfs";
            pool = "rpool";
          };
        };
      };
    };
  };
  cacheDisk = devicePath: {
    type = "disk";
    device = devicePath;
    content = {
      type = "gpt";
      partitions = {
        # We are having to boot off of the nvm cache drive because I cant figure out how to boot via the HBA
        ESP = {
          size = "64M";
          type = "EF00";
          content = {
            type = "filesystem";
            format = "vfat";
            mountpoint = "/boot";
            mountOptions = ["umask=0077"];
          };
        };
        zfs = {
          size = "100%";
          content = {
            type = "zfs";
            pool = "rpool";
          };
        };
      };
    };
  };
in {
  disko.devices = {
    disk = {
      hd_18_tb_a = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTCXVEB";
      hd_18_tb_b = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTCXWSC";
      hd_18_tb_c = zfsDisk "/dev/disk/by-id/ata-ST18000NE000-3G6101_ZVTD10EH";
      hd_18_tb_d = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTE0S3Q";
      hd_18_tb_e = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTEF27J";
      hd_18_tb_f = zfsDisk "/dev/disk/by-id/ata-ST18000NT001-3NF101_ZVTEZACV";

      ssd_4_tb_a = cacheDisk "/dev/disk/by-id/nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F";
    };
    zpool = {
      rpool = {
        type = "zpool";
        mode = {
          topology = {
            type = "topology";
            vdev = [
              {
                mode = "raidz2";
                members = [
                  "hd_18_tb_a"
                  "hd_18_tb_b"
                  "hd_18_tb_c"
                  "hd_18_tb_d"
                  "hd_18_tb_e"
                  "hd_18_tb_f"
                ];
              }
            ];
            cache = ["ssd_4_tb_a"];
          };
        };

        options = {
          ashift = "12";
          autotrim = "on";
        };

        rootFsOptions =
          {
            canmount = "off";
            mountpoint = "none";

            xattr = "sa";
            acltype = "posixacl";
            relatime = "on";

            compression = "lz4";

            "com.sun:auto-snapshot" = "false";
          }
          # TODO: have an option to enable encryption
          // lib.attrsets.optionalAttrs false {
            encryption = "on";
            keyformat = "hex";
            keylocation = "prompt";
          };

        datasets = {
          # local datasets are for data that should be considered ephemeral
          "local" = {
            type = "zfs_fs";
            options.canmount = "off";
          };
          # the nix directory is local because its all generable from our configuration
          "local/system/nix" = {
            type = "zfs_fs";
            mountpoint = "/nix";
            options = {
              atime = "off";
              relatime = "off";
              canmount = "on";
            };
          };
          "local/system/sops" = {
            type = "zfs_fs";
            mountpoint = import ../../../const/sops_age_key_directory.nix;
            options = {
              atime = "off";
              relatime = "off";
              canmount = "on";
            };
          };
          "local/system/root" = {
            type = "zfs_fs";
            mountpoint = "/";
            options = {
              canmount = "on";
            };
            postCreateHook = ''
              zfs snapshot rpool/local/system/root@blank
            '';
          };
          "local/home/leyla" = {
            type = "zfs_fs";
            mountpoint = "/home/leyla";
            options = {
              canmount = "on";
            };
            postCreateHook = ''
              zfs snapshot rpool/local/home/leyla@blank
            '';
          };

          # persist datasets are datasets that contain information that we would like to keep around
          "persist" = {
            type = "zfs_fs";
            options.canmount = "off";
          };
          "persist/system/root" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/root";
            options = {
              "com.sun:auto-snapshot" = "true";
              mountpoint = "/persist/system/root";
            };
          };
          "persist/home/leyla" = {
            type = "zfs_fs";
            mountpoint = "/persist/home/leyla";
            options = {
              "com.sun:auto-snapshot" = "true";
              mountpoint = "/persist/home/leyla";
            };
          };

          # TODO: separate dataset for logs that wont participate in snapshots and rollbacks with the rest of the system
        };
      };
    };
  };
  networking = {
    hostId = "c51763d6";
  };
}
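
The TODO on the encryption merge above is what the new module resolves: the hard-coded false guard becomes the host.storage.encryption option in modules/nixos-modules/disko.nix below, so the same attrset merge ends up driven by configuration instead of hand edits, roughly (sketch, the rest of the attrset elided):

  rootFsOptions = { ... } // (lib.attrsets.optionalAttrs config.host.storage.encryption {
    encryption = "on";
    keyformat = "hex";
    keylocation = "prompt";
  });
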
@@ -1,83 +1,4 @@
{lib, ...}: {
  boot.initrd.postResumeCommands = lib.mkAfter ''
    zfs rollback -r rpool/local/system/root@blank
    zfs rollback -r rpool/local/home/leyla@blank
  '';

  # systemd.services = {
  #   # https://github.com/openzfs/zfs/issues/10891
  #   systemd-udev-settle.enable = false;
  #   # Snapshots are not accessible on boot for some reason this should fix it
  #   # https://github.com/NixOS/nixpkgs/issues/257505
  #   zfs-mount = {
  #     serviceConfig = {
  #       ExecStart = ["zfs mount -a -o remount"];
  #       # ExecStart = [
  #       #   "${lib.getExe' pkgs.util-linux "mount"} -t zfs rpool/local -o remount"
  #       #   "${lib.getExe' pkgs.util-linux "mount"} -t zfs rpool/persistent -o remount"
  #       # ];
  #     };
  #   };
  # };

  # boot.initrd.systemd.services.rollback = {
  #   description = "Rollback filesystem to a pristine state on boot";
  #   wantedBy = [
  #     "initrd.target"
  #   ];
  #   after = [
  #     "zfs-import-rpool.service"
  #   ];
  #   before = [
  #     "sysroot.mount"
  #   ];
  #   requiredBy = [
  #     "sysroot.mount"
  #   ];
  #   serviceConfig = {
  #     Type = "oneshot";
  #     ExecStart = ''
  #       zfs rollback -r rpool/local/system/root@blank
  #       zfs rollback -r rpool/local/home@blank
  #     '';
  #   };
  # };

  fileSystems."/".neededForBoot = true;
  fileSystems."/home/leyla".neededForBoot = true;
  fileSystems."/persist/system/root".neededForBoot = true;
  fileSystems."/persist/home/leyla".neededForBoot = true;
  fileSystems.${import ../../../const/sops_age_key_directory.nix}.neededForBoot = true;

  environment.persistence."/persist/system/root" = {
    enable = true;
    hideMounts = true;
    directories = [
      "/run/secrets"

      "/etc/ssh"

      "/var/log"
      "/var/lib/nixos"
      "/var/lib/systemd/coredump"

      # config.apps.pihole.directory.root

      # config.apps.jellyfin.mediaDirectory
      # config.services.jellyfin.configDir
      # config.services.jellyfin.cacheDir
      # config.services.jellyfin.dataDir

      # "/var/hass" # config.users.users.hass.home
      # "/var/postgresql" # config.users.users.postgresql.home
      # "/var/forgejo" # config.users.users.forgejo.home
      # "/var/nextcloud" # config.users.users.nextcloud.home
      # "/var/headscale" # config.users.users.headscale.home
    ];
    files = [
      "/etc/machine-id"
    ];
  };

  security.sudo.extraConfig = "Defaults lecture=never";
{...}: {
  # fileSystems."/home/leyla".neededForBoot = true;
  # fileSystems."/persist/home/leyla".neededForBoot = true;
}
@@ -8,5 +8,7 @@
    ./desktop.nix
    ./ssh.nix
    ./i18n.nix
    ./impermanence.nix
    ./disko.nix
  ];
}
modules/nixos-modules/disko.nix (new file, 168 lines)
@@ -0,0 +1,168 @@
{
  lib,
  config,
  inputs,
  ...
}: let
  # there currently is a bug with disko that causes long disk names to be generated improperly this hash function should alleviate it when used for disk names instead of what we are defaulting to
  # max gpt length is 36 and disk adds formats it like disk-xxxx-zfs which means we need to be 9 characters under that
  hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));

  vdevs =
    builtins.map (
      disks:
        builtins.map (disk: lib.attrsets.nameValuePair (hashDisk disk) disk) disks
    )
    config.host.storage.pool.vdevs;
  cache =
    builtins.map (
      disk: lib.attrsets.nameValuePair (hashDisk disk) disk
    )
    config.host.storage.pool.cache;
in {
  options.host.storage = {
    enable = lib.mkEnableOption "are we going create zfs disks with disko on this device";
    encryption = lib.mkEnableOption "is the vdev going to be encrypted";
    pool = {
      vdevs = lib.mkOption {
        type = lib.types.listOf (lib.types.listOf lib.types.str);
        description = "list of disks that are going to be in";
        default = [config.host.storage.pool.drives];
      };
      drives = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "list of drives that are going to be in the vdev";
        default = [];
      };
      cache = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "list of drives that are going to be used as cache";
        default = [];
      };
      extraDatasets = lib.mkOption {
        type = lib.types.attrsOf (inputs.disko.lib.subType {
          types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
        });
        description = "List of datasets to define";
        default = {};
      };
    };
  };

  config = lib.mkIf config.host.storage.enable {
    disko.devices = {
      disk = (
        builtins.listToAttrs (
          (
            builtins.map
            (drive:
              lib.attrsets.nameValuePair (drive.name) {
                type = "disk";
                device = "/dev/disk/by-id/${drive.value}";
                content = {
                  type = "gpt";
                  partitions = {
                    zfs = {
                      size = "100%";
                      content = {
                        type = "zfs";
                        pool = "rpool";
                      };
                    };
                  };
                };
              })
            (lib.lists.flatten vdevs)
          )
          ++ (
            builtins.map
            (drive:
              lib.attrsets.nameValuePair (drive.name) {
                type = "disk";
                device = "/dev/disk/by-id/${drive.value}";
                content = {
                  type = "gpt";
                  partitions = {
                    # We are having to boot off of the nvm cache drive because I cant figure out how to boot via the HBA
                    ESP = {
                      size = "64M";
                      type = "EF00";
                      content = {
                        type = "filesystem";
                        format = "vfat";
                        mountpoint = "/boot";
                        mountOptions = ["umask=0077"];
                      };
                    };
                    zfs = {
                      size = "100%";
                      content = {
                        type = "zfs";
                        pool = "rpool";
                      };
                    };
                  };
                };
              })
            cache
          )
        )
      );
      zpool = {
        rpool = {
          type = "zpool";
          mode = {
            topology = {
              type = "topology";
              vdev = (
                builtins.map (disks: {
                  mode = "raidz2";
                  members =
                    builtins.map (disk: disk.name) disks;
                })
                vdevs
              );
              cache = builtins.map (disk: disk.name) cache;
            };
          };

          options = {
            ashift = "12";
            autotrim = "on";
          };

          rootFsOptions =
            {
              canmount = "off";
              mountpoint = "none";

              xattr = "sa";
              acltype = "posixacl";
              relatime = "on";

              compression = "lz4";

              "com.sun:auto-snapshot" = "false";
            }
            // (
              lib.attrsets.optionalAttrs config.host.storage.encryption {
                encryption = "on";
                keyformat = "hex";
                keylocation = "prompt";
              }
            );

          datasets = lib.mkMerge [
            (lib.attrsets.mapAttrs (name: value: {
                type = value.type;
                options = value.options;
                mountpoint = value.mountpoint;
                postCreateHook = value.postCreateHook;
              })
              config.host.storage.pool.extraDatasets)
          ];
        };
      };
    };
  };
}
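
A quick way to see what the hashDisk helper above produces for one of the drive ids used by this host (nix repl; the only claim made here is the length, which is what keeps the generated disk-<name>-zfs label under the 36-character GPT limit):

  nix-repl> builtins.stringLength (builtins.substring 0 27 (builtins.hashString "sha256" "ata-ST18000NE000-3G6101_ZVTCXVEB"))
  27
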
modules/nixos-modules/impermanence.nix (new file, 110 lines)
@@ -0,0 +1,110 @@
{
  config,
  lib,
  ...
}: {
  options.host.impermanence.enable = lib.mkEnableOption "are we going to use impermanence on this device";

  # TODO: validate that config.host.storage.enable is enabled
  config = lib.mkMerge [
    {
      assertions = [
        {
          assertion = !(config.host.impermanence.enable && !config.host.storage.enable);
          message = ''
            Disko storage must be enabled to use impermanence.
          '';
        }
      ];
    }
    (
      lib.mkIf config.host.impermanence.enable {
        boot.initrd.postResumeCommands = lib.mkAfter ''
          zfs rollback -r rpool/local/system/root@blank
        '';

        fileSystems = {
          "/".neededForBoot = true;
          "/persist/system/root".neededForBoot = true;
        };

        host.storage.pool.extraDatasets = {
          # local datasets are for data that should be considered ephemeral
          "local" = {
            type = "zfs_fs";
            options.canmount = "off";
          };
          # nix directory needs to be available pre persist and doesn't need to be snapshotted or backed up
          "local/system/nix" = {
            type = "zfs_fs";
            mountpoint = "/nix";
            options = {
              atime = "off";
              relatime = "off";
              canmount = "on";
            };
          };
          # dataset for root that gets rolled back on every boot
          "local/system/root" = {
            type = "zfs_fs";
            mountpoint = "/";
            options = {
              canmount = "on";
            };
            postCreateHook = ''
              zfs snapshot rpool/local/system/root@blank
            '';
          };

          # persist datasets are datasets that contain information that we would like to keep around
          "persist" = {
            type = "zfs_fs";
            options.canmount = "off";
          };
          # this is where root data actually lives
          "persist/system/root" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/root";
            options = {
              "com.sun:auto-snapshot" = "true";
            };
          };
          "persist/system/var/log" = {
            type = "zfs_fs";
            mountpoint = "/persist/system/var/log";
          };
        };

        environment.persistence."/persist/system/root" = {
          enable = true;
          hideMounts = true;
          directories = [
            "/etc/ssh"

            "/var/log"
            "/var/lib/nixos"
            "/var/lib/systemd/coredump"

            # config.apps.pihole.directory.root

            # config.apps.jellyfin.mediaDirectory
            # config.services.jellyfin.configDir
            # config.services.jellyfin.cacheDir
            # config.services.jellyfin.dataDir

            # "/var/hass" # config.users.users.hass.home
            # "/var/postgresql" # config.users.users.postgresql.home
            # "/var/forgejo" # config.users.users.forgejo.home
            # "/var/nextcloud" # config.users.users.nextcloud.home
            # "/var/headscale" # config.users.users.headscale.home
          ];
          files = [
            "/etc/machine-id"
          ];
        };

        security.sudo.extraConfig = "Defaults lecture=never";
      }
    )
  ];
}
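
Taken together with disko.nix above, a host now opts in through the options these two modules declare; a minimal sketch of such a host configuration follows (the host = { ... } wrapper is assumed from the option paths host.storage and host.impermanence, and the drive ids are placeholders):

  {...}: {
    host = {
      impermanence.enable = true;
      storage = {
        enable = true;
        encryption = true;
        pool = {
          drives = ["ata-EXAMPLE-DRIVE-A" "ata-EXAMPLE-DRIVE-B"];
          cache = ["nvme-EXAMPLE-CACHE"];
        };
      };
    };
  }
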
@@ -10,7 +10,7 @@

  principleUsers = host.principleUsers;
  terminalUsers = host.terminalUsers;
  # normalUsers = host.normalUsers;
  normalUsers = host.normalUsers;

  uids = {
    leyla = 1000;
@@ -43,213 +43,273 @@
ester = users.ester.name;
eve = users.eve.name;
in {
config = {
# principle users are by definition trusted
nix.settings.trusted-users = builtins.map (user: user.name) principleUsers;
config = lib.mkMerge [
{
# principle users are by definition trusted
nix.settings.trusted-users = builtins.map (user: user.name) principleUsers;

# we should only be able to ssh into principle users of a computer who are also set up for terminal access
services.openssh.settings.AllowUsers = builtins.map (user: user.name) (lib.lists.intersectLists terminalUsers principleUsers);
# we should only be able to ssh into principle users of a computer who are also set up for terminal access
services.openssh.settings.AllowUsers = builtins.map (user: user.name) (lib.lists.intersectLists terminalUsers principleUsers);

# we need to set up env variables to nix can find keys to decrypt passwords on rebuild
environment = {
sessionVariables = {
SOPS_AGE_KEY_DIRECTORY = SOPS_AGE_KEY_DIRECTORY;
SOPS_AGE_KEY_FILE = "${SOPS_AGE_KEY_DIRECTORY}/key.txt";
};
};

# set up user passwords
sops = {
defaultSopsFormat = "yaml";
gnupg.sshKeyPaths = [];

age = {
keyFile = "/var/lib/sops-nix/key.txt";
sshKeyPaths = [];
# generateKey = true;
};

secrets = {
"passwords/leyla" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
};
"passwords/ester" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
};
"passwords/eve" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
# we need to set up env variables to nix can find keys to decrypt passwords on rebuild
environment = {
sessionVariables = {
SOPS_AGE_KEY_DIRECTORY = SOPS_AGE_KEY_DIRECTORY;
SOPS_AGE_KEY_FILE = "${SOPS_AGE_KEY_DIRECTORY}/key.txt";
};
};

# set up user passwords
sops = {
defaultSopsFormat = "yaml";
gnupg.sshKeyPaths = [];

age = {
keyFile = "/var/lib/sops-nix/key.txt";
sshKeyPaths = [];
# generateKey = true;
};

secrets = {
"passwords/leyla" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
};
"passwords/ester" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
};
"passwords/eve" = {
neededForUsers = true;
sopsFile = "${inputs.secrets}/user-passwords.yaml";
};
};
};
};

users = {
mutableUsers = false;
users = {
leyla = {
uid = lib.mkForce uids.leyla;
name = lib.mkForce host.users.leyla.name;
description = "Leyla";
extraGroups =
(lib.lists.optionals host.users.leyla.isNormalUser ["networkmanager"])
++ (lib.lists.optionals host.users.leyla.isPrincipleUser ["wheel" "dialout"])
++ (lib.lists.optionals host.users.leyla.isDesktopUser ["adbusers"]);
hashedPasswordFile = config.sops.secrets."passwords/leyla".path;
isNormalUser = host.users.leyla.isNormalUser;
isSystemUser = !host.users.leyla.isNormalUser;
group = config.users.users.leyla.name;
};

ester = {
uid = lib.mkForce uids.ester;
name = lib.mkForce host.users.ester.name;
description = "Ester";
extraGroups = lib.optionals host.users.ester.isNormalUser ["networkmanager"];
hashedPasswordFile = config.sops.secrets."passwords/ester".path;
isNormalUser = host.users.ester.isNormalUser;
isSystemUser = !host.users.ester.isNormalUser;
group = config.users.users.ester.name;
};

eve = {
uid = lib.mkForce uids.eve;
name = lib.mkForce host.users.eve.name;
description = "Eve";
extraGroups = lib.optionals host.users.eve.isNormalUser ["networkmanager"];
hashedPasswordFile = config.sops.secrets."passwords/eve".path;
isNormalUser = host.users.eve.isNormalUser;
isSystemUser = !host.users.eve.isNormalUser;
group = config.users.users.eve.name;
};

jellyfin = {
uid = lib.mkForce uids.jellyfin;
isSystemUser = true;
group = config.users.users.jellyfin.name;
};

forgejo = {
uid = lib.mkForce uids.forgejo;
isSystemUser = true;
group = config.users.users.forgejo.name;
};

pihole = {
uid = lib.mkForce uids.pihole;
isSystemUser = true;
group = config.users.users.pihole.name;
};

hass = {
uid = lib.mkForce uids.hass;
isSystemUser = true;
group = config.users.users.hass.name;
};

headscale = {
uid = lib.mkForce uids.headscale;
isSystemUser = true;
group = config.users.users.headscale.name;
};

nextcloud = {
uid = lib.mkForce uids.nextcloud;
isSystemUser = true;
group = config.users.users.nextcloud.name;
};
};

groups = {
leyla = {
gid = lib.mkForce gids.leyla;
members = [
leyla
];
};

ester = {
gid = lib.mkForce gids.ester;
members = [
ester
];
};

eve = {
gid = lib.mkForce gids.eve;
members = [
eve
];
};

mutableUsers = false;
users = {
gid = lib.mkForce gids.users;
members = [
leyla
ester
eve
];
leyla = {
uid = lib.mkForce uids.leyla;
name = lib.mkForce host.users.leyla.name;
description = "Leyla";
extraGroups =
(lib.lists.optionals host.users.leyla.isNormalUser ["networkmanager"])
++ (lib.lists.optionals host.users.leyla.isPrincipleUser ["wheel" "dialout"])
++ (lib.lists.optionals host.users.leyla.isDesktopUser ["adbusers"]);
hashedPasswordFile = config.sops.secrets."passwords/leyla".path;
isNormalUser = host.users.leyla.isNormalUser;
isSystemUser = !host.users.leyla.isNormalUser;
group = config.users.users.leyla.name;
};

ester = {
uid = lib.mkForce uids.ester;
name = lib.mkForce host.users.ester.name;
description = "Ester";
extraGroups = lib.optionals host.users.ester.isNormalUser ["networkmanager"];
hashedPasswordFile = config.sops.secrets."passwords/ester".path;
isNormalUser = host.users.ester.isNormalUser;
isSystemUser = !host.users.ester.isNormalUser;
group = config.users.users.ester.name;
};

eve = {
uid = lib.mkForce uids.eve;
name = lib.mkForce host.users.eve.name;
description = "Eve";
extraGroups = lib.optionals host.users.eve.isNormalUser ["networkmanager"];
hashedPasswordFile = config.sops.secrets."passwords/eve".path;
isNormalUser = host.users.eve.isNormalUser;
isSystemUser = !host.users.eve.isNormalUser;
group = config.users.users.eve.name;
};

jellyfin = {
uid = lib.mkForce uids.jellyfin;
isSystemUser = true;
group = config.users.users.jellyfin.name;
};

forgejo = {
uid = lib.mkForce uids.forgejo;
isSystemUser = true;
group = config.users.users.forgejo.name;
};

pihole = {
uid = lib.mkForce uids.pihole;
isSystemUser = true;
group = config.users.users.pihole.name;
};

hass = {
uid = lib.mkForce uids.hass;
isSystemUser = true;
group = config.users.users.hass.name;
};

headscale = {
uid = lib.mkForce uids.headscale;
isSystemUser = true;
group = config.users.users.headscale.name;
};

nextcloud = {
uid = lib.mkForce uids.nextcloud;
isSystemUser = true;
group = config.users.users.nextcloud.name;
};
};

jellyfin_media = {
gid = lib.mkForce gids.jellyfin_media;
members = [
users.jellyfin.name
leyla
ester
eve
];
};
groups = {
leyla = {
gid = lib.mkForce gids.leyla;
members = [
leyla
];
};

jellyfin = {
gid = lib.mkForce gids.jellyfin;
members = [
users.jellyfin.name
# leyla
];
};
ester = {
gid = lib.mkForce gids.ester;
members = [
ester
];
};

forgejo = {
gid = lib.mkForce gids.forgejo;
members = [
users.forgejo.name
# leyla
];
};
eve = {
gid = lib.mkForce gids.eve;
members = [
eve
];
};

pihole = {
gid = lib.mkForce gids.pihole;
members = [
users.pihole.name
# leyla
];
};
users = {
gid = lib.mkForce gids.users;
members = [
leyla
ester
eve
];
};

hass = {
gid = lib.mkForce gids.hass;
members = [
users.hass.name
# leyla
];
};
jellyfin_media = {
gid = lib.mkForce gids.jellyfin_media;
members = [
users.jellyfin.name
leyla
ester
eve
];
};

headscale = {
gid = lib.mkForce gids.headscale;
members = [
users.headscale.name
# leyla
];
};
jellyfin = {
gid = lib.mkForce gids.jellyfin;
members = [
users.jellyfin.name
# leyla
];
};

nextcloud = {
gid = lib.mkForce gids.nextcloud;
members = [
users.nextcloud.name
# leyla
];
forgejo = {
gid = lib.mkForce gids.forgejo;
members = [
users.forgejo.name
# leyla
];
};

pihole = {
gid = lib.mkForce gids.pihole;
members = [
users.pihole.name
# leyla
];
};

hass = {
gid = lib.mkForce gids.hass;
members = [
users.hass.name
# leyla
];
};

headscale = {
gid = lib.mkForce gids.headscale;
members = [
users.headscale.name
# leyla
];
};

nextcloud = {
gid = lib.mkForce gids.nextcloud;
members = [
users.nextcloud.name
# leyla
];
};
};
};
};
};
}
(lib.mkIf config.host.impermanence.enable {
boot.initrd.postResumeCommands = lib.mkAfter (
lib.strings.concatStrings (builtins.map (user: ''
zfs rollback -r rpool/local/home/${user.name}@blank
'')
normalUsers)
);

fileSystems.${SOPS_AGE_KEY_DIRECTORY}.neededForBoot = true;

environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
"/run/secrets"
];
};

host.storage.pool.extraDatasets = lib.mkMerge [
{
# sops age key needs to be available to pre persist for user generation
"local/system/sops" = {
type = "zfs_fs";
mountpoint = SOPS_AGE_KEY_DIRECTORY;
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
}
(
lib.mkMerge
(
builtins.map (user: {
"local/home/${user.name}" = {
type = "zfs_fs";
mountpoint = "/home/${user.name}";
options = {
canmount = "on";
};
postCreateHook = ''
zfs snapshot rpool/local/home/${user.name}@blank
'';
};
"persist/home/${user.name}" = {
type = "zfs_fs";
mountpoint = "/persist/home/${user.name}";
options = {
"com.sun:auto-snapshot" = "true";
};
};
})
normalUsers
)
)
];
})
];
}
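
The per-user rollback added above replaces the hard-coded commands from the deleted per-host impermanence.nix; on the nas host shown earlier, where ester and eve have isNormalUser = false, the generated postResumeCommands would presumably reduce to a single line (assuming leyla remains a normal user there):

  zfs rollback -r rpool/local/home/leyla@blank
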
@@ -67,10 +67,13 @@ in {
    default = lib.lists.filter (user: user.isPrincipleUser) hostUsers;
  };
  normalUsers = lib.mkOption {
    default = lib.lists.filter (user: user.isTerminalUser) hostUsers;
    default = lib.lists.filter (user: user.isNormalUser) hostUsers;
  };
  desktopUsers = lib.mkOption {
    default = lib.lists.filter (user: user.isDesktopUser) hostUsers;
  };
  terminalUsers = lib.mkOption {
    default = lib.lists.filter (user: user.isNormalUser) hostUsers;
    default = lib.lists.filter (user: user.isTerminalUser) hostUsers;
  };
};
@@ -7,6 +7,7 @@
  home-manager = inputs.home-manager;
  nix-darwin = inputs.nix-darwin;
  sops-nix = inputs.sops-nix;
  disko = inputs.disko;
  impermanence = inputs.impermanence;

  systems = [
@@ -74,6 +75,7 @@ in {
      sops-nix.nixosModules.sops
      impermanence.nixosModules.impermanence
      home-manager.nixosModules.home-manager
      disko.nixosModules.disko
      ../modules/nixos-modules
      ../configurations/nixos/${host}
    ];