Compare commits

..

59 commits

Author SHA1 Message Date
546cc97b57 Merge pull request 'storage-refactor' (#9) from storage-refactor into main
Reviewed-on: #9
2026-03-12 01:16:31 +00:00
16089e0371 fix: fixed more datasets 2026-03-07 19:00:01 -06:00
2f7bbf3e1c feat: fixed more missing datasets 2026-03-07 14:33:08 -06:00
1145703cfe feat: fixed merge incompatibilities 2026-03-07 12:03:09 -06:00
fa0adaa511 feat: reenabled auto snapshot/scrubbing 2026-03-07 11:15:45 -06:00
1289462220 Merge branch 'main' into storage-refactor 2026-03-07 10:41:38 -06:00
65e0c6e0e5 fix: added missing datasets to config 2026-02-08 18:01:31 -06:00
6ce567a53b fix: added missing impermanence configs 2026-02-08 13:03:05 -06:00
3302af38b3 feat: moved legacy datasets from main into defiant configuration 2026-02-08 12:50:58 -06:00
18c738cc2f feat: disabled impermanence for all the needed services 2026-02-08 12:37:42 -06:00
67eee18d7f Merge branch 'main' into storage-refactor 2026-02-07 22:58:53 -06:00
3370cd7ab3 feat: bound impermanence filesystem datasets filesystem 2026-01-15 21:50:36 -06:00
e6e53141ce feat: switched back to main for impermanence 2026-01-15 20:09:21 -06:00
ff08812447 Merge branch 'main' into storage-refactor 2026-01-15 19:55:56 -06:00
2170040ef4 Merge branch 'main' into storage-refactor 2026-01-08 21:01:01 -06:00
3891f93dba Merge branch 'main' into storage-refactor 2025-12-28 15:18:40 -06:00
e7aa507ea5 Merge branch 'main' into storage-refactor 2025-12-14 16:17:26 -06:00
8060e39b11 feat: updated android studio config to match new patter 2025-11-30 13:46:25 -06:00
6864312e49 Merge branch 'main' into storage-refactor 2025-11-30 13:30:00 -06:00
a0807b014c feat: moved logs to impermanence 2025-11-27 20:29:03 -06:00
adc1c1a918 Merge branch 'main' into storage-refactor 2025-11-27 20:14:09 -06:00
3d1750060d fix: fixed nix flake check 2025-11-23 15:44:59 -06:00
a4f3b3141d fix: fixed trailing mount path issue 2025-11-23 15:35:26 -06:00
2922114367 fix: fixed file system resolution 2025-11-23 11:51:53 -06:00
5dc602339c Merge branch 'main' into storage-refactor 2025-11-22 01:25:30 +00:00
ecdd407abe feat: switched jellyfin media and qbittorent media to being the same dataset 2025-11-17 17:56:31 -06:00
e196541f2a feat: filter out impermanence datasets that dont do anything 2025-11-16 00:12:29 -06:00
57fba596c2 Merge branch 'main' into storage-refactor 2025-11-16 00:11:26 -06:00
dfcacdc6fb feat: moved some datasets to common zfs storage config 2025-11-16 00:04:03 -06:00
c2701ea8f0 feat: moved services over to using the new storage datasets 2025-11-15 16:37:10 -06:00
757a3892e1 feat: updated interface for storage 2025-11-15 13:39:53 -06:00
f8edad75bf feat: updated user configs to better match original config 2025-11-14 22:06:32 -06:00
1eb66d1c31 feat: updated pool names 2025-11-12 19:27:12 -06:00
ac0f1ce2e6 feat: updated flake input to use fork 2025-11-10 15:51:28 -06:00
8aa984a389 feat: made datasets build 2025-11-10 15:49:12 -06:00
61eef3067e feat: made persist build with new impermanence system 2025-11-10 15:42:25 -06:00
d06c25f33f feat: migrated users over to new persistence structure 2025-11-10 02:38:28 -06:00
37f5e65bd6 Merge branch 'main' into storage-refactor 2025-11-09 02:33:38 -06:00
318a0a9748 feat: added sops dataset to users.nix 2025-11-08 22:37:19 -06:00
4d7d11e0c8 feat: removed now unneeded disko and impermanence modules 2025-11-08 21:19:54 -06:00
4da5d65d8f feat: added activation and resume scripts to storage and impermanence 2025-11-08 21:10:18 -06:00
1310b50794 feat: moved ssh config to use new storage config 2025-11-08 19:04:59 -06:00
5acf060e9e feat: updated imports to use new storage module only 2025-11-08 18:49:19 -06:00
703530ddfe feat: updated storage config for emergent 2025-11-08 18:48:41 -06:00
ab555f50ff fix: defiant config cache drive converted to correct format 2025-11-08 18:30:49 -06:00
d283f88160 feat: moved ollama, tailscale, and sync into folders following the new storage pattern 2025-11-08 18:28:34 -06:00
b67be1472a feat: refactored impermanence modules to follow new pattern 2025-11-08 18:17:22 -06:00
3ca0e9bf0a fix: fixed generation of disko configuration 2025-11-08 17:20:37 -06:00
730eeef242 Merge branch 'main' into storage-refactor 2025-11-08 14:26:07 -06:00
39edb65539 feat: removed broken disko config 2025-11-08 14:21:22 -06:00
9df29cc07f feat: refined options for datasets 2025-11-08 13:21:01 -06:00
0de97fa4a2 feat: added more development notes 2025-11-07 18:14:00 -06:00
adc6b90c93 feat: made impermanence create datasets for zfs and persistence 2025-11-07 16:29:56 -06:00
409fdb7276 Merge branch 'main' into storage-refactor 2025-11-06 17:07:07 -06:00
d8989bb43d feat: drafted out zfs vdev, pool, and dataset implementations 2025-11-05 10:56:04 -06:00
2fd14e4cc0 feat: added config block to zfs.nix and gave it notification functionality 2025-11-04 19:39:27 -06:00
573708fd47 moved storage option draft to its own folder 2025-11-04 15:02:49 -06:00
aeb37e658e Merge branch 'main' into storage-refactor 2025-11-04 14:28:28 -06:00
78dd22fed3 feat: started to draft out new storage interface 2025-11-03 11:55:10 -06:00
127 changed files with 2341 additions and 1465 deletions

View file

@ -60,7 +60,7 @@ in {
bitwarden.enable = true;
discord.enable = true;
makemkv.enable = true;
signal-desktop-bin.enable = true;
signal-desktop.enable = true;
steam.enable = true;
piper.enable = hardware.piperMouse.enable;
krita.enable = true;

View file

@ -12,7 +12,7 @@
];
config = {
impermanence.enable = osConfig.host.impermanence.enable;
impermanence.enable = osConfig.storage.impermanence.enable;
# Home Manager needs a bit of information about you and the paths it should
# manage.

View file

@ -4,7 +4,7 @@
...
}: {
config = lib.mkIf (config.impermanence.enable) {
home.persistence."/persist/home/leyla" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"desktop"
"downloads"
@ -14,7 +14,6 @@
".bash_history" # keep shell history around
"${config.xdg.dataHome}/recently-used.xbel" # gnome recently viewed files
];
allowOther = true;
};
};
}

View file

@ -50,7 +50,7 @@ in {
android-studio.enable = true;
makemkv.enable = true;
discord.enable = true;
signal-desktop-bin.enable = true;
signal-desktop.enable = true;
calibre.enable = true;
obsidian.enable = true;
jetbrains.idea-oss.enable = true;

View file

@ -33,44 +33,6 @@
isPrincipleUser = true;
};
};
impermanence.enable = true;
storage = {
enable = true;
encryption = true;
notifications = {
enable = true;
host = "smtp.protonmail.ch";
port = 587;
to = "leyla@jan-leila.com";
user = "noreply@jan-leila.com";
tokenFile = config.sops.secrets."services/zfs_smtp_token".path;
};
pool = {
# We are having to boot off of the nvm cache drive because I cant figure out how to boot via the HBA
bootDrives = ["nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F"];
vdevs = [
[
"ata-ST18000NE000-3G6101_ZVTCXVEB"
"ata-ST18000NE000-3G6101_ZVTCXWSC"
"ata-ST18000NE000-3G6101_ZVTD10EH"
"ata-ST18000NT001-3NF101_ZVTE0S3Q"
"ata-ST18000NT001-3NF101_ZVTEF27J"
"ata-ST18000NE000-3G6101_ZVTJ7359"
]
[
"ata-ST4000NE001-2MA101_WS2275P3"
"ata-ST4000NE001-2MA101_WS227B9F"
"ata-ST4000NE001-2MA101_WS227CEW"
"ata-ST4000NE001-2MA101_WS227CYN"
"ata-ST4000NE001-2MA101_WS23TBWV"
"ata-ST4000NE001-2MA101_WS23TC5F"
]
];
cache = [
"nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F"
];
};
};
network_storage = {
enable = true;
directories = [
@ -104,6 +66,53 @@
};
};
storage = {
zfs = {
enable = true;
notifications = {
enable = true;
host = "smtp.protonmail.ch";
port = 587;
to = "leyla@jan-leila.com";
user = "noreply@jan-leila.com";
tokenFile = config.sops.secrets."services/zfs_smtp_token".path;
};
pool = {
encryption = {
enable = true;
};
vdevs = [
[
"ata-ST18000NE000-3G6101_ZVTCXVEB"
"ata-ST18000NE000-3G6101_ZVTCXWSC"
"ata-ST18000NE000-3G6101_ZVTD10EH"
"ata-ST18000NT001-3NF101_ZVTE0S3Q"
"ata-ST18000NT001-3NF101_ZVTEF27J"
"ata-ST18000NE000-3G6101_ZVTJ7359"
]
[
"ata-ST4000NE001-2MA101_WS2275P3"
"ata-ST4000NE001-2MA101_WS227B9F"
"ata-ST4000NE001-2MA101_WS227CEW"
"ata-ST4000NE001-2MA101_WS227CYN"
"ata-ST4000NE001-2MA101_WS23TBWV"
"ata-ST4000NE001-2MA101_WS23TC5F"
]
];
# We have to boot off of the NVMe cache drive because I can't figure out how to boot via the HBA
cache = [
{
device = "nvme-Samsung_SSD_990_PRO_4TB_S7KGNU0X907881F";
boot = true;
}
];
};
};
impermanence = {
enable = true;
};
};
systemd.network = {
enable = true;
@ -218,6 +227,7 @@
postgresql = {
enable = true;
adminUsers = ["leyla"];
impermanence.enable = false;
};
# temp enable desktop environment for setup
@ -236,6 +246,7 @@
reverseProxy = {
enable = true;
openFirewall = true;
impermanence.enable = false;
acme = {
enable = true;
email = "jan-leila@protonmail.com";
@ -245,6 +256,7 @@
ollama = {
enable = true;
exposePort = true;
impermanence.enable = false;
environmentVariables = {
OLLAMA_KEEP_ALIVE = "24h";
@ -279,6 +291,7 @@
enable = true;
authKeyFile = config.sops.secrets."vpn-keys/tailscale-authkey/defiant".path;
useRoutingFeatures = "server";
impermanence.enable = false;
extraUpFlags = [
"--advertise-exit-node"
"--advertise-routes=192.168.0.0/24"
@ -291,24 +304,33 @@
];
};
syncthing.enable = true;
syncthing = {
enable = true;
impermanence.enable = false;
};
fail2ban.enable = true;
fail2ban = {
enable = true;
impermanence.enable = false;
};
jellyfin = {
enable = true;
domain = "media.jan-leila.com";
extraDomains = ["jellyfin.jan-leila.com"];
impermanence.enable = false;
};
immich = {
enable = true;
domain = "photos.jan-leila.com";
impermanence.enable = false;
};
forgejo = {
enable = true;
reverseProxy.domain = "git.jan-leila.com";
impermanence.enable = false;
};
searx = {
@ -319,6 +341,7 @@
actual = {
enable = false;
domain = "budget.jan-leila.com";
impermanence.enable = false;
};
home-assistant = {
@ -326,6 +349,7 @@
domain = "home.jan-leila.com";
openFirewall = true;
postgres.enable = true;
impermanence.enable = false;
extensions = {
sonos.enable = true;
@ -338,11 +362,13 @@
enable = true;
domain = "documents.jan-leila.com";
passwordFile = config.sops.secrets."services/paperless_password".path;
impermanence.enable = false;
};
panoramax = {
enable = false;
openFirewall = true;
impermanence.enable = false;
};
crab-hole = {
@ -350,6 +376,7 @@
port = 8085;
openFirewall = true;
show_doc = true;
impermanence.enable = false;
downstreams = {
host = {
enable = true;
@ -365,31 +392,38 @@
mediaDir = "/srv/qbittorent";
openFirewall = true;
webuiPort = 8084;
impermanence.enable = false;
};
sonarr = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
radarr = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
bazarr = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
lidarr = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
jackett = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
flaresolverr = {
enable = true;
openFirewall = true;
impermanence.enable = false;
};
};

View file

@ -4,5 +4,7 @@
./hardware-configuration.nix
./configuration.nix
./packages.nix
./legacy-storage.nix
./legacy-impermanence.nix
];
}

View file

@ -0,0 +1,296 @@
# Legacy impermanence module for defiant
# See legacy-storage.nix for the full incremental migration plan.
#
# This file is consumed in two phases:
#
# Phase 3 (after generateBase is enabled):
# Remove the SYSTEM-LEVEL entries marked [PHASE 3] below. These will be
# handled automatically by storage.nix, ssh.nix, and the impermanence module:
# - var-lib-private-permissions activation script
# - /etc/machine-id
# - SSH host keys
# - /var/lib/nixos
# - /var/lib/systemd/coredump
# - /persist/system/var/log persistence block
#
# Phase 4 (migrate services one at a time, any order):
# For each service:
# 1. Remove the service's section marked [PHASE 4] from this file
# 2. Remove `impermanence.enable = false` for that service in configuration.nix
# For jellyfin/qbittorrent, also remove the separate media persistence blocks.
#
# Phase 5: Delete this file once empty.
{
config,
lib,
...
}: {
config = lib.mkIf config.storage.impermanence.enable {
# [PHASE 3] Remove this activation script after enabling generateBase
system.activationScripts = {
"var-lib-private-permissions" = {
deps = ["specialfs"];
text = ''
mkdir -p /persist/system/root/var/lib/private
chmod 0700 /persist/system/root/var/lib/private
'';
};
};
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
# [PHASE 3] Remove this files block after enabling generateBase
files = lib.mkMerge [
["/etc/machine-id"]
# SSH host keys
(lib.mkIf config.services.openssh.enable (
lib.lists.flatten (
builtins.map (hostKey: [
hostKey.path
"${hostKey.path}.pub"
])
config.services.openssh.hostKeys
)
))
];
directories = lib.mkMerge [
# [PHASE 3] Remove these system directories after enabling generateBase
[
"/var/lib/nixos"
"/var/lib/systemd/coredump"
]
# [PHASE 4] PostgreSQL
(lib.mkIf config.services.postgresql.enable [
{
directory = "/var/lib/postgresql/16";
user = "postgres";
group = "postgres";
}
])
# [PHASE 4] Reverse Proxy (ACME)
(lib.mkIf config.services.reverseProxy.enable [
{
directory = "/var/lib/acme";
user = "acme";
group = "acme";
}
])
# [PHASE 4] Ollama
(lib.mkIf config.services.ollama.enable [
{
directory = "/var/lib/private/ollama";
user = config.services.ollama.user;
group = config.services.ollama.group;
mode = "0700";
}
])
# [PHASE 4] Tailscale
(lib.mkIf config.services.tailscale.enable [
{
directory = "/var/lib/tailscale";
user = "root";
group = "root";
}
])
# [PHASE 4] Syncthing
(lib.mkIf config.services.syncthing.enable [
{
directory = "/mnt/sync";
user = "syncthing";
group = "syncthing";
}
{
directory = "/etc/syncthing";
user = "syncthing";
group = "syncthing";
}
])
# [PHASE 4] Fail2ban
(lib.mkIf config.services.fail2ban.enable [
{
directory = "/var/lib/fail2ban";
user = "fail2ban";
group = "fail2ban";
}
])
# [PHASE 4] Jellyfin (data/cache only - media is on separate dataset)
(lib.mkIf config.services.jellyfin.enable [
{
directory = "/var/lib/jellyfin";
user = "jellyfin";
group = "jellyfin";
}
{
directory = "/var/cache/jellyfin";
user = "jellyfin";
group = "jellyfin";
}
])
# [PHASE 4] Immich
(lib.mkIf config.services.immich.enable [
{
directory = "/var/lib/immich";
user = "immich";
group = "immich";
}
])
# [PHASE 4] Forgejo
(lib.mkIf config.services.forgejo.enable [
{
directory = "/var/lib/forgejo";
user = "forgejo";
group = "forgejo";
}
])
# [PHASE 4] Actual
(lib.mkIf config.services.actual.enable [
{
directory = "/var/lib/private/actual";
user = "actual";
group = "actual";
}
])
# [PHASE 4] Home Assistant
(lib.mkIf config.services.home-assistant.enable [
{
directory = "/var/lib/hass";
user = "hass";
group = "hass";
}
])
# [PHASE 4] Paperless
(lib.mkIf config.services.paperless.enable [
{
directory = "/var/lib/paperless";
user = "paperless";
group = "paperless";
}
])
# [PHASE 4] Crab-hole
(lib.mkIf config.services.crab-hole.enable [
{
directory = "/var/lib/private/crab-hole";
user = "crab-hole";
group = "crab-hole";
}
])
# [PHASE 4] qBittorrent (config only - media is on separate dataset)
(lib.mkIf config.services.qbittorrent.enable [
{
directory = "/var/lib/qBittorrent/";
user = "qbittorrent";
group = "qbittorrent";
}
])
# [PHASE 4] Sonarr
(lib.mkIf config.services.sonarr.enable [
{
directory = "/var/lib/sonarr/.config/NzbDrone";
user = "sonarr";
group = "sonarr";
}
])
# [PHASE 4] Radarr
(lib.mkIf config.services.radarr.enable [
{
directory = "/var/lib/radarr/.config/Radarr";
user = "radarr";
group = "radarr";
}
])
# [PHASE 4] Bazarr
(lib.mkIf config.services.bazarr.enable [
{
directory = "/var/lib/bazarr";
user = "bazarr";
group = "bazarr";
}
])
# [PHASE 4] Lidarr
(lib.mkIf config.services.lidarr.enable [
{
directory = "/var/lib/lidarr/.config/Lidarr";
user = "lidarr";
group = "lidarr";
}
])
# [PHASE 4] Jackett
(lib.mkIf config.services.jackett.enable [
{
directory = "/var/lib/jackett/.config/Jackett";
user = "jackett";
group = "jackett";
}
])
# [PHASE 4] FlareSolverr
(lib.mkIf config.services.flaresolverr.enable [
{
directory = "/var/lib/flaresolverr";
user = "flaresolverr";
group = "flaresolverr";
}
])
];
};
# [PHASE 4 - LAST] Jellyfin media on separate dataset
# Requires Phase 2 media dataset merge before migrating (several days of data copy)
environment.persistence."/persist/system/jellyfin" = lib.mkIf config.services.jellyfin.enable {
enable = true;
hideMounts = true;
directories = [
{
directory = config.services.jellyfin.media_directory;
user = "jellyfin";
group = "jellyfin_media";
mode = "1770";
}
];
};
# [PHASE 4 - LAST] qBittorrent media on separate dataset
# Requires Phase 2 media dataset merge before migrating (several days of data copy)
environment.persistence."/persist/system/qbittorrent" = lib.mkIf config.services.qbittorrent.enable {
enable = true;
hideMounts = true;
directories = [
{
directory = config.services.qbittorrent.mediaDir;
user = "qbittorrent";
group = "qbittorrent";
mode = "1775";
}
];
};
# [PHASE 3] /var/log persistence - handled by storage.nix after generateBase
environment.persistence."/persist/system/var/log" = {
enable = true;
hideMounts = true;
directories = [
"/var/log"
];
};
};
}

View file

@ -0,0 +1,218 @@
# Legacy storage configuration for defiant
# This file manually defines ZFS datasets matching the existing on-disk layout
# to allow incremental migration to the new storage module (generateBase = true).
#
# ============================================================================
# INCREMENTAL MIGRATION PLAN
# ============================================================================
#
# Current disk usage (for reference):
# rpool/local/system/nix ~26G (renamed in place, no copy)
# rpool/local/system/sops ~328K (renamed in place, no copy)
# rpool/persist/system/jellyfin ~32T (renamed in place, no copy)
# rpool/persist/system/qbittorrent ~6.5T (copied into media dataset, ~6.5T temp)
# rpool free space ~30T
#
# Phase 1: Migrate base datasets on disk (boot from live USB or rescue)
# All operations in this phase are instant renames -- no data is copied.
#
# Unlock the pool:
# zfs load-key -a
#
# Step 1a: Move nix and sops out of local/ (they go to persist/local/)
# The -p flag auto-creates the parent datasets.
#
# zfs rename -p rpool/local/system/nix rpool/persist/local/nix
# zfs rename -p rpool/local/system/sops rpool/persist/local/system/sops
# NOTE(review): the two destinations are asymmetric (persist/local/nix vs
# persist/local/system/sops) -- confirm both match the generateBase layout.
#
# Step 1b: Rename local/ -> ephemeral/ (takes remaining children with it)
# zfs rename rpool/local rpool/ephemeral
# # This moves: local/system/root -> ephemeral/system/root
# # local/home/leyla -> ephemeral/home/leyla
#
# Step 1c: Recreate blank snapshots on ephemeral datasets
# zfs destroy rpool/ephemeral/system/root@blank
# zfs snapshot rpool/ephemeral/system/root@blank
# zfs destroy rpool/ephemeral/home/leyla@blank
# zfs snapshot rpool/ephemeral/home/leyla@blank
#
# Step 1d: Move persist/ children under persist/replicate/
# zfs create -o canmount=off rpool/persist/replicate
# zfs create -o canmount=off rpool/persist/replicate/system
# zfs rename rpool/persist/system/root rpool/persist/replicate/system/root
# zfs rename rpool/persist/system/var rpool/persist/replicate/system/var
# zfs rename rpool/persist/home/leyla rpool/persist/replicate/home
# # Clean up the now-empty home parent
# zfs destroy rpool/persist/home
# # NOTE: Do NOT destroy rpool/persist/system -- it still contains
# # persist/system/jellyfin and persist/system/qbittorrent which are
# # migrated in Phase 2.
#
# Verify the new layout:
# zfs list -r rpool -o name,used,mountpoint
#
# Phase 2: Merge media into a single dataset (do this last)
# Strategy: Rename the jellyfin dataset to become the shared media dataset
# (zero copy, instant), then copy qbittorrent data into it (~6.5T copy).
# This avoids duplicating the 32T jellyfin dataset.
#
# Step 2a: Rename jellyfin dataset to the shared media name
# zfs rename rpool/persist/system/jellyfin rpool/persist/replicate/system/media
#
# Step 2b: Copy qbittorrent data into the media dataset
# This copies ~6.5T and may take several hours/days depending on disk speed.
# The qbittorrent data is not critical to back up so no snapshot needed.
#
# systemctl stop qbittorrent
# rsync -avPHAX /persist/system/qbittorrent/ /persist/replicate/system/media/
#
# Step 2c: Verify the data and clean up
# ls -la /persist/replicate/system/media/
# zfs destroy rpool/persist/system/qbittorrent
# # persist/system should now be empty, clean it up:
# zfs destroy rpool/persist/system
#
# Phase 3: Enable generateBase
# In the nix config:
# - Delete this file (legacy-storage.nix) and remove its import from default.nix
# - Remove [PHASE 3] entries from legacy-impermanence.nix:
# - var-lib-private-permissions activation script
# - /etc/machine-id, SSH host keys (files block)
# - /var/lib/nixos, /var/lib/systemd/coredump (directories)
# - /persist/system/var/log persistence block
# These are now handled automatically by storage.nix and ssh.nix.
# Rebuild and verify:
# sudo nixos-rebuild switch --flake .#defiant
# # Verify mounts: findmnt -t fuse.bindfs,fuse
# # Verify persist: ls /persist/replicate/system/root/var/lib/nixos
# # Verify boot: reboot and confirm system comes up cleanly
#
# Phase 4: Migrate services (one at a time, any order)
# For each service (except jellyfin/qbittorrent):
# 1. Remove the service's [PHASE 4] section from legacy-impermanence.nix
# 2. Remove `impermanence.enable = false` for that service in configuration.nix
# 3. Rebuild: sudo nixos-rebuild switch --flake .#defiant
# 4. Verify: systemctl status <service>, check the service's data is intact
# No data migration is needed -- the data already lives on the renamed
# dataset at the new path.
#
# Migrate jellyfin and qbittorrent LAST (after Phase 2 media merge):
# 1. Remove [PHASE 4 - LAST] jellyfin entries from legacy-impermanence.nix
# 2. Remove [PHASE 4 - LAST] qbittorrent entries from legacy-impermanence.nix
# 3. Remove `impermanence.enable = false` for both in configuration.nix
# 4. Rebuild: sudo nixos-rebuild switch --flake .#defiant
# 5. Verify: systemctl status jellyfin qbittorrent
#
# Phase 5: Cleanup
# Once all services are migrated and legacy-impermanence.nix is empty:
# - Delete legacy-impermanence.nix and remove its import from default.nix
# - Rebuild: sudo nixos-rebuild switch --flake .#defiant
#
# ============================================================================
#
# Current on-disk dataset layout:
# rpool/local/ - ephemeral parent
# rpool/local/home/leyla - ephemeral user home (rolled back on boot)
# rpool/local/system/nix - nix store
# rpool/local/system/root - root filesystem (rolled back on boot)
# rpool/local/system/sops - sops age key
# rpool/persist/ - persistent parent
# rpool/persist/home/leyla - persistent user home
# rpool/persist/system/jellyfin - jellyfin media
# rpool/persist/system/qbittorrent - qbittorrent media
# rpool/persist/system/root - persistent root data
# rpool/persist/system/var/log - log persistence
{lib, ...}: {
# Disable automatic base dataset generation so we can define them manually
storage.generateBase = false;
# Manually define ZFS datasets matching main's structure
storage.zfs.datasets = {
# Ephemeral datasets (local/)
"local" = {
type = "zfs_fs";
mount = null;
};
"local/home/leyla" = {
type = "zfs_fs";
mount = "/home/leyla";
snapshot = {
blankSnapshot = true;
};
};
"local/system/nix" = {
type = "zfs_fs";
mount = "/nix";
atime = "off";
relatime = "off";
snapshot = {
autoSnapshot = false;
};
};
"local/system/root" = {
type = "zfs_fs";
mount = "/";
snapshot = {
blankSnapshot = true;
};
};
"local/system/sops" = {
type = "zfs_fs";
mount = "/var/lib/sops-nix";
};
# Persistent datasets (persist/)
"persist" = {
type = "zfs_fs";
mount = null;
};
"persist/home/leyla" = {
type = "zfs_fs";
mount = "/persist/home/leyla";
snapshot = {
autoSnapshot = true;
};
};
"persist/system/jellyfin" = {
type = "zfs_fs";
mount = "/persist/system/jellyfin";
atime = "off";
relatime = "off";
};
"persist/system/qbittorrent" = {
type = "zfs_fs";
mount = "/persist/system/qbittorrent";
atime = "off";
relatime = "off";
};
"persist/system/root" = {
type = "zfs_fs";
mount = "/persist/system/root";
snapshot = {
autoSnapshot = true;
};
};
"persist/system/var/log" = {
type = "zfs_fs";
mount = "/persist/system/var/log";
};
};
# Boot commands to rollback ephemeral root and user homes on boot
boot.initrd.postResumeCommands = lib.mkAfter ''
zfs rollback -r rpool/local/system/root@blank
zfs rollback -r rpool/local/home/leyla@blank
'';
# FileSystems needed for boot
fileSystems = {
"/".neededForBoot = true;
"/persist/system/root".neededForBoot = true;
"/persist/system/var/log".neededForBoot = true;
"/persist/system/jellyfin".neededForBoot = true;
"/persist/system/qbittorrent".neededForBoot = true;
"/var/lib/sops-nix".neededForBoot = true;
"/persist/home/leyla".neededForBoot = true;
"/home/leyla".neededForBoot = true;
};
}

View file

@ -59,12 +59,22 @@
hardware = {
piperMouse.enable = true;
};
};
storage = {
storage = {
zfs = {
enable = true;
pool = {
mode = "";
drives = ["wwn-0x5000039fd0cf05eb"];
mode = "stripe";
vdevs = [
[
{
device = "wwn-0x5000039fd0cf05eb";
boot = true;
}
]
];
cache = [];
};
};
};

View file

@ -3,5 +3,6 @@
imports = [
./configuration.nix
./hardware-configuration.nix
./legacy-storage.nix
];
}

View file

@ -0,0 +1,51 @@
# Legacy storage configuration for emergent
# This file manually defines ZFS datasets matching the existing on-disk layout
# to allow incremental migration to the new storage module (generateBase = true).
#
# Current on-disk dataset layout:
# rpool/local/ - parent (canmount=off)
# rpool/local/system/nix - nix store
# rpool/local/system/root - root filesystem
#
# Migration plan:
# Phase 1: Rename datasets on disk (boot from live USB)
# zfs rename -p rpool/local/system/nix rpool/persist/local/nix
# zfs rename rpool/local rpool/persist/local
# # This moves local/system/root -> persist/local/system/root, one level
# # deeper than its final home, so rename it down to persist/local/root:
# zfs rename rpool/persist/local/system/root rpool/persist/local/root
# zfs destroy rpool/persist/local/system # now empty
# # Recreate blank snapshot:
# zfs destroy rpool/persist/local/root@blank
# zfs snapshot rpool/persist/local/root@blank
#
# Phase 2: Delete this file, remove its import from default.nix, rebuild.
{...}: {
# Disable automatic base dataset generation so we can define them manually
storage.generateBase = false;
# Manually define ZFS datasets matching the existing on-disk layout
storage.zfs.datasets = {
"local" = {
type = "zfs_fs";
mount = null;
};
"local/system/nix" = {
type = "zfs_fs";
mount = "/nix";
atime = "off";
relatime = "off";
snapshot = {
autoSnapshot = false;
};
};
"local/system/root" = {
type = "zfs_fs";
mount = "/";
snapshot = {
blankSnapshot = true;
autoSnapshot = true;
};
};
};
}

125
flake.lock generated
View file

@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1771881364,
"narHash": "sha256-A5uE/hMium5of/QGC6JwF5TGoDAfpNtW00T0s9u/PN8=",
"lastModified": 1772867152,
"narHash": "sha256-RIFgZ4O6Eg+5ysZ8Tqb3YvcqiRaNy440GEY22ltjRrs=",
"owner": "nix-community",
"repo": "disko",
"rev": "a4cb7bf73f264d40560ba527f9280469f1f081c6",
"rev": "eaafb89b56e948661d618eefd4757d9ea8d77514",
"type": "github"
},
"original": {
@ -28,11 +28,11 @@
},
"locked": {
"dir": "pkgs/firefox-addons",
"lastModified": 1771888219,
"narHash": "sha256-XlA/l99y1Qilmd8ttYJ9y5BSse9GKoQlt9hnY8H+EHM=",
"lastModified": 1772856163,
"narHash": "sha256-xD+d1+FVhKJ+oFYMTWOdVSBoXS4yeMyVZyDjMXqWEJE=",
"owner": "rycee",
"repo": "nur-expressions",
"rev": "a347c1da78da64eeb78a0c9005bdaadace33e83c",
"rev": "d358a550c7beac5f04fbc5a786e14af079606689",
"type": "gitlab"
},
"original": {
@ -115,32 +115,11 @@
]
},
"locked": {
"lastModified": 1771851181,
"narHash": "sha256-gFgE6mGUftwseV3DUENMb0k0EiHd739lZexPo5O/sdQ=",
"lastModified": 1772845525,
"narHash": "sha256-Dp5Ir2u4jJDGCgeMRviHvEQDe+U37hMxp6RSNOoMMPc=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "9a4b494b1aa1b93d8edf167f46dc8e0c0011280c",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"home-manager_2": {
"inputs": {
"nixpkgs": [
"impermanence",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768598210,
"narHash": "sha256-kkgA32s/f4jaa4UG+2f8C225Qvclxnqs76mf8zvTVPg=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "c47b2cc64a629f8e075de52e4742de688f930dc6",
"rev": "27b93804fbef1544cb07718d3f0a451f4c4cd6c0",
"type": "github"
},
"original": {
@ -150,12 +129,20 @@
}
},
"impermanence": {
"inputs": {
"home-manager": [
"home-manager"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1737831083,
"narHash": "sha256-LJggUHbpyeDvNagTUrdhe/pRVp4pnS6wVKALS782gRI=",
"lastModified": 1769548169,
"narHash": "sha256-03+JxvzmfwRu+5JafM0DLbxgHttOQZkUtDWBmeUkN8Y=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "4b3e914cdf97a5b536a889e939fb2fd2b043a170",
"rev": "7b1d382faf603b6d264f58627330f9faa5cba149",
"type": "github"
},
"original": {
@ -204,14 +191,14 @@
"mcp-nixos": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_2"
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1769804089,
"narHash": "sha256-Wkot1j0cTx64xxjmLXzPubTckaZBSUJFhESEdOzPYas=",
"lastModified": 1772769318,
"narHash": "sha256-RAyOW5JMXRhiREqxFPOzw80fVsYVBnOPFgBSjnJ6gbY=",
"owner": "utensils",
"repo": "mcp-nixos",
"rev": "37a691ea4ea9c8bdcccfe174c6127847b8213fd3",
"rev": "60c1efbba0de1268b42f1144c904e6c8a9627dde",
"type": "github"
},
"original": {
@ -227,11 +214,11 @@
]
},
"locked": {
"lastModified": 1771520882,
"narHash": "sha256-9SeTZ4Pwr730YfT7V8Azb8GFbwk1ZwiQDAwft3qAD+o=",
"lastModified": 1772379624,
"narHash": "sha256-NG9LLTWlz4YiaTAiRGChbrzbVxBfX+Auq4Ab/SWmk4A=",
"owner": "LnL7",
"repo": "nix-darwin",
"rev": "6a7fdcd5839ec8b135821179eea3b58092171bcf",
"rev": "52d061516108769656a8bd9c6e811c677ec5b462",
"type": "github"
},
"original": {
@ -268,11 +255,11 @@
]
},
"locked": {
"lastModified": 1771901087,
"narHash": "sha256-b5eSke+C8UeR5Er+TZOzHCDStBJ68yyFlqAUc6fNBX0=",
"lastModified": 1772850876,
"narHash": "sha256-Ga19zlfMpakCY4GMwBSOljNLOF0nEYrYBXv0hP/d4rw=",
"owner": "nix-community",
"repo": "nix-vscode-extensions",
"rev": "c22e7adea9adec98b3dc79be954ee17d56a232bd",
"rev": "22f084d4c280dfc8a9d764f7b85af38e5d69c3dc",
"type": "github"
},
"original": {
@ -283,11 +270,11 @@
},
"nixos-hardware": {
"locked": {
"lastModified": 1771423359,
"narHash": "sha256-yRKJ7gpVmXbX2ZcA8nFi6CMPkJXZGjie2unsiMzj3Ig=",
"lastModified": 1771969195,
"narHash": "sha256-qwcDBtrRvJbrrnv1lf/pREQi8t2hWZxVAyeMo7/E9sw=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "740a22363033e9f1bb6270fbfb5a9574067af15b",
"rev": "41c6b421bdc301b2624486e11905c9af7b8ec68e",
"type": "github"
},
"original": {
@ -299,15 +286,15 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1768564909,
"narHash": "sha256-Kell/SpJYVkHWMvnhqJz/8DqQg2b6PguxVWOuadbHCc=",
"owner": "nixos",
"lastModified": 1767640445,
"narHash": "sha256-UWYqmD7JFBEDBHWYcqE6s6c77pWdcU/i+bwD6XxMb8A=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e4bae1bd10c9c57b2cf517953ab70060a828ee6f",
"rev": "9f0c42f8bc7151b8e7e5840fb3bd454ad850d8c5",
"type": "github"
},
"original": {
"owner": "nixos",
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
@ -330,37 +317,21 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1767640445,
"narHash": "sha256-UWYqmD7JFBEDBHWYcqE6s6c77pWdcU/i+bwD6XxMb8A=",
"owner": "NixOS",
"lastModified": 1772773019,
"narHash": "sha256-E1bxHxNKfDoQUuvriG71+f+s/NT0qWkImXsYZNFFfCs=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "9f0c42f8bc7151b8e7e5840fb3bd454ad850d8c5",
"rev": "aca4d95fce4914b3892661bcb80b8087293536c6",
"type": "github"
},
"original": {
"owner": "NixOS",
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1771369470,
"narHash": "sha256-0NBlEBKkN3lufyvFegY4TYv5mCNHbi5OmBDrzihbBMQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "0182a361324364ae3f436a63005877674cf45efb",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1759070547,
"narHash": "sha256-JVZl8NaVRYb0+381nl7LvPE+A774/dRpif01FKLrYFQ=",
@ -378,7 +349,7 @@
},
"noita-entangled-worlds": {
"inputs": {
"nixpkgs": "nixpkgs_4",
"nixpkgs": "nixpkgs_3",
"rust-overlay": "rust-overlay",
"systems": "systems_2"
},
@ -410,7 +381,7 @@
"nix-syncthing": "nix-syncthing",
"nix-vscode-extensions": "nix-vscode-extensions",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs_3",
"nixpkgs": "nixpkgs_2",
"noita-entangled-worlds": "noita-entangled-worlds",
"secrets": "secrets",
"sops-nix": "sops-nix"
@ -460,11 +431,11 @@
]
},
"locked": {
"lastModified": 1771889317,
"narHash": "sha256-YV17Q5lEU0S9ppw08Y+cs4eEQJBuc79AzblFoHORLMU=",
"lastModified": 1772495394,
"narHash": "sha256-hmIvE/slLKEFKNEJz27IZ8BKlAaZDcjIHmkZ7GCEjfw=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "b027513c32e5b39b59f64626b87fbe168ae02094",
"rev": "1d9b98a29a45abe9c4d3174bd36de9f28755e3ff",
"type": "github"
},
"original": {

View file

@ -37,6 +37,8 @@
# delete your darlings
impermanence = {
url = "github:nix-community/impermanence";
inputs.nixpkgs.follows = "nixpkgs";
inputs.home-manager.follows = "home-manager";
};
nix-darwin = {

View file

@ -12,21 +12,29 @@ in {
type = lib.types.bool;
default = true;
};
persistencePath = lib.mkOption {
type = lib.types.str;
default =
if osConfig.storage.generateBase
then "/persist/replicate/home"
else "/persist";
description = "The base path for user home persistence. The impermanence module will automatically append the user's home directory path. Automatically adapts based on whether the system uses the new dataset layout or the legacy one.";
};
};
config = lib.mkMerge [
(lib.mkIf config.impermanence.enable {
assertions = [
{
assertion = osConfig.host.impermanence.enable;
assertion = osConfig.storage.impermanence.enable;
message = "impermanence can not be enabled for a user when it is not enabled for the system";
}
];
})
# If impermanence is not enabled for this user but system impermanence is enabled,
# persist the entire home directory as fallback
(lib.mkIf (osConfig.host.impermanence.enable && !cfg.enable && cfg.fallbackPersistence.enable) {
home.persistence."/persist/home/${config.home.username}" = {
(lib.mkIf (osConfig.storage.impermanence.enable && !cfg.enable && cfg.fallbackPersistence.enable) {
home.persistence."${cfg.persistencePath}" = {
directories = ["."];
allowOther = true;
};

View file

@ -96,7 +96,7 @@
}
)
(lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
files = lib.lists.flatten (
builtins.map (hostKey: [".ssh/${hostKey.path}" ".ssh/${hostKey.path}.pub"]) config.programs.openssh.hostKeys
);

View file

@ -14,22 +14,17 @@
android-studio
];
}
# TODO: create this
# (
# lib.mkIf config.impermanence.enable {
# home.persistence."/persist${config.home.homeDirectory}" = {
# directories = [
# # configuration
# "${config.xdg.configHome}/Google/AndroidStudio"
# # Android SDK
# ".android"
# # Gradle cache
# ".gradle"
# # Android Studio projects cache
# "${config.xdg.cacheHome}/Google/AndroidStudio"
# ];
# };
# }
# )
(
lib.mkIf config.impermanence.enable {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/Google/AndroidStudio"
".android"
".gradle"
"${config.xdg.cacheHome}/Google/AndroidStudio"
];
};
}
)
]);
}

View file

@ -1,15 +1,13 @@
{
lib,
config,
osConfig,
...
}: {
config = lib.mkIf (config.programs.anki.enable && osConfig.host.impermanence.enable) {
home.persistence."/persist${config.home.homeDirectory}" = {
config = lib.mkIf (config.programs.anki.enable && config.impermanence.enable) {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.dataHome}/Anki2/"
".local/share/Anki2"
];
allowOther = true;
};
};
}

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/Bitwarden"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/bruno/"
];
allowOther = true;
};
}
)

View file

@ -12,11 +12,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/calibre"
];
allowOther = true;
};
}
)

View file

@ -16,12 +16,11 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.dataHome}/DaVinciResolve"
"${config.xdg.configHome}/blackmagic"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.dataHome}/DBeaverData/"
];
allowOther = true;
};
}
)

View file

@ -6,11 +6,10 @@
config = lib.mkIf config.programs.discord.enable (lib.mkMerge [
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/discord/"
];
allowOther = true;
};
}
)

View file

@ -22,11 +22,10 @@
# Extension configuration
".mozilla/firefox/${profile}/extension-settings.json"
];
allowOther = true;
};
in {
config = lib.mkIf (config.programs.firefox.enable && config.impermanence.enable) {
home.persistence."/persist${config.home.homeDirectory}" = lib.mkMerge (
home.persistence."${config.impermanence.persistencePath}" = lib.mkMerge (
(
lib.attrsets.mapAttrsToList
(profile: _: buildProfilePersistence profile)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/FreeCAD"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/GIMP"
];
allowOther = true;
};
}
)

View file

@ -16,7 +16,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
# configuration
"${config.xdg.configHome}/JetBrains/"

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/inkscape"
];
allowOther = true;
};
}
)

View file

@ -23,12 +23,11 @@ in {
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/kdenliverc"
"${config.xdg.dataHome}/kdenlive"
];
allowOther = true;
};
}
)

View file

@ -16,12 +16,11 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/kritarc"
"${config.xdg.dataHome}/krita"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/libreoffice"
];
allowOther = true;
};
}
)

View file

@ -30,7 +30,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
".MakeMKV"
];

View file

@ -17,12 +17,11 @@ in {
}
(
mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/mapillary-uploader"
"${config.xdg.dataHome}/mapillary-uploader"
];
allowOther = true;
};
}
)

View file

@ -6,11 +6,10 @@
config = lib.mkIf config.programs.obs-studio.enable (lib.mkMerge [
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/obs-studio"
];
allowOther = true;
};
}
)

View file

@ -6,7 +6,7 @@
config = lib.mkIf config.programs.obsidian.enable (lib.mkMerge [
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/obsidian"
];

View file

@ -23,12 +23,11 @@ in {
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/olympus"
"${config.xdg.dataHome}/olympus"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/OpenRGB"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/MusicBrainz"
];
allowOther = true;
};
}
)

View file

@ -16,7 +16,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/ProStudioMasters"
];

View file

@ -16,7 +16,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/protonvpn"
"${config.xdg.configHome}/Proton"

View file

@ -16,7 +16,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/qBittorrent"
];

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/qFlipper"
];
allowOther = true;
};
}
)

View file

@ -4,19 +4,19 @@
config,
...
}: {
options.programs.signal-desktop-bin = {
options.programs.signal-desktop = {
enable = lib.mkEnableOption "enable signal";
};
config = lib.mkIf config.programs.signal-desktop-bin.enable (lib.mkMerge [
config = lib.mkIf config.programs.signal-desktop.enable (lib.mkMerge [
{
home.packages = with pkgs; [
signal-desktop-bin
signal-desktop
];
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/Signal"
];

View file

@ -18,14 +18,13 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
{
directory = "${config.xdg.dataHome}/Steam";
method = "symlink";
}
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.dataHome}/torbrowser"
];
allowOther = true;
};
}
)

View file

@ -16,11 +16,10 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/chromium"
];
allowOther = true;
};
}
)

View file

@ -16,12 +16,11 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
"${config.xdg.configHome}/via"
"${config.xdg.dataHome}/via"
];
allowOther = true;
};
}
)

View file

@ -17,7 +17,7 @@
}
(
lib.mkIf config.impermanence.enable {
home.persistence."/persist${config.home.homeDirectory}" = {
home.persistence."${config.impermanence.persistencePath}" = {
directories = [
{
directory = ".vmware";
@ -28,7 +28,6 @@
method = "symlink";
}
];
allowOther = true;
};
}
)

View file

@ -8,14 +8,13 @@
./desktop.nix
./ssh.nix
./i18n.nix
./sync.nix
./impermanence.nix
./disko.nix
./ollama.nix
./sync
./ollama
./ai.nix
./tailscale.nix
./tailscale
./steam.nix
./server
./storage
];
nixpkgs.config.permittedInsecurePackages = [

View file

@ -1,267 +0,0 @@
{
lib,
pkgs,
config,
inputs,
...
}: let
# there currently is a bug with disko that causes long disk names to be generated improperly this hash function should alleviate it when used for disk names instead of what we are defaulting to
# max gpt length is 36 and disk adds formats it like disk-xxxx-zfs which means we need to be 9 characters under that
hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));
vdevs =
builtins.map (
disks:
builtins.map (disk: lib.attrsets.nameValuePair (hashDisk disk) disk) disks
)
config.host.storage.pool.vdevs;
cache =
builtins.map (
disk: lib.attrsets.nameValuePair (hashDisk disk) disk
)
config.host.storage.pool.cache;
datasets = config.host.storage.pool.datasets // config.host.storage.pool.extraDatasets;
in {
options.host.storage = {
enable = lib.mkEnableOption "are we going create zfs disks with disko on this device";
encryption = lib.mkEnableOption "is the vdev going to be encrypted";
notifications = {
enable = lib.mkEnableOption "are notifications enabled";
host = lib.mkOption {
type = lib.types.str;
description = "what is the host that we are going to send the email to";
};
port = lib.mkOption {
type = lib.types.port;
description = "what port is the host using to receive mail on";
};
to = lib.mkOption {
type = lib.types.str;
description = "what account is the email going to be sent to";
};
user = lib.mkOption {
type = lib.types.str;
description = "what user is the email going to be set from";
};
tokenFile = lib.mkOption {
type = lib.types.str;
description = "file containing the password to be used by msmtp for notifications";
};
};
pool = {
mode = lib.mkOption {
type = lib.types.str;
default = "raidz2";
description = "what level of redundancy should this pool have";
};
# list of drives in pool that will have a boot partition put onto them
bootDrives = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "list of disks that are going to have a boot partition installed on them";
default = lib.lists.flatten config.host.storage.pool.vdevs;
};
# shorthand for vdevs if you only have 1 vdev
drives = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "list of drives that are going to be in the vdev";
default = [];
};
# list of all drives in each vdev
vdevs = lib.mkOption {
type = lib.types.listOf (lib.types.listOf lib.types.str);
description = "list of disks that are going to be in";
default = [config.host.storage.pool.drives];
};
# list of cache drives for pool
cache = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "list of drives that are going to be used as cache";
default = [];
};
# Default datasets that are needed to make a functioning system
datasets = lib.mkOption {
type = lib.types.attrsOf (inputs.disko.lib.subType {
types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
});
default = {
"local" = {
type = "zfs_fs";
options.canmount = "off";
};
# nix directory needs to be available pre persist and doesn't need to be snapshotted or backed up
"local/system/nix" = {
type = "zfs_fs";
mountpoint = "/nix";
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
# dataset for root that gets rolled back on every boot
"local/system/root" = {
type = "zfs_fs";
mountpoint = "/";
options = {
canmount = "on";
};
postCreateHook = ''
zfs snapshot rpool/local/system/root@blank
'';
};
};
};
extraDatasets = lib.mkOption {
type = lib.types.attrsOf (inputs.disko.lib.subType {
types = {inherit (inputs.disko.lib.types) zfs_fs zfs_volume;};
});
description = "List of datasets to define";
default = {};
};
};
};
config = lib.mkIf config.host.storage.enable {
programs.msmtp = lib.mkIf config.host.storage.notifications.enable {
enable = true;
setSendmail = true;
defaults = {
aliases = "/etc/aliases";
port = config.host.storage.notifications.port;
tls_trust_file = "/etc/ssl/certs/ca-certificates.crt";
tls = "on";
auth = "login";
tls_starttls = "off";
};
accounts = {
zfs_notifications = {
auth = true;
tls = true;
host = config.host.storage.notifications.host;
passwordeval = "cat ${config.host.storage.notifications.tokenFile}";
user = config.host.storage.notifications.user;
from = config.host.storage.notifications.user;
};
};
};
services.zfs = {
autoScrub.enable = true;
autoSnapshot.enable = true;
zed = lib.mkIf config.host.storage.notifications.enable {
enableMail = true;
settings = {
ZED_DEBUG_LOG = "/tmp/zed.debug.log";
ZED_EMAIL_ADDR = [config.host.storage.notifications.to];
ZED_EMAIL_PROG = "${pkgs.msmtp}/bin/msmtp";
ZED_EMAIL_OPTS = "-a zfs_notifications @ADDRESS@";
ZED_NOTIFY_INTERVAL_SECS = 3600;
ZED_NOTIFY_VERBOSE = true;
ZED_USE_ENCLOSURE_LEDS = true;
ZED_SCRUB_AFTER_RESILVER = true;
};
};
};
disko.devices = {
disk = (
builtins.listToAttrs (
builtins.map
(drive:
lib.attrsets.nameValuePair (drive.name) {
type = "disk";
device = "/dev/disk/by-id/${drive.value}";
content = {
type = "gpt";
partitions = {
ESP = lib.mkIf (builtins.elem drive.value config.host.storage.pool.bootDrives) {
# The 2GB here for the boot partition might be a bit overkill we probably only need like 1/4th of that but storage is cheap
size = "2G";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = ["umask=0077"];
};
};
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "rpool";
};
};
};
};
})
(
(lib.lists.flatten vdevs) ++ cache
)
)
);
zpool = {
rpool = {
type = "zpool";
mode = {
topology = {
type = "topology";
vdev = (
builtins.map (disks: {
mode = config.host.storage.pool.mode;
members =
builtins.map (disk: disk.name) disks;
})
vdevs
);
cache = builtins.map (disk: disk.name) cache;
};
};
options = {
ashift = "12";
autotrim = "on";
};
rootFsOptions =
{
canmount = "off";
mountpoint = "none";
xattr = "sa";
acltype = "posixacl";
relatime = "on";
compression = "lz4";
"com.sun:auto-snapshot" = "false";
}
// (
lib.attrsets.optionalAttrs config.host.storage.encryption {
encryption = "on";
keyformat = "hex";
keylocation = "prompt";
}
);
datasets = lib.mkMerge [
(
lib.attrsets.mapAttrs (name: value: {
type = value.type;
options = value.options;
mountpoint = value.mountpoint;
postCreateHook = value.postCreateHook;
})
datasets
)
];
};
};
};
};
}

View file

@ -1,134 +0,0 @@
{
config,
lib,
...
}: {
# options.storage = {
# zfs = {
# # TODO: enable option
# # when this option is enabled we need to configure and enable disko things
# # TODO: we need some way of managing notifications
# # TODO: we need options to configure zfs pools
# # we should have warnings when the configured pool is missing drives
# # TODO: dataset option that is a submodule that adds datasets to the system
# # warnings for when a dataset was created in the past on a system but it is now missing some of the options defined for it
# # TODO: pools and datasets need to be passed to disko
# };
# impermanence = {
# # TODO: enable option
# # TODO: datasets option that is a submodule that will be used to define what datasets to add to the storage system
# # We should by default create the `local`, `local/system/nix`, `local/system/root`, `persist` `persist/system/root`, and `persist/system/var/log` datasets
# # Then we should make a dataset for user folders local and persist
# # We should also create datasets for systemd modules that have have impermanence enabled for them
# # we need to figure out what options a dataset can have in zfs
# };
# # TODO: we should have an impermanence module for home manager that proxies its values namespaced to the user down here that matches the same interface
# # TODO: we should have a way of enabling impermanence for a systemd config
# # these should have an option to put their folder into their own dataset (this needs to support private vs non private)
# # options for features that can be added to the dataset
# };
options.host.impermanence.enable = lib.mkEnableOption "are we going to use impermanence on this device";
config = lib.mkMerge [
{
assertions = [
{
assertion = !(config.host.impermanence.enable && !config.host.storage.enable);
message = ''
Disko storage must be enabled to use impermanence.
'';
}
];
}
(
lib.mkIf config.host.impermanence.enable {
assertions = [
{
assertion = config.host.impermanence.enable && config.host.storage.enable;
message = "Impermanence can not be used without managed host storage.";
}
];
# fixes issues with /var/lib/private not having the correct permissions https://github.com/nix-community/impermanence/issues/254
system.activationScripts."createPersistentStorageDirs".deps = ["var-lib-private-permissions" "users" "groups"];
system.activationScripts = {
"var-lib-private-permissions" = {
deps = ["specialfs"];
text = ''
mkdir -p /persist/system/root/var/lib/private
chmod 0700 /persist/system/root/var/lib/private
'';
};
};
programs.fuse.userAllowOther = true;
boot.initrd.postResumeCommands = lib.mkAfter ''
zfs rollback -r rpool/local/system/root@blank
'';
fileSystems = {
"/".neededForBoot = true;
"/persist/system/root".neededForBoot = true;
"/persist/system/var/log".neededForBoot = true;
};
host.storage.pool.extraDatasets = {
# persist datasets are datasets that contain information that we would like to keep around
"persist" = {
type = "zfs_fs";
options.canmount = "off";
options = {
"com.sun:auto-snapshot" = "true";
};
};
# this is where root data actually lives
"persist/system/root" = {
type = "zfs_fs";
mountpoint = "/persist/system/root";
};
"persist/system/var/log" = {
type = "zfs_fs";
mountpoint = "/persist/system/var/log";
# logs should be append only so we shouldn't need to snapshot them
options = {
"com.sun:auto-snapshot" = "false";
};
};
};
environment.persistence."/persist/system/var/log" = {
enable = true;
hideMounts = true;
directories = [
"/var/log"
];
};
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
"/var/lib/nixos"
"/var/lib/systemd/coredump"
];
files = [
"/etc/machine-id"
];
};
# TODO: this should live in leylas home manager configuration
security.sudo.extraConfig = "Defaults lecture=never";
}
)
];
}

View file

@ -0,0 +1,6 @@
{...}: {
imports = [
./ollama.nix
./storage.nix
];
}

View file

@ -27,20 +27,6 @@
allowedUDPPorts = ports;
};
}))
(lib.mkIf config.host.impermanence.enable {
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
{
directory = "/var/lib/private/ollama";
user = config.services.ollama.user;
group = config.services.ollama.group;
mode = "0700";
}
];
};
})
]
);
}

View file

@ -0,0 +1,37 @@
{
config,
lib,
...
}: {
options = {
services.ollama.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.ollama.enable && config.storage.impermanence.enable;
};
};
config = lib.mkIf (config.services.ollama.enable) {
storage.datasets.replicate."system/root" = {
directories."/var/lib/private/ollama" = lib.mkIf config.services.ollama.impermanence.enable {
enable = true;
owner.name = config.services.ollama.user;
group.name = config.services.ollama.group;
owner.permissions = {
read = true;
write = true;
execute = false;
};
group.permissions = {
read = false;
write = false;
execute = false;
};
other.permissions = {
read = false;
write = false;
execute = false;
};
};
};
};
}

View file

@ -3,6 +3,6 @@
./actual.nix
./proxy.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,37 +0,0 @@
{
lib,
config,
...
}: let
const = import ./const.nix;
dataDirectory = const.dataDirectory;
in {
options.services.actual = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.actual.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.actual.impermanence.enable {
assertions = [
{
assertion = config.services.actual.settings.dataDir == dataDirectory;
message = "actual data location does not match persistence\nconfig directory: ${config.services.actual.settings.dataDir}\npersistence directory: ${dataDirectory}";
}
{
assertion = config.systemd.services.actual.serviceConfig.DynamicUser or false;
message = "actual systemd service must have DynamicUser enabled to use private directory";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = dataDirectory;
user = "actual";
group = "actual";
}
];
};
};
}

View file

@ -0,0 +1,22 @@
{
lib,
config,
...
}: let
const = import ./const.nix;
dataDirectory = const.dataDirectory;
in {
options.services.actual.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.actual.enable && config.storage.impermanence.enable;
};
config = lib.mkIf config.services.actual.enable {
storage.datasets.replicate."system/root" = {
directories."${dataDirectory}" = lib.mkIf config.services.actual.impermanence.enable {
owner.name = "actual";
group.name = "actual";
};
};
};
}

View file

@ -1,5 +1,5 @@
{...}: {
imports = [
./impermanence.nix
./storage.nix
];
}

View file

@ -1,33 +0,0 @@
{
lib,
config,
...
}: let
bazarr_data_directory = "/var/lib/bazarr";
in {
options.services.bazarr = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.bazarr.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.bazarr.impermanence.enable {
assertions = [
{
assertion = config.services.bazarr.dataDir == bazarr_data_directory;
message = "bazarr data directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = bazarr_data_directory;
user = "bazarr";
group = "bazarr";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
{
lib,
config,
...
}: let
bazarr_data_directory = "/var/lib/bazarr";
in {
options.services.bazarr.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.bazarr.enable && config.storage.impermanence.enable;
};
config = lib.mkIf config.services.bazarr.enable {
storage.datasets.replicate."system/root" = {
directories."${bazarr_data_directory}" = lib.mkIf config.services.bazarr.impermanence.enable {
owner.name = "bazarr";
group.name = "bazarr";
};
};
};
}

View file

@ -1,6 +1,6 @@
{...}: {
imports = [
./crab-hole.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,33 +0,0 @@
{
lib,
config,
...
}: let
workingDirectory = "/var/lib/private/crab-hole";
in {
options.services.crab-hole = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.crab-hole.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.crab-hole.impermanence.enable {
assertions = [
{
assertion =
config.systemd.services.crab-hole.serviceConfig.WorkingDirectory == (builtins.replaceStrings ["/private"] [""] workingDirectory);
message = "crab-hole working directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = workingDirectory;
user = "crab-hole";
group = "crab-hole";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
{
lib,
config,
...
}: let
workingDirectory = "/var/lib/private/crab-hole";
in {
options.services.crab-hole.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.crab-hole.enable && config.storage.impermanence.enable;
};
config = lib.mkIf config.services.crab-hole.enable {
storage.datasets.replicate."system/root" = {
directories."${workingDirectory}" = lib.mkIf config.services.crab-hole.impermanence.enable {
owner.name = "crab-hole";
group.name = "crab-hole";
};
};
};
}

View file

@ -1,6 +1,6 @@
{...}: {
imports = [
./fail2ban.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,34 +0,0 @@
{
lib,
config,
...
}: let
dataFolder = "/var/lib/fail2ban";
dataFile = "fail2ban.sqlite3";
in {
options.services.fail2ban = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.fail2ban.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.fail2ban.impermanence.enable {
assertions = [
{
assertion = config.services.fail2ban.daemonSettings.Definition.dbfile == "${dataFolder}/${dataFile}";
message = "fail2ban data file does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = dataFolder;
user = "fail2ban";
group = "fail2ban";
}
];
};
};
}

View file

@ -0,0 +1,22 @@
{
lib,
config,
...
}: let
dataFolder = "/var/lib/fail2ban";
dataFile = "fail2ban.sqlite3";
in {
options.services.fail2ban.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.fail2ban.enable && config.storage.impermanence.enable;
};
config = lib.mkIf config.services.fail2ban.enable {
storage.datasets.replicate."system/root" = {
directories."${dataFolder}" = lib.mkIf config.services.fail2ban.impermanence.enable {
owner.name = "fail2ban";
group.name = "fail2ban";
};
};
};
}

View file

@ -1,5 +1,5 @@
{...}: {
imports = [
./impermanence.nix
./storage.nix
];
}

View file

@ -1,26 +0,0 @@
{
lib,
config,
...
}: {
options.services.flaresolverr = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.flaresolverr.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.flaresolverr.impermanence.enable {
# FlareSolverr typically doesn't need persistent storage as it's a proxy service
# but we'll add basic structure in case it's needed for logs or configuration
environment.persistence."/persist/system/root" = {
directories = [
{
directory = "/var/lib/flaresolverr";
user = "flaresolverr";
group = "flaresolverr";
}
];
};
};
}

View file

@ -0,0 +1,19 @@
{
lib,
config,
...
}: {
options.services.flaresolverr.impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.flaresolverr.enable && config.storage.impermanence.enable;
};
config = lib.mkIf config.services.flaresolverr.enable {
storage.datasets.replicate."system/root" = {
directories."/var/lib/flaresolverr" = lib.mkIf config.services.flaresolverr.impermanence.enable {
owner.name = "flaresolverr";
group.name = "flaresolverr";
};
};
};
}

View file

@ -4,6 +4,6 @@
./proxy.nix
./database.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,35 +0,0 @@
{
lib,
config,
...
}: let
stateDir = "/var/lib/forgejo";
in {
options.services.forgejo = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.forgejo.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.forgejo.impermanence.enable {
assertions = [
{
assertion = config.services.forgejo.stateDir == stateDir;
message = "forgejo state directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
{
directory = stateDir;
user = "forgejo";
group = "forgejo";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Forgejo.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the Forgejo state directory; must stay in sync with
  # config.services.forgejo.stateDir (enforced by the assertion below).
  stateDir = "/var/lib/forgejo";
in {
  # Persist Forgejo state only when both the service and host-wide
  # impermanence support are enabled.
  options.services.forgejo.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.forgejo.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.forgejo.enable {
    # Fail the build if the service's state directory diverges from the path
    # we persist, instead of silently losing data on reboot.
    assertions = lib.optional config.services.forgejo.impermanence.enable {
      assertion = config.services.forgejo.stateDir == stateDir;
      message = "forgejo state directory does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${stateDir}" = lib.mkIf config.services.forgejo.impermanence.enable {
        owner.name = "forgejo";
        group.name = "forgejo";
      };
    };
  };
}

View file

@ -4,7 +4,7 @@
./proxy.nix
./database.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
./extensions
];
}

View file

@ -1,26 +0,0 @@
{
lib,
config,
...
}: let
configDir = "/var/lib/hass";
in
lib.mkIf (config.host.impermanence.enable && config.services.home-assistant.enable) {
assertions = [
{
assertion = config.services.home-assistant.configDir == configDir;
message = "home assistant config directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
{
directory = configDir;
user = "hass";
group = "hass";
}
];
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Home Assistant.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the Home Assistant config directory; must stay in sync
  # with config.services.home-assistant.configDir (enforced below).
  configDir = "/var/lib/hass";
in {
  # Persist Home Assistant configuration only when both the service and
  # host-wide impermanence support are enabled.
  options.services.home-assistant.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.home-assistant.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.home-assistant.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.home-assistant.impermanence.enable {
      assertion = config.services.home-assistant.configDir == configDir;
      message = "home assistant config directory does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${configDir}" = lib.mkIf config.services.home-assistant.impermanence.enable {
        owner.name = "hass";
        group.name = "hass";
      };
    };
  };
}

View file

@ -3,7 +3,7 @@
./proxy.nix
./database.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
];
# NOTE: This shouldn't be needed now that we are out of testing

View file

@ -1,32 +0,0 @@
{
lib,
config,
...
}: let
mediaLocation = "/var/lib/immich";
in {
options.services.immich = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.immich.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.immich.impermanence.enable {
assertions = [
{
assertion = config.services.immich.mediaLocation == mediaLocation;
message = "immich media location does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = mediaLocation;
user = "immich";
group = "immich";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Immich.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the Immich media location; must stay in sync with
  # config.services.immich.mediaLocation (enforced below).
  mediaLocation = "/var/lib/immich";
in {
  # Persist Immich media/state only when both the service and host-wide
  # impermanence support are enabled.
  options.services.immich.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.immich.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.immich.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.immich.impermanence.enable {
      assertion = config.services.immich.mediaLocation == mediaLocation;
      message = "immich media location does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${mediaLocation}" = lib.mkIf config.services.immich.impermanence.enable {
        owner.name = "immich";
        group.name = "immich";
      };
    };
  };
}

View file

@ -1,6 +1,6 @@
{...}: {
imports = [
./impermanence.nix
./storage.nix
];
config = {

View file

@ -1,33 +0,0 @@
{
lib,
config,
...
}: let
jackett_data_directory = "/var/lib/jackett/.config/Jackett";
in {
options.services.jackett = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.jackett.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.jackett.impermanence.enable {
assertions = [
{
assertion = config.services.jackett.dataDir == jackett_data_directory;
message = "jackett data directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = jackett_data_directory;
user = "jackett";
group = "jackett";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Jackett.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the Jackett data directory; must stay in sync with
  # config.services.jackett.dataDir (enforced below).
  jackett_data_directory = "/var/lib/jackett/.config/Jackett";
in {
  # Persist Jackett data only when both the service and host-wide
  # impermanence support are enabled.
  options.services.jackett.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.jackett.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.jackett.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.jackett.impermanence.enable {
      assertion = config.services.jackett.dataDir == jackett_data_directory;
      message = "jackett data directory does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${jackett_data_directory}" = lib.mkIf config.services.jackett.impermanence.enable {
        owner.name = "jackett";
        group.name = "jackett";
      };
    };
  };
}

View file

@ -3,6 +3,6 @@
./jellyfin.nix
./proxy.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,73 +0,0 @@
{
lib,
config,
...
}: let
jellyfin_data_directory = "/var/lib/jellyfin";
jellyfin_cache_directory = "/var/cache/jellyfin";
in {
options.services.jellyfin = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.jellyfin.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.jellyfin.impermanence.enable {
fileSystems."/persist/system/jellyfin".neededForBoot = true;
host.storage.pool.extraDatasets = {
# sops age key needs to be available to pre persist for user generation
"persist/system/jellyfin" = {
type = "zfs_fs";
mountpoint = "/persist/system/jellyfin";
options = {
atime = "off";
relatime = "off";
canmount = "on";
};
};
};
assertions = [
{
assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
message = "jellyfin data directory does not match persistence";
}
{
assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
message = "jellyfin cache directory does not match persistence";
}
];
environment.persistence = {
"/persist/system/root" = {
directories = [
{
directory = jellyfin_data_directory;
user = "jellyfin";
group = "jellyfin";
}
{
directory = jellyfin_cache_directory;
user = "jellyfin";
group = "jellyfin";
}
];
};
"/persist/system/jellyfin" = {
enable = true;
hideMounts = true;
directories = [
{
directory = config.services.jellyfin.media_directory;
user = "jellyfin";
group = "jellyfin_media";
mode = "1770";
}
];
};
};
};
}

View file

@ -0,0 +1,56 @@
# Storage configuration for Jellyfin.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copies of the Jellyfin directories; must stay in sync with the
  # service's dataDir/cacheDir options (enforced by the assertions below).
  jellyfin_data_directory = "/var/lib/jellyfin";
  jellyfin_cache_directory = "/var/cache/jellyfin";
in {
  # Persist Jellyfin state only when both the service and host-wide
  # impermanence support are enabled.
  options.services.jellyfin.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.jellyfin.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.jellyfin.enable {
    # Fail the build if the persisted paths diverge from the service options,
    # instead of silently losing data on reboot.
    assertions = lib.optionals config.services.jellyfin.impermanence.enable [
      {
        assertion = config.services.jellyfin.dataDir == jellyfin_data_directory;
        message = "jellyfin data directory does not match persistence";
      }
      {
        assertion = config.services.jellyfin.cacheDir == jellyfin_cache_directory;
        message = "jellyfin cache directory does not match persistence";
      }
    ];
    storage.datasets.replicate = {
      # Service state and cache live on the root dataset.
      "system/root" = {
        directories = {
          "${jellyfin_data_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
            enable = true;
            owner.name = "jellyfin";
            group.name = "jellyfin";
          };
          "${jellyfin_cache_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
            enable = true;
            owner.name = "jellyfin";
            group.name = "jellyfin";
          };
        };
      };
      # The media library gets its own dataset so it can be snapshotted and
      # replicated independently of service state.
      "system/media" = {
        mount = "/persist/replicate/system/media";
        directories."${config.services.jellyfin.media_directory}" = lib.mkIf config.services.jellyfin.impermanence.enable {
          enable = true;
          owner.name = "jellyfin";
          group.name = "jellyfin_media";
          # rwxrwx---: owner and the media group get full access, others none.
          # NOTE(review): the removed impermanence config used mode "1770"
          # (sticky bit set); this permission model cannot express the sticky
          # bit — confirm that losing it is acceptable.
          owner.permissions = {
            read = true;
            write = true;
            execute = true;
          };
          group.permissions = {
            read = true;
            write = true;
            execute = true;
          };
          other.permissions = {
            read = false;
            write = false;
            execute = false;
          };
        };
      };
    };
  };
}

View file

@ -1,5 +1,5 @@
{...}: {
imports = [
./impermanence.nix
./storage.nix
];
}

View file

@ -1,33 +0,0 @@
{
lib,
config,
...
}: let
lidarr_data_directory = "/var/lib/lidarr/.config/Lidarr";
in {
options.services.lidarr = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.lidarr.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.lidarr.impermanence.enable {
assertions = [
{
assertion = config.services.lidarr.dataDir == lidarr_data_directory;
message = "lidarr data directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = lidarr_data_directory;
user = "lidarr";
group = "lidarr";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Lidarr.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the Lidarr data directory; must stay in sync with
  # config.services.lidarr.dataDir (enforced below).
  lidarr_data_directory = "/var/lib/lidarr/.config/Lidarr";
in {
  # Persist Lidarr data only when both the service and host-wide
  # impermanence support are enabled.
  options.services.lidarr.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.lidarr.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.lidarr.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.lidarr.impermanence.enable {
      assertion = config.services.lidarr.dataDir == lidarr_data_directory;
      message = "lidarr data directory does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${lidarr_data_directory}" = lib.mkIf config.services.lidarr.impermanence.enable {
        owner.name = "lidarr";
        group.name = "lidarr";
      };
    };
  };
}

View file

@ -74,7 +74,7 @@ in {
);
}
# (lib.mkIf config.host.impermanence.enable {
# environment.persistence."/persist/system/root" = {
# environment.persistence."/persist/replicate/system/root" = {
# enable = true;
# hideMounts = true;
# directories = [

View file

@ -2,7 +2,7 @@
imports = [
./proxy.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
./panoramax.nix
./database.nix
];

View file

@ -1,20 +0,0 @@
{
lib,
config,
...
}: {
options.services.panoramax = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.panoramax.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.panoramax.impermanence.enable {
# TODO: configure impermanence for panoramax data
# This would typically include directories like:
# - /var/lib/panoramax
# - panoramax storage directories
# - any cache or temporary directories that need to persist
};
}

View file

@ -0,0 +1,19 @@
# Storage configuration for Panoramax.
{
  lib,
  config,
  ...
}: {
  # Persist Panoramax state only when both the service and host-wide
  # impermanence support are enabled.
  options.services.panoramax.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.panoramax.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.panoramax.enable {
    storage.datasets.replicate."system/root" = {
      # NOTE(review): /var/lib/panoramax is assumed to be the service state
      # directory; the earlier impermanence module only carried a TODO here
      # and also mentioned separate storage/cache directories — confirm this
      # single directory covers everything that must persist.
      directories."/var/lib/panoramax" = lib.mkIf config.services.panoramax.impermanence.enable {
        owner.name = "panoramax";
        group.name = "panoramax";
      };
    };
  };
}

View file

@ -4,6 +4,6 @@
./proxy.nix
./database.nix
./fail2ban.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,32 +0,0 @@
{
config,
lib,
...
}: let
dataDir = "/var/lib/paperless";
in {
options.services.paperless = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.paperless.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.paperless.impermanence.enable {
assertions = [
{
assertion = config.services.paperless.dataDir == dataDir;
message = "paperless data location does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
directories = [
{
directory = dataDir;
user = "paperless";
group = "paperless";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for Paperless.
{
  config,
  lib,
  ...
}: let
  # Hard-coded copy of the Paperless data directory; must stay in sync with
  # config.services.paperless.dataDir (enforced below).
  dataDir = "/var/lib/paperless";
in {
  # Persist Paperless data only when both the service and host-wide
  # impermanence support are enabled.
  options.services.paperless.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.paperless.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.paperless.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.paperless.impermanence.enable {
      assertion = config.services.paperless.dataDir == dataDir;
      message = "paperless data location does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${dataDir}" = lib.mkIf config.services.paperless.impermanence.enable {
        owner.name = "paperless";
        group.name = "paperless";
      };
    };
  };
}

View file

@ -1,6 +1,6 @@
{...}: {
imports = [
./postgres.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,27 +0,0 @@
{
config,
lib,
...
}: let
dataDir = "/var/lib/postgresql/16";
in {
config = lib.mkIf (config.services.postgresql.enable && config.host.impermanence.enable) {
assertions = [
{
assertion = config.services.postgresql.dataDir == dataDir;
message = "postgres data directory does not match persistence";
}
];
environment.persistence."/persist/system/root" = {
enable = true;
hideMounts = true;
directories = [
{
directory = dataDir;
user = "postgres";
group = "postgres";
}
];
};
};
}

View file

@ -0,0 +1,21 @@
# Storage configuration for PostgreSQL.
{
  config,
  lib,
  ...
}: let
  # Hard-coded copy of the PostgreSQL data directory. The path embeds the
  # major version, so it must be bumped on every PostgreSQL major upgrade;
  # the assertion below catches any drift from the service's own dataDir.
  dataDir = "/var/lib/postgresql/16";
in {
  # Persist the PostgreSQL data directory only when both the service and
  # host-wide impermanence support are enabled.
  options.services.postgresql.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.postgresql.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.postgresql.enable {
    # Fail the build rather than silently persisting the wrong path.
    assertions = lib.optional config.services.postgresql.impermanence.enable {
      assertion = config.services.postgresql.dataDir == dataDir;
      message = "postgres data directory does not match persistence";
    };
    storage.datasets.replicate."system/root" = {
      directories."${dataDir}" = lib.mkIf config.services.postgresql.impermanence.enable {
        owner.name = "postgres";
        group.name = "postgres";
      };
    };
  };
}

View file

@ -1,6 +1,6 @@
{...}: {
imports = [
./qbittorent.nix
./impermanence.nix
./storage.nix
];
}

View file

@ -1,61 +0,0 @@
{
lib,
config,
...
}: let
qbittorent_profile_directory = "/var/lib/qBittorrent/";
in {
options.services.qbittorrent = {
impermanence.enable = lib.mkOption {
type = lib.types.bool;
default = config.services.qbittorrent.enable && config.host.impermanence.enable;
};
};
config = lib.mkIf config.services.qbittorrent.impermanence.enable {
fileSystems."/persist/system/qbittorrent".neededForBoot = true;
host.storage.pool.extraDatasets = {
# sops age key needs to be available to pre persist for user generation
"persist/system/qbittorrent" = {
type = "zfs_fs";
mountpoint = "/persist/system/qbittorrent";
options = {
canmount = "on";
};
};
};
assertions = [
{
assertion = config.services.qbittorrent.profileDir == qbittorent_profile_directory;
message = "qbittorrent data directory does not match persistence";
}
];
environment.persistence = {
"/persist/system/root" = {
directories = [
{
directory = qbittorent_profile_directory;
user = "qbittorrent";
group = "qbittorrent";
}
];
};
"/persist/system/qbittorrent" = {
enable = true;
hideMounts = true;
directories = [
{
directory = config.services.qbittorrent.mediaDir;
user = "qbittorrent";
group = "qbittorrent";
mode = "1775";
}
];
};
};
};
}

View file

@ -0,0 +1,46 @@
# Storage configuration for qBittorrent.
{
  lib,
  config,
  ...
}: let
  # Hard-coded copy of the qBittorrent profile directory (trailing slash kept
  # byte-identical to the value the service option is compared against);
  # must stay in sync with config.services.qbittorrent.profileDir (enforced
  # below). Renamed from the misspelled "qbittorent_profile_directory".
  qbittorrent_profile_directory = "/var/lib/qBittorrent/";
in {
  # Persist qBittorrent state only when both the service and host-wide
  # impermanence support are enabled.
  options.services.qbittorrent.impermanence.enable = lib.mkOption {
    type = lib.types.bool;
    default = config.services.qbittorrent.enable && config.storage.impermanence.enable;
  };
  config = lib.mkIf config.services.qbittorrent.enable {
    # Fail the build if the persisted path diverges from the service option.
    assertions = lib.optional config.services.qbittorrent.impermanence.enable {
      assertion = config.services.qbittorrent.profileDir == qbittorrent_profile_directory;
      message = "qbittorrent data directory does not match persistence";
    };
    storage.datasets.replicate = {
      # Profile/state lives on the root dataset.
      "system/root" = {
        directories."${qbittorrent_profile_directory}" = lib.mkIf config.services.qbittorrent.impermanence.enable {
          owner.name = "qbittorrent";
          group.name = "qbittorrent";
        };
      };
      # Downloads live on the shared media dataset so they can be snapshotted
      # and replicated independently of service state.
      "system/media" = {
        mount = "/persist/replicate/system/media";
        directories."${config.services.qbittorrent.mediaDir}" = lib.mkIf config.services.qbittorrent.impermanence.enable {
          owner.name = "qbittorrent";
          group.name = "qbittorrent";
          # rwxrwxr-x: owner/group full access; others may list and traverse
          # but not write.
          # NOTE(review): the removed impermanence config used mode "1775"
          # (sticky bit set); this permission model cannot express the sticky
          # bit — confirm that losing it is acceptable.
          owner.permissions = {
            read = true;
            write = true;
            execute = true;
          };
          group.permissions = {
            read = true;
            write = true;
            execute = true;
          };
          other.permissions = {
            read = true;
            write = false;
            execute = true;
          };
        };
      };
    };
  };
}

Some files were not shown because too many files have changed in this diff Show more