feat: removed broken disko config
parent 9df29cc07f
commit 39edb65539
1 changed file with 4 additions and 424 deletions
@@ -5,52 +5,6 @@ args @ {
  ...
}: let
  datasetSubmodule = (import ./submodules/dataset.nix) args;

  # max GPT label length is 36 and disko formats it like disk-xxxx-zfs, which means we need to be 9 characters under that
  hashDisk = drive: (builtins.substring 0 27 (builtins.hashString "sha256" drive));

  poolVdevs =
    builtins.map (
      vdev:
        builtins.map (
          device: let
            deviceStr =
              if builtins.isString device
              then device
              else device.device;
          in
            lib.attrsets.nameValuePair (hashDisk deviceStr) deviceStr
        )
        vdev
    )
    config.storage.zfs.pool.vdevs;

  poolCache = builtins.map (
    name: let
      device = config.storage.zfs.pool.cache.${name};
      deviceStr =
        if builtins.isString device
        then device
        else device.device;
    in
      lib.attrsets.nameValuePair (hashDisk deviceStr) deviceStr
  ) (builtins.attrNames config.storage.zfs.pool.cache);

  bootDrives =
    builtins.map (
      device:
        if builtins.isString device
        then device
        else device.device
    ) (builtins.filter (
      device:
        if builtins.isString device
        then false
        else device.boot
    )
    (lib.lists.flatten config.storage.zfs.pool.vdevs));

  allDrives = (lib.lists.flatten poolVdevs) ++ poolCache;
in {
  options.storage = {
    zfs = {
@@ -142,389 +96,15 @@ in {

  config = lib.mkIf config.storage.zfs.enable (lib.mkMerge [
    {
      assertions = [
        {
          assertion = builtins.length bootDrives > 0;
          message = ''
            ZFS configuration requires at least one boot drive. Please configure at least one device with boot = true in storage.zfs.pool.vdevs.
          '';
        }
        {
          assertion =
            !(
              config.storage.zfs.pool.encryption.enable
              && (config.storage.zfs.rootDataset.encryption
                != null
                || config.storage.zfs.rootDataset.keyformat != null
                || config.storage.zfs.rootDataset.keylocation != null)
            );
          message = ''
            Cannot set encryption options in both pool.encryption and rootDataset.
            Use either pool.encryption for default settings or rootDataset encryption options for explicit control, but not both.
          '';
        }
      ];

      services.zfs = {
        autoScrub.enable = true;
        autoSnapshot.enable = true;
      };

      # Disko configuration based on pool settings
      # disko.devices = {
      #   disk = (
      #     builtins.listToAttrs (
      #       builtins.map
      #       (drive:
      #         lib.attrsets.nameValuePair (drive.name) {
      #           type = "disk";
      #           device = "/dev/disk/by-id/${drive.value}";
      #           content = {
      #             type = "gpt";
      #             partitions = {
      #               ESP = lib.mkIf (builtins.elem drive.value bootDrives) {
      #                 size = config.storage.zfs.pool.bootPartitionSize;
      #                 type = "EF00";
      #                 content = {
      #                   type = "filesystem";
      #                   format = "vfat";
      #                   mountpoint = "/boot";
      #                   mountOptions = ["umask=0077"];
      #                 };
      #               };
      #               zfs = {
      #                 size = "100%";
      #                 content = {
      #                   type = "zfs";
      #                   pool = "rpool";
      #                 };
      #               };
      #             };
      #           };
      #         })
      #       allDrives
      #     )
      #   );
      #   zpool = {
      #     rpool = {
      #       type = "zpool";
      #       mode = {
      #         topology = {
      #           type = "topology";
      #           vdev = (
      #             builtins.map (disks: {
      #               mode = config.storage.zfs.pool.mode;
      #               members =
      #                 builtins.map (disk: disk.name) disks;
      #             })
      #             poolVdevs
      #           );
      #           cache = builtins.map (disk: disk.name) poolCache;
      #         };
      #       };

      #       options = {
      #         ashift = "12";
      #         autotrim = "on";
      #       };

      #       rootFsOptions = let
      #         rootDataset = config.storage.zfs.rootDataset;
      #         # Start with defaults that match the original hardcoded values
      #         defaults = {
      #           canmount = "off";
      #           mountpoint = "none";
      #           xattr = "sa";
      #           acltype = "posixacl";
      #           relatime = "on";
      #           compression = "lz4";
      #           "com.sun:auto-snapshot" = "false";
      #         };
      #         # Override defaults with non-null values from rootDataset
      #         userOptions = lib.attrsets.filterAttrs (_: v: v != null) {
      #           canmount = rootDataset.canmount;
      #           mountpoint = rootDataset.mountpoint;
      #           xattr = rootDataset.xattr;
      #           acltype = rootDataset.acltype;
      #           relatime = rootDataset.relatime;
      #           compression = rootDataset.compression;
      #           encryption = rootDataset.encryption;
      #           keyformat = rootDataset.keyformat;
      #           keylocation = rootDataset.keylocation;
      #           recordsize = rootDataset.recordsize;
      #           sync = rootDataset.sync;
      #           atime = rootDataset.atime;
      #           "com.sun:auto-snapshot" =
      #             if rootDataset.autoSnapshot == null
      #             then null
      #             else
      #               (
      #                 if rootDataset.autoSnapshot
      #                 then "true"
      #                 else "false"
      #               );
      #         };
      #         # Only apply pool encryption if user hasn't set encryption options in rootDataset
      #         poolEncryptionOptions =
      #           lib.attrsets.optionalAttrs (
      #             config.storage.zfs.pool.encryption.enable
      #             && rootDataset.encryption == null
      #             && rootDataset.keyformat == null
      #             && rootDataset.keylocation == null
      #           ) {
      #             encryption = "on";
      #             keyformat = config.storage.zfs.pool.encryption.keyformat;
      #             keylocation = config.storage.zfs.pool.encryption.keylocation;
      #           };
      #       in
      #         defaults // userOptions // rootDataset.options // poolEncryptionOptions;

      #       datasets = lib.mkMerge [
      #         (
      #           lib.attrsets.mapAttrs (name: value: {
      #             type = value.type;
      #             options = let
      #               # For datasets, only include non-null user-specified values
      #               userOptions = lib.attrsets.filterAttrs (_: v: v != null) {
      #                 canmount = value.canmount;
      #                 xattr = value.xattr;
      #                 acltype = value.acltype;
      #                 relatime = value.relatime;
      #                 compression = value.compression;
      #                 encryption = value.encryption;
      #                 keyformat = value.keyformat;
      #                 keylocation = value.keylocation;
      #                 recordsize = value.recordsize;
      #                 sync = value.sync;
      #                 atime = value.atime;
      #                 "com.sun:auto-snapshot" =
      #                   if value.autoSnapshot == null
      #                   then null
      #                   else
      #                     (
      #                       if value.autoSnapshot
      #                       then "true"
      #                       else "false"
      #                     );
      #               };
      #             in
      #               userOptions // (value.options or {});
      #             mountpoint = value.mountpoint;
      #             postCreateHook = value.postCreateHook or "";
      #           })
      #           config.storage.zfs.datasets
      #         )
      #       ];
      #     };
      #   };
      # };

      # Post-activation scripts for validation
      system.activationScripts = {
        # Script 1: Validate pool, cache devices, and vdevs
        zfs-pool-validation = {
          text = ''
            echo "Running ZFS pool validation..."

            # Function to check if a device exists in a vdev or cache
            check_device_in_pool() {
              local device_id="$1"
              local device_type="$2" # "cache" or "vdev"

              if ! zpool status rpool | grep -q "$device_id"; then
                echo "ERROR: Device $device_id not found in pool rpool ($device_type)"
                return 1
              fi
              return 0
            }

            # Function to validate vdev configuration
            validate_vdevs() {
              local expected_mode="${config.storage.zfs.pool.mode}"
              local pool_status=$(zpool status rpool)

              # Check if pool exists
              if ! zpool list rpool >/dev/null 2>&1; then
                echo "ERROR: ZFS pool 'rpool' does not exist"
                return 1
              fi

              # Validate each configured vdev device
              ${lib.concatMapStringsSep "\n" (
                device: let
                  deviceStr =
                    if builtins.isString device
                    then device
                    else device.device;
                in ''
                  if ! check_device_in_pool "${deviceStr}" "vdev"; then
                    echo "ERROR: Vdev device ${deviceStr} not found in pool"
                    exit 1
                  fi
                ''
              )
              (lib.lists.flatten config.storage.zfs.pool.vdevs)}

              # Check pool mode matches configuration
              if ! echo "$pool_status" | grep -q "$expected_mode"; then
                echo "WARNING: Pool mode may not match expected configuration ($expected_mode)"
              fi

              echo "✓ All vdev devices validated successfully"
              return 0
            }

            # Function to validate cache configuration
            validate_cache() {
              ${lib.concatMapStringsSep "\n" (
                name: let
                  device = config.storage.zfs.pool.cache.${name};
                  deviceStr =
                    if builtins.isString device
                    then device
                    else device.device;
                in ''
                  if ! check_device_in_pool "${deviceStr}" "cache"; then
                    echo "ERROR: Cache device ${deviceStr} (${name}) not found in pool"
                    exit 1
                  fi
                ''
              ) (builtins.attrNames config.storage.zfs.pool.cache)}

              echo "✓ All cache devices validated successfully"
              return 0
            }

            # Run validations
            if validate_vdevs && validate_cache; then
              echo "✓ ZFS pool validation completed successfully"
            else
              echo "✗ ZFS pool validation failed"
              exit 1
            fi
          '';
          deps = ["zfs"];
        };

        # Script 2: Validate datasets and their options
        zfs-dataset-validation = {
          text = ''
            echo "Running ZFS dataset validation..."

            # Function to check if dataset exists
            check_dataset_exists() {
              local dataset="$1"
              if ! zfs list "$dataset" >/dev/null 2>&1; then
                echo "ERROR: Dataset $dataset does not exist"
                return 1
              fi
              return 0
            }

            # Function to validate dataset options
            validate_dataset_options() {
              local dataset="$1"
              local expected_options="$2"

              # Parse expected options (format: "option=value option2=value2")
              echo "$expected_options" | tr ' ' '\n' | while IFS='=' read -r option expected_value; do
                if [ -n "$option" ] && [ -n "$expected_value" ]; then
                  local actual_value=$(zfs get -H -o value "$option" "$dataset" 2>/dev/null)
                  if [ "$actual_value" != "$expected_value" ]; then
                    echo "ERROR: Dataset $dataset option $option is '$actual_value', expected '$expected_value'"
                    return 1
                  fi
                fi
              done
              return 0
            }

            # Validate root dataset
            echo "Validating root dataset..."
            if check_dataset_exists "rpool"; then
              root_options=""
              ${lib.concatMapStringsSep "\n" (
                option: let
                  value = config.storage.zfs.rootDataset.${option};
                in
                  lib.optionalString (value != null) ''
                    root_options="$root_options ${option}=${toString value}"
                  ''
              ) ["canmount" "xattr" "acltype" "relatime" "compression" "encryption" "keyformat" "keylocation" "recordsize" "sync" "atime"]}

              # Add autoSnapshot option
              ${lib.optionalString (config.storage.zfs.rootDataset.autoSnapshot != null) ''
                root_options="$root_options com.sun:auto-snapshot=${
                  if config.storage.zfs.rootDataset.autoSnapshot
                  then "true"
                  else "false"
                }"
              ''}

              if validate_dataset_options "rpool" "$root_options"; then
                echo "✓ Root dataset options validated"
              else
                echo "✗ Root dataset validation failed"
                exit 1
              fi
            else
              echo "✗ Root dataset validation failed"
              exit 1
            fi

            # Validate configured datasets
            ${lib.concatMapStringsSep "\n" (
              name: let
                dataset = config.storage.zfs.datasets.${name};
              in ''
                echo "Validating dataset: rpool/${name}"
                if check_dataset_exists "rpool/${name}"; then
                  dataset_options=""
                  ${lib.concatMapStringsSep "\n" (
                    option: let
                      value = dataset.${option};
                    in
                      lib.optionalString (value != null) ''
                        dataset_options="$dataset_options ${option}=${toString value}"
                      ''
                  ) ["canmount" "xattr" "acltype" "relatime" "compression" "encryption" "keyformat" "keylocation" "recordsize" "sync" "atime"]}

                  # Add autoSnapshot option
                  ${lib.optionalString (dataset.autoSnapshot != null) ''
                    dataset_options="$dataset_options com.sun:auto-snapshot=${
                      if dataset.autoSnapshot
                      then "true"
                      else "false"
                    }"
                  ''}

                  # Add custom options
                  ${lib.concatMapStringsSep "\n" (
                    optName: let
                      optValue = dataset.options.${optName};
                    in ''
                      dataset_options="$dataset_options ${optName}=${toString optValue}"
                    ''
                  ) (builtins.attrNames (dataset.options or {}))}

                  if validate_dataset_options "rpool/${name}" "$dataset_options"; then
                    echo "✓ Dataset rpool/${name} options validated"
                  else
                    echo "✗ Dataset rpool/${name} validation failed"
                    exit 1
                  fi
                else
                  echo "✗ Dataset rpool/${name} validation failed"
                  exit 1
                fi
              ''
            ) (builtins.attrNames config.storage.zfs.datasets)}

            echo "✓ ZFS dataset validation completed successfully"
          '';
          deps = ["zfs" "zfs-pool-validation"];
        };
      };
      # TODO: configure disko
      # TODO: assertion that we have a boot device
      # TODO: check that disks on system match configuration and warn user if they don't
      # TODO: check that datasets on system match configuration and warn user if they don't
    }
    (lib.mkIf config.storage.zfs.notifications.enable {
      programs.msmtp = {