treewide: Format all Nix files
Format all Nix files using the officially approved formatter,
making the CI check introduced in the previous commit succeed:
nix-build ci -A fmt.check
This is the next step of the [implementation](https://github.com/NixOS/nixfmt/issues/153)
of the accepted [RFC 166](https://github.com/NixOS/rfcs/pull/166).
This commit will lead to merge conflicts for a number of PRs:
up to an estimated ~1100 (~33%) of the PRs with activity in the past 2
months. That number should still be lower than it would have been without the previous
[partial treewide format](https://github.com/NixOS/nixpkgs/pull/322537).
Merge conflicts caused by this commit can now automatically be resolved while rebasing using the
[auto-rebase script](8616af08d9/maintainers/scripts/auto-rebase).
If you run into any problems regarding any of this, please reach out to the
[formatting team](https://nixos.org/community/teams/formatting/) by
pinging @NixOS/nix-formatting.
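
For PR authors hit by these conflicts, a minimal sketch of the local workflow. Only the check command comes from this commit message; the remote name and the rebase steps are assumptions, and the exact invocation of the helper is documented in maintainers/scripts/auto-rebase:

```sh
# Check that the tree is formatted the way the new CI check expects
# (command taken from the commit message above):
nix-build ci -A fmt.check

# Rebase an open PR onto the formatted tree; "upstream" is an assumed
# remote name pointing at NixOS/nixpkgs. Formatting-only conflicts can
# then be re-resolved with the auto-rebase helper linked above.
git fetch upstream
git rebase upstream/master
```

If the rebase still reports conflicts, reach out to the formatting team as described above.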
@@ -1,8 +1,16 @@
{ config, lib, pkgs, utils, ... }:
{
config,
lib,
pkgs,
utils,
...
}:

let

bootFs = lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
bootFs = lib.filterAttrs (
n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)
) config.fileSystems;

commonFunctions = ''
prompt() {
@@ -57,73 +65,93 @@ let
# bcachefs does not support mounting devices with colons in the path, ergo we don't (see #49671)
firstDevice = fs: lib.head (lib.splitString ":" fs.device);

useClevis = fs: config.boot.initrd.clevis.enable && (lib.hasAttr (firstDevice fs) config.boot.initrd.clevis.devices);
useClevis =
fs:
config.boot.initrd.clevis.enable
&& (lib.hasAttr (firstDevice fs) config.boot.initrd.clevis.devices);

openCommand = name: fs: if useClevis fs then ''
if clevis decrypt < /etc/clevis/${firstDevice fs}.jwe | bcachefs unlock ${firstDevice fs}
then
printf "unlocked ${name} using clevis\n"
else
printf "falling back to interactive unlocking...\n"
tryUnlock ${name} ${firstDevice fs}
fi
'' else ''
tryUnlock ${name} ${firstDevice fs}
'';

mkUnits = prefix: name: fs: let
mountUnit = "${utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint))}.mount";
device = firstDevice fs;
deviceUnit = "${utils.escapeSystemdPath device}.device";
in {
name = "unlock-bcachefs-${utils.escapeSystemdPath fs.mountPoint}";
value = {
description = "Unlock bcachefs for ${fs.mountPoint}";
requiredBy = [ mountUnit ];
after = [ deviceUnit ];
before = [ mountUnit "shutdown.target" ];
bindsTo = [ deviceUnit ];
conflicts = [ "shutdown.target" ];
unitConfig.DefaultDependencies = false;
serviceConfig = {
Type = "oneshot";
ExecCondition = "${pkgs.bcachefs-tools}/bin/bcachefs unlock -c \"${device}\"";
Restart = "on-failure";
RestartMode = "direct";
# Ideally, this service would lock the key on stop.
# As is, RemainAfterExit doesn't accomplish anything.
RemainAfterExit = true;
};
script = let
unlock = ''${pkgs.bcachefs-tools}/bin/bcachefs unlock "${device}"'';
unlockInteractively = ''${config.boot.initrd.systemd.package}/bin/systemd-ask-password --timeout=0 "enter passphrase for ${name}" | exec ${unlock}'';
in if useClevis fs then ''
if ${config.boot.initrd.clevis.package}/bin/clevis decrypt < "/etc/clevis/${device}.jwe" | ${unlock}
openCommand =
name: fs:
if useClevis fs then
''
if clevis decrypt < /etc/clevis/${firstDevice fs}.jwe | bcachefs unlock ${firstDevice fs}
then
printf "unlocked ${name} using clevis\n"
else
printf "falling back to interactive unlocking...\n"
${unlockInteractively}
tryUnlock ${name} ${firstDevice fs}
fi
'' else ''
${unlockInteractively}
''
else
''
tryUnlock ${name} ${firstDevice fs}
'';

mkUnits =
prefix: name: fs:
let
mountUnit = "${utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint))}.mount";
device = firstDevice fs;
deviceUnit = "${utils.escapeSystemdPath device}.device";
in
{
name = "unlock-bcachefs-${utils.escapeSystemdPath fs.mountPoint}";
value = {
description = "Unlock bcachefs for ${fs.mountPoint}";
requiredBy = [ mountUnit ];
after = [ deviceUnit ];
before = [
mountUnit
"shutdown.target"
];
bindsTo = [ deviceUnit ];
conflicts = [ "shutdown.target" ];
unitConfig.DefaultDependencies = false;
serviceConfig = {
Type = "oneshot";
ExecCondition = "${pkgs.bcachefs-tools}/bin/bcachefs unlock -c \"${device}\"";
Restart = "on-failure";
RestartMode = "direct";
# Ideally, this service would lock the key on stop.
# As is, RemainAfterExit doesn't accomplish anything.
RemainAfterExit = true;
};
script =
let
unlock = ''${pkgs.bcachefs-tools}/bin/bcachefs unlock "${device}"'';
unlockInteractively = ''${config.boot.initrd.systemd.package}/bin/systemd-ask-password --timeout=0 "enter passphrase for ${name}" | exec ${unlock}'';
in
if useClevis fs then
''
if ${config.boot.initrd.clevis.package}/bin/clevis decrypt < "/etc/clevis/${device}.jwe" | ${unlock}
then
printf "unlocked ${name} using clevis\n"
else
printf "falling back to interactive unlocking...\n"
${unlockInteractively}
fi
''
else
''
${unlockInteractively}
'';
};
};
};

assertions = [
{
assertion = let
kernel = config.boot.kernelPackages.kernel;
in (
kernel.kernelAtLeast "6.7" || (
lib.elem (kernel.structuredExtraConfig.BCACHEFS_FS or null) [
assertion =
let
kernel = config.boot.kernelPackages.kernel;
in
(
kernel.kernelAtLeast "6.7"
|| (lib.elem (kernel.structuredExtraConfig.BCACHEFS_FS or null) [
lib.kernel.module
lib.kernel.yes
(lib.kernel.option lib.kernel.yes)
]
)
);
])
);

message = "Linux 6.7-rc1 at minimum or a custom linux kernel with bcachefs support is required";
}
@@ -131,41 +159,52 @@ let
in

{
config = lib.mkIf (config.boot.supportedFilesystems.bcachefs or false) (lib.mkMerge [
{
inherit assertions;
# needed for systemd-remount-fs
system.fsPackages = [ pkgs.bcachefs-tools ];
# FIXME: Remove this line when the LTS (default) kernel is at least version 6.7
boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
services.udev.packages = [ pkgs.bcachefs-tools ];
config = lib.mkIf (config.boot.supportedFilesystems.bcachefs or false) (
lib.mkMerge [
{
inherit assertions;
# needed for systemd-remount-fs
system.fsPackages = [ pkgs.bcachefs-tools ];
# FIXME: Remove this line when the LTS (default) kernel is at least version 6.7
boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
services.udev.packages = [ pkgs.bcachefs-tools ];

systemd = {
packages = [ pkgs.bcachefs-tools ];
services = lib.mapAttrs' (mkUnits "") (lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems);
};
}
systemd = {
packages = [ pkgs.bcachefs-tools ];
services = lib.mapAttrs' (mkUnits "") (
lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems
);
};
}

(lib.mkIf ((config.boot.initrd.supportedFilesystems.bcachefs or false) || (bootFs != {})) {
inherit assertions;
# chacha20 and poly1305 are required only for decryption attempts
boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
boot.initrd.systemd.extraBin = {
# do we need this? boot/systemd.nix:566 & boot/systemd/initrd.nix:357
"bcachefs" = "${pkgs.bcachefs-tools}/bin/bcachefs";
"mount.bcachefs" = "${pkgs.bcachefs-tools}/bin/mount.bcachefs";
};
boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/mount.bcachefs
'';
boot.initrd.extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
$out/bin/bcachefs version
'';
(lib.mkIf ((config.boot.initrd.supportedFilesystems.bcachefs or false) || (bootFs != { })) {
inherit assertions;
# chacha20 and poly1305 are required only for decryption attempts
boot.initrd.availableKernelModules = [
"bcachefs"
"sha256"
"chacha20"
"poly1305"
];
boot.initrd.systemd.extraBin = {
# do we need this? boot/systemd.nix:566 & boot/systemd/initrd.nix:357
"bcachefs" = "${pkgs.bcachefs-tools}/bin/bcachefs";
"mount.bcachefs" = "${pkgs.bcachefs-tools}/bin/mount.bcachefs";
};
boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/mount.bcachefs
'';
boot.initrd.extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
$out/bin/bcachefs version
'';

boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + lib.concatStrings (lib.mapAttrsToList openCommand bootFs));
boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (
commonFunctions + lib.concatStrings (lib.mapAttrsToList openCommand bootFs)
);

boot.initrd.systemd.services = lib.mapAttrs' (mkUnits "/sysroot") bootFs;
})
]);
boot.initrd.systemd.services = lib.mapAttrs' (mkUnits "/sysroot") bootFs;
})
]
);
}

@@ -1,4 +1,11 @@
{ config, lib, options, pkgs, utils, ... }:
{
config,
lib,
options,
pkgs,
utils,
...
}:
#
# TODO: zfs tunables

@@ -13,7 +20,17 @@ let
cfgZED = config.services.zfs.zed;

selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
clevisDatasets = lib.attrNames (lib.filterAttrs (device: _: lib.any (e: e.fsType == "zfs" && (utils.fsNeededForBoot e) && (e.device == device || lib.hasPrefix "${device}/" e.device)) config.system.build.fileSystems) config.boot.initrd.clevis.devices);
clevisDatasets = lib.attrNames (
lib.filterAttrs (
device: _:
lib.any (
e:
e.fsType == "zfs"
&& (utils.fsNeededForBoot e)
&& (e.device == device || lib.hasPrefix "${device}/" e.device)
) config.system.build.fileSystems
) config.boot.initrd.clevis.devices
);

inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
inSystem = config.boot.supportedFilesystems.zfs or false;
@@ -36,7 +53,13 @@ let

dataPools = lib.unique (lib.filter (pool: !(lib.elem pool rootPools)) allPools);

snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];
snapshotNames = [
"frequent"
"hourly"
"daily"
"weekly"
"monthly"
];

# When importing ZFS pools, there's one difficulty: These scripts may run
# before the backing devices (physical HDDs, etc.) of the pool have been
@@ -56,42 +79,51 @@ let
# sufficient amount of time has passed that we can assume it won't be. In the
# latter case it makes one last attempt at importing, allowing the system to
# (eventually) boot even with a degraded pool.
importLib = {zpoolCmd, awkCmd, pool}: let
devNodes = if pool != null && cfgZfs.pools ? ${pool} then cfgZfs.pools.${pool}.devNodes else cfgZfs.devNodes;
in ''
# shellcheck disable=SC2013
for o in $(cat /proc/cmdline); do
case $o in
zfs_force|zfs_force=1|zfs_force=y)
ZFS_FORCE="-f"
;;
esac
done
poolReady() {
pool="$1"
state="$("${zpoolCmd}" import -d "${devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
if [[ "$state" = "ONLINE" ]]; then
return 0
else
echo "Pool $pool in state $state, waiting"
return 1
fi
}
poolImported() {
pool="$1"
"${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
}
poolImport() {
pool="$1"
# shellcheck disable=SC2086
"${zpoolCmd}" import -d "${devNodes}" -N $ZFS_FORCE "$pool"
}
'';
importLib =
{
zpoolCmd,
awkCmd,
pool,
}:
let
devNodes =
if pool != null && cfgZfs.pools ? ${pool} then cfgZfs.pools.${pool}.devNodes else cfgZfs.devNodes;
in
''
# shellcheck disable=SC2013
for o in $(cat /proc/cmdline); do
case $o in
zfs_force|zfs_force=1|zfs_force=y)
ZFS_FORCE="-f"
;;
esac
done
poolReady() {
pool="$1"
state="$("${zpoolCmd}" import -d "${devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
if [[ "$state" = "ONLINE" ]]; then
return 0
else
echo "Pool $pool in state $state, waiting"
return 1
fi
}
poolImported() {
pool="$1"
"${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
}
poolImport() {
pool="$1"
# shellcheck disable=SC2086
"${zpoolCmd}" import -d "${devNodes}" -N $ZFS_FORCE "$pool"
}
'';

getPoolFilesystems = pool:
lib.filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;
getPoolFilesystems =
pool: lib.filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;

getPoolMounts = prefix: pool:
getPoolMounts =
prefix: pool:
let
poolFSes = getPoolFilesystems pool;

@@ -102,37 +134,55 @@ let

hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
in
map (x: "${mountPoint x}.mount") poolFSes
++ lib.optional hasUsr "sysusr-usr.mount";
map (x: "${mountPoint x}.mount") poolFSes ++ lib.optional hasUsr "sysusr-usr.mount";

getKeyLocations = pool: if lib.isBool cfgZfs.requestEncryptionCredentials then {
hasKeys = cfgZfs.requestEncryptionCredentials;
command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
} else let
keys = lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
in {
hasKeys = keys != [];
command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
};
getKeyLocations =
pool:
if lib.isBool cfgZfs.requestEncryptionCredentials then
{
hasKeys = cfgZfs.requestEncryptionCredentials;
command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
}
else
let
keys = lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
in
{
hasKeys = keys != [ ];
command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
};

createImportService = { pool, systemd, force, prefix ? "" }:
createImportService =
{
pool,
systemd,
force,
prefix ? "",
}:
lib.nameValuePair "zfs-import-${pool}" {
description = "Import ZFS pool \"${pool}\"";
# We wait for systemd-udev-settle to ensure devices are available,
# but don't *require* it, because mounts shouldn't be killed if it's stopped.
# In the future, hopefully someone will complete this:
# https://github.com/zfsonlinux/zfs/pull/4943
wants = [ "systemd-udev-settle.service" ] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
wants = [
"systemd-udev-settle.service"
] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
after = [
"systemd-udev-settle.service"
"systemd-modules-load.service"
"systemd-ask-password-console.service"
] ++ lib.optional (config.boot.initrd.clevis.useTang) "network-online.target";
requiredBy = let
poolFilesystems = getPoolFilesystems pool;
noauto = poolFilesystems != [ ] && lib.all (fs: lib.elem "noauto" fs.options) poolFilesystems;
in getPoolMounts prefix pool ++ lib.optional (!noauto) "zfs-import.target";
before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
requiredBy =
let
poolFilesystems = getPoolFilesystems pool;
noauto = poolFilesystems != [ ] && lib.all (fs: lib.elem "noauto" fs.options) poolFilesystems;
in
getPoolMounts prefix pool ++ lib.optional (!noauto) "zfs-import.target";
before = getPoolMounts prefix pool ++ [
"shutdown.target"
"zfs-import.target"
];
conflicts = [ "shutdown.target" ];
unitConfig = {
DefaultDependencies = "no";
@@ -142,69 +192,83 @@ let
RemainAfterExit = true;
};
environment.ZFS_FORCE = lib.optionalString force "-f";
script = let
keyLocations = getKeyLocations pool;
in (importLib {
# See comments at importLib definition.
zpoolCmd = "${cfgZfs.package}/sbin/zpool";
awkCmd = "${pkgs.gawk}/bin/awk";
inherit pool;
}) + ''
if ! poolImported "${pool}"; then
echo -n "importing ZFS pool \"${pool}\"..."
# Loop across the import until it succeeds, because the devices needed may not be discovered yet.
for _ in $(seq 1 60); do
poolReady "${pool}" && poolImport "${pool}" && break
sleep 1
done
poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
fi
if poolImported "${pool}"; then
${lib.optionalString config.boot.initrd.clevis.enable (lib.concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets))}


${lib.optionalString keyLocations.hasKeys ''
${keyLocations.command} | while IFS=$'\t' read -r ds kl ks; do
{
if [[ "$ks" != unavailable ]]; then
continue
fi
case "$kl" in
none )
;;
prompt )
tries=3
success=false
while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
&& success=true \
|| tries=$((tries - 1))
done
[[ $success = true ]]
;;
* )
${cfgZfs.package}/sbin/zfs load-key "$ds"
;;
esac
} < /dev/null # To protect while read ds kl in case anything reads stdin
script =
let
keyLocations = getKeyLocations pool;
in
(importLib {
# See comments at importLib definition.
zpoolCmd = "${cfgZfs.package}/sbin/zpool";
awkCmd = "${pkgs.gawk}/bin/awk";
inherit pool;
})
+ ''
if ! poolImported "${pool}"; then
echo -n "importing ZFS pool \"${pool}\"..."
# Loop across the import until it succeeds, because the devices needed may not be discovered yet.
for _ in $(seq 1 60); do
poolReady "${pool}" && poolImport "${pool}" && break
sleep 1
done
''}
echo "Successfully imported ${pool}"
else
exit 1
fi
'';
poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
fi
if poolImported "${pool}"; then
${lib.optionalString config.boot.initrd.clevis.enable (
lib.concatMapStringsSep "\n" (
elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true "
) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
)}


${lib.optionalString keyLocations.hasKeys ''
${keyLocations.command} | while IFS=$'\t' read -r ds kl ks; do
{
if [[ "$ks" != unavailable ]]; then
continue
fi
case "$kl" in
none )
;;
prompt )
tries=3
success=false
while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
&& success=true \
|| tries=$((tries - 1))
done
[[ $success = true ]]
;;
* )
${cfgZfs.package}/sbin/zfs load-key "$ds"
;;
esac
} < /dev/null # To protect while read ds kl in case anything reads stdin
done
''}
echo "Successfully imported ${pool}"
else
exit 1
fi
'';
};

zedConf = lib.generators.toKeyValue {
mkKeyValue = lib.generators.mkKeyValueDefault {
mkValueString = v:
if lib.isInt v then toString v
else if lib.isString v then "\"${v}\""
else if true == v then "1"
else if false == v then "0"
else if lib.isList v then "\"" + (lib.concatStringsSep " " v) + "\""
else lib.err "this value is" (toString v);
mkValueString =
v:
if lib.isInt v then
toString v
else if lib.isString v then
"\"${v}\""
else if true == v then
"1"
else if false == v then
"0"
else if lib.isList v then
"\"" + (lib.concatStringsSep " " v) + "\""
else
lib.err "this value is" (toString v);
} "=";
} cfgZED.settings;
in
@@ -212,8 +276,16 @@ in
{

imports = [
(lib.mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
(lib.mkRemovedOptionModule [ "boot" "zfs" "enableUnstable" ] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
(lib.mkRemovedOptionModule [
"boot"
"zfs"
"enableLegacyCrypto"
] "The corresponding package was removed from nixpkgs.")
(lib.mkRemovedOptionModule [
"boot"
"zfs"
"enableUnstable"
] "Instead set `boot.zfs.package = pkgs.zfs_unstable;`")
];

###### interface
@@ -253,8 +325,11 @@ in

extraPools = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
example = [ "tank" "data" ];
default = [ ];
example = [
"tank"
"data"
];
description = ''
Name or GUID of extra ZFS pools that you wish to import during boot.

@@ -316,7 +391,10 @@ in
requestEncryptionCredentials = lib.mkOption {
type = lib.types.either lib.types.bool (lib.types.listOf lib.types.str);
default = true;
example = [ "tank" "data" ];
example = [
"tank"
"data"
];
description = ''
If true on import encryption keys or passwords for all encrypted datasets
are requested. To only decrypt selected datasets supply a list of dataset
@@ -336,16 +414,18 @@ in
};

pools = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
devNodes = lib.mkOption {
type = lib.types.path;
default = cfgZfs.devNodes;
defaultText = "config.boot.zfs.devNodes";
description = options.boot.zfs.devNodes.description;
type = lib.types.attrsOf (
lib.types.submodule {
options = {
devNodes = lib.mkOption {
type = lib.types.path;
default = cfgZfs.devNodes;
defaultText = "config.boot.zfs.devNodes";
description = options.boot.zfs.devNodes.description;
};
};
};
});
}
);
default = { };
description = ''
Configuration for individual pools to override global defaults.
@@ -501,7 +581,7 @@ in
};

pools = lib.mkOption {
default = [];
default = [ ];
type = lib.types.listOf lib.types.str;
example = [ "tank" ];
description = ''
@@ -512,9 +592,15 @@ in
};

services.zfs.expandOnBoot = lib.mkOption {
type = lib.types.either (lib.types.enum [ "disabled" "all" ]) (lib.types.listOf lib.types.str);
type = lib.types.either (lib.types.enum [
"disabled"
"all"
]) (lib.types.listOf lib.types.str);
default = "disabled";
example = [ "tank" "dozer" ];
example = [
"tank"
"dozer"
];
description = ''
After importing, expand each device in the specified pools.

@@ -541,7 +627,18 @@ in
};

settings = lib.mkOption {
type = let t = lib.types; in t.attrsOf (t.oneOf [ t.str t.int t.bool (t.listOf t.str) ]);
type =
let
t = lib.types;
in
t.attrsOf (
t.oneOf [
t.str
t.int
t.bool
(t.listOf t.str)
]
);
example = lib.literalExpression ''
{
ZED_DEBUG_LOG = "/tmp/zed.debug.log";
@@ -614,62 +711,82 @@ in

boot.initrd = lib.mkIf inInitrd {
kernelModules = [ "zfs" ];
extraUtilsCommands =
lib.mkIf (!config.boot.initrd.systemd.enable) ''
copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
'';
extraUtilsCommandsTest =
lib.mkIf (!config.boot.initrd.systemd.enable) ''
$out/bin/zfs --help >/dev/null 2>&1
$out/bin/zpool --help >/dev/null 2>&1
'';
postResumeCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (lib.concatStringsSep "\n" ([''
ZFS_FORCE="${lib.optionalString cfgZfs.forceImportRoot "-f"}"
''] ++ [(importLib {
# See comments at importLib definition.
zpoolCmd = "zpool";
awkCmd = "awk";
pool = null;
})] ++ (map (pool: ''
echo -n "importing root ZFS pool \"${pool}\"..."
# Loop across the import until it succeeds, because the devices needed may not be discovered yet.
if ! poolImported "${pool}"; then
for _ in $(seq 1 60); do
poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
sleep 1
echo -n .
done
echo
if [[ -n "$msg" ]]; then
echo "$msg";
fi
poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
fi

${lib.optionalString config.boot.initrd.clevis.enable (lib.concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets))}

${if lib.isBool cfgZfs.requestEncryptionCredentials
then lib.optionalString cfgZfs.requestEncryptionCredentials ''
zfs load-key -a
extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
copy_bin_and_libs ${cfgZfs.package}/lib/udev/vdev_id
copy_bin_and_libs ${cfgZfs.package}/lib/udev/zvol_id
'';
extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
$out/bin/zfs --help >/dev/null 2>&1
$out/bin/zpool --help >/dev/null 2>&1
'';
postResumeCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (
lib.concatStringsSep "\n" (
[
''
else lib.concatMapStrings (fs: ''
zfs load-key -- ${lib.escapeShellArg fs}
'') (lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
'') rootPools)));
ZFS_FORCE="${lib.optionalString cfgZfs.forceImportRoot "-f"}"
''
]
++ [
(importLib {
# See comments at importLib definition.
zpoolCmd = "zpool";
awkCmd = "awk";
pool = null;
})
]
++ (map (pool: ''
echo -n "importing root ZFS pool \"${pool}\"..."
# Loop across the import until it succeeds, because the devices needed may not be discovered yet.
if ! poolImported "${pool}"; then
for _ in $(seq 1 60); do
poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
sleep 1
echo -n .
done
echo
if [[ -n "$msg" ]]; then
echo "$msg";
fi
poolImported "${pool}" || poolImport "${pool}" # Try one last time, e.g. to import a degraded pool.
fi

${lib.optionalString config.boot.initrd.clevis.enable (
lib.concatMapStringsSep "\n" (
elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}"
) (lib.filter (p: (lib.elemAt (lib.splitString "/" p) 0) == pool) clevisDatasets)
)}

${
if lib.isBool cfgZfs.requestEncryptionCredentials then
lib.optionalString cfgZfs.requestEncryptionCredentials ''
zfs load-key -a
''
else
lib.concatMapStrings (fs: ''
zfs load-key -- ${lib.escapeShellArg fs}
'') (lib.filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)
}
'') rootPools)
)
);

# Systemd in stage 1
systemd = lib.mkIf config.boot.initrd.systemd.enable {
packages = [cfgZfs.package];
services = lib.listToAttrs (map (pool: createImportService {
inherit pool;
systemd = config.boot.initrd.systemd.package;
force = cfgZfs.forceImportRoot;
prefix = "/sysroot";
}) rootPools);
packages = [ cfgZfs.package ];
services = lib.listToAttrs (
map (
pool:
createImportService {
inherit pool;
systemd = config.boot.initrd.systemd.package;
force = cfgZfs.forceImportRoot;
prefix = "/sysroot";
}
) rootPools
);
targets.zfs-import.wantedBy = [ "zfs.target" ];
targets.zfs.wantedBy = [ "initrd.target" ];
extraBin = {
@@ -682,13 +799,14 @@ in
"${cfgZfs.package}/lib/udev/zvol_id"
];
};
services.udev.packages = [cfgZfs.package]; # to hook zvol naming, in stage 1
services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, in stage 1
};

systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
exec ${cfgZfs.package}/bin/zpool sync
'';
systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];
systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source =
pkgs.writeShellScript "zpool-sync-shutdown" ''
exec ${cfgZfs.package}/bin/zpool sync
'';
systemd.shutdownRamfs.storePaths = [ "${cfgZfs.package}/bin/zpool" ];

# TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To not break people's bootloader and as probably not everybody would read release notes that thoroughly add inSystem.
boot.loader.grub = lib.mkIf (inInitrd || inSystem) {
@@ -697,10 +815,11 @@ in
};

services.zfs.zed.settings = {
ZED_EMAIL_PROG = lib.mkIf cfgZED.enableMail (lib.mkDefault (
config.security.wrapperDir + "/" +
config.services.mail.sendmailSetuidWrapper.program
));
ZED_EMAIL_PROG = lib.mkIf cfgZED.enableMail (
lib.mkDefault (
config.security.wrapperDir + "/" + config.services.mail.sendmailSetuidWrapper.program
)
);
# subject in header for sendmail
ZED_EMAIL_OPTS = lib.mkIf cfgZED.enableMail (lib.mkDefault "@ADDRESS@");

@@ -721,10 +840,9 @@ in
ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
'';

environment.etc = lib.genAttrs
(map
(file: "zfs/zed.d/${file}")
[
environment.etc =
lib.genAttrs
(map (file: "zfs/zed.d/${file}") [
"all-syslog.sh"
"pool_import-led.sh"
"resilver_finish-start-scrub.sh"
@@ -736,56 +854,68 @@ in
"scrub_finish-notify.sh"
"statechange-notify.sh"
"vdev_clear-led.sh"
]
)
(file: { source = "${cfgZfs.package}/etc/${file}"; })
// {
"zfs/zed.d/zed.rc".text = zedConf;
"zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
};
])
(file: {
source = "${cfgZfs.package}/etc/${file}";
})
// {
"zfs/zed.d/zed.rc".text = zedConf;
"zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
};

system.fsPackages = [ cfgZfs.package ]; # XXX: needed? zfs doesn't have (need) a fsck
environment.systemPackages = [ cfgZfs.package ]
++ lib.optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags
environment.systemPackages = [ cfgZfs.package ] ++ lib.optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags

services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
systemd.packages = [ cfgZfs.package ];

systemd.services = let
createImportService' = pool: createImportService {
inherit pool;
systemd = config.systemd.package;
force = cfgZfs.forceImportAll;
};

# This forces a sync of any ZFS pools prior to poweroff, even if they're set
# to sync=disabled.
createSyncService = pool:
lib.nameValuePair "zfs-sync-${pool}" {
description = "Sync ZFS pool \"${pool}\"";
wantedBy = [ "shutdown.target" ];
before = [ "final.target" ];
unitConfig = {
DefaultDependencies = false;
systemd.services =
let
createImportService' =
pool:
createImportService {
inherit pool;
systemd = config.systemd.package;
force = cfgZfs.forceImportAll;
};
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;

# This forces a sync of any ZFS pools prior to poweroff, even if they're set
# to sync=disabled.
createSyncService =
pool:
lib.nameValuePair "zfs-sync-${pool}" {
description = "Sync ZFS pool \"${pool}\"";
wantedBy = [ "shutdown.target" ];
before = [ "final.target" ];
unitConfig = {
DefaultDependencies = false;
};
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
'';
};
script = ''
${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
'';
};

createZfsService = serv:
lib.nameValuePair serv {
after = [ "systemd-modules-load.service" ];
wantedBy = [ "zfs.target" ];
};
createZfsService =
serv:
lib.nameValuePair serv {
after = [ "systemd-modules-load.service" ];
wantedBy = [ "zfs.target" ];
};

in lib.listToAttrs (map createImportService' dataPools ++
map createSyncService allPools ++
map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
in
lib.listToAttrs (
map createImportService' dataPools
++ map createSyncService allPools
++ map createZfsService [
"zfs-mount"
"zfs-share"
"zfs-zed"
]
);

systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];

@@ -805,7 +935,7 @@ in
scriptArgs = "%i";
path = [ cfgZfs.package ];

script = ''
script = ''
pool=$1

echo "Expanding all devices for $pool."
@@ -821,9 +951,11 @@ in
# If the `pools` option is `true`, we want to dynamically
# expand every pool. Otherwise we want to enumerate
# just the specifically provided list of pools.
poolListProvider = if cfgExpandOnBoot == "all"
then "$(zpool list -H -o name)"
else lib.escapeShellArgs cfgExpandOnBoot;
poolListProvider =
if cfgExpandOnBoot == "all" then
"$(zpool list -H -o name)"
else
lib.escapeShellArgs cfgExpandOnBoot;
in
{
description = "Expand specified ZFS pools";
@@ -846,41 +978,55 @@
})

(lib.mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
systemd.services = let
descr = name: if name == "frequent" then "15 mins"
else if name == "hourly" then "hour"
else if name == "daily" then "day"
else if name == "weekly" then "week"
else if name == "monthly" then "month"
else throw "unknown snapshot name";
numSnapshots = name: builtins.getAttr name cfgSnapshots;
in builtins.listToAttrs (map (snapName:
{
name = "zfs-snapshot-${snapName}";
value = {
description = "ZFS auto-snapshotting every ${descr snapName}";
after = [ "zfs-import.target" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
};
restartIfChanged = false;
};
}) snapshotNames);
systemd.services =
let
descr =
name:
if name == "frequent" then
"15 mins"
else if name == "hourly" then
"hour"
else if name == "daily" then
"day"
else if name == "weekly" then
"week"
else if name == "monthly" then
"month"
else
throw "unknown snapshot name";
numSnapshots = name: builtins.getAttr name cfgSnapshots;
in
builtins.listToAttrs (
map (snapName: {
name = "zfs-snapshot-${snapName}";
value = {
description = "ZFS auto-snapshotting every ${descr snapName}";
after = [ "zfs-import.target" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
};
restartIfChanged = false;
};
}) snapshotNames
);

systemd.timers = let
timer = name: if name == "frequent" then "*:0,15,30,45" else name;
in builtins.listToAttrs (map (snapName:
{
name = "zfs-snapshot-${snapName}";
value = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = timer snapName;
Persistent = lib.mkDefault "yes";
};
};
}) snapshotNames);
systemd.timers =
let
timer = name: if name == "frequent" then "*:0,15,30,45" else name;
in
builtins.listToAttrs (
map (snapName: {
name = "zfs-snapshot-${snapName}";
value = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = timer snapName;
Persistent = lib.mkDefault "yes";
};
};
}) snapshotNames
);
})

(lib.mkIf (cfgZfs.enabled && cfgScrub.enable) {
@@ -894,11 +1040,11 @@ in
script = ''
# shellcheck disable=SC2046
${cfgZfs.package}/bin/zpool scrub -w ${
if cfgScrub.pools != [] then
if cfgScrub.pools != [ ] then
(lib.concatStringsSep " " cfgScrub.pools)
else
"$(${cfgZfs.package}/bin/zpool list -H -o name)"
}
}
'';
};