Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

zfs: create zfs_data pool with LUKS encryption #940

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 22 additions & 4 deletions modules/disko/disko-ab-partitions.nix
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@
boot = {
initrd.availableKernelModules = [ "zfs" ];
supportedFilesystems = [ "zfs" ];
zfs.extraPools = [ "zfs_data" ];
initrd.luks.devices.zfs_data = {
device = "/dev/disk/by-partlabel/disk-disk1-zfs_data";
};
};
disko = {
# 8GB is the recommended minimum for ZFS, so we are using this for VMs to avoid `cp` oom errors.
Expand All @@ -57,7 +61,7 @@
devices = {
disk.disk1 = {
type = "disk";
imageSize = "60G";
imageSize = "70G";
content = {
type = "gpt";
partitions = {
Expand Down Expand Up @@ -91,18 +95,26 @@
#randomEncryption = true;
};
};
zfs_1 = {
zfs_root = {
size = "30G";
content = {
type = "zfs";
pool = "zfs_root";
};
};
zfs_data = {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you are still using this post boot script, then this should be just an empty partition.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If using post boot script, we keep empty partition and we do something similar to this?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes. We need to detect if the device already has LUKS headers and only format if this is not the case.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Addressed, now we have better control on password with post boot script.
My only concern is that once I use luksFormat on the zfs_data partition, all ZFS metadata (pools, datasets) is lost; because of that, I need to create the pool and datasets again in the post boot script.

size = "100%";
content = {
type = "zfs";
pool = "zfspool";
pool = "zfs_data";
};
};
};
};
};

zpool = {
zfspool = {
zfs_root = {
type = "zpool";
rootFsOptions = {
mountpoint = "none";
Expand All @@ -124,6 +136,12 @@
quota = "30G";
};
};
};
};

zfs_data = {
type = "zpool";
datasets = {
"vm_storage" = {
type = "zfs_fs";
options = {
Expand Down
87 changes: 63 additions & 24 deletions modules/disko/disko-zfs-postboot.nix
Original file line number Diff line number Diff line change
Expand Up @@ -2,40 +2,79 @@
# SPDX-License-Identifier: Apache-2.0
{ pkgs, ... }:
let
postBootCmds = ''
set -xeuo pipefail
zfsPostBoot = pkgs.writeShellApplication {
name = "zfsPostBootScript";
runtimeInputs = with pkgs; [
zfs
gnugrep
gawk
cryptsetup
util-linux
gptfdisk
parted
systemd
];
text = ''
set -xeuo pipefail

# Check which physical disk is used by ZFS
ZFS_POOLNAME=$(${pkgs.zfs}/bin/zpool list | ${pkgs.gnugrep}/bin/grep -v NAME | ${pkgs.gawk}/bin/awk '{print $1}')
ZFS_LOCATION=$(${pkgs.zfs}/bin/zpool status -P | ${pkgs.gnugrep}/bin/grep dev | ${pkgs.gawk}/bin/awk '{print $1}')
# Check which physical disk is used by ZFS
ENCRYPTED_POOLNAME=zfs_data
zpool import -f "$ENCRYPTED_POOLNAME"
ZFS_POOLNAME=$(zpool list | grep -v NAME | grep $ENCRYPTED_POOLNAME | awk '{print $1}')
ZFS_LOCATION=$(zpool status "$ZFS_POOLNAME" -P | grep dev | awk '{print $1}')

# Get the actual device path
P_DEVPATH=$(readlink -f "$ZFS_LOCATION")
# Get the actual device path
P_DEVPATH=$(readlink -f "$ZFS_LOCATION")

# Extract the partition number using regex
if [[ "$P_DEVPATH" =~ [0-9]+$ ]]; then
PARTNUM=$(echo "$P_DEVPATH" | ${pkgs.gnugrep}/bin/grep -o '[0-9]*$')
PARENT_DISK=/dev/$(${pkgs.util-linux}/bin/lsblk -no pkname "$P_DEVPATH")
else
echo "No partition number found in device path: $P_DEVPATH"
fi
if [[ "$P_DEVPATH" =~ [0-9]+$ ]]; then
PARTNUM=$(echo "$P_DEVPATH" | grep -o '[0-9]*$')
PARENT_DISK=/dev/$(lsblk -no pkname "$P_DEVPATH")
else
echo "No partition number found in device path: $P_DEVPATH"
fi

# Fix GPT first
${pkgs.gptfdisk}/bin/sgdisk "$PARENT_DISK" -e
set +o pipefail
# Check if zfs pool has luks headers
if (cryptsetup status "$ZFS_POOLNAME") | grep -q "is inactive"; then
# Fix GPT first
sgdisk "$PARENT_DISK" -e

# Call partprobe to update kernel's partitions
${pkgs.parted}/bin/partprobe
# Call partprobe to update kernel's partitions
partprobe

# Extend the partition to use unallocated space
${pkgs.parted}/bin/parted -s -a opt "$PARENT_DISK" "resizepart $PARTNUM 100%"
# Extend the partition to use unallocated space
parted -s -a opt "$PARENT_DISK" "resizepart $PARTNUM 100%"

# Extend ZFS pool to use newly allocated space
zpool online -e "$ZFS_POOLNAME" "$ZFS_LOCATION"

# Exporting pool to avoid device in use errors
zpool export "$ZFS_POOLNAME"

# TODO: Remove hardcoded password and have better password mechanism
pswd="ghaf"
# Format pool with LUKS
echo -n $pswd | cryptsetup luksFormat --type luks2 -q "$ZFS_LOCATION"
echo -n $pswd | cryptsetup luksOpen "$ZFS_LOCATION" "$ZFS_POOLNAME" --persistent

# Enrolling for disk unlocking, it automatically assigns keys to a specific slot in the TPM
PASSWORD=$pswd systemd-cryptenroll --tpm2-device auto "$P_DEVPATH"

# Create pool, datasets as luksFormat will erase pools, ZFS datasets stored on that partition
zpool create -o ashift=12 -O compression=lz4 -O acltype=posixacl -O xattr=sa -f "$ZFS_POOLNAME" /dev/mapper/"$ZFS_POOLNAME"
zfs create -o quota=30G "$ZFS_POOLNAME"/vm_storage
zfs create -o quota=10G -o mountpoint=none "$ZFS_POOLNAME"/reserved
zfs create -o quota=50G "$ZFS_POOLNAME"/gp_storage
zfs create "$ZFS_POOLNAME"/storagevm
zfs create -o mountpoint=none "$ZFS_POOLNAME"/recovery
fi
'';
};

# Extend ZFS pool to use newly allocated space
${pkgs.zfs}/bin/zpool online -e "$ZFS_POOLNAME" "$ZFS_LOCATION"
'';
in
{
# To debug postBootCommands, one may run
# journalctl -u initrd-nixos-activation.service
# inside the running Ghaf host.
boot.postBootCommands = postBootCmds;
boot.postBootCommands = "${zfsPostBoot}/bin/zfsPostBootScript";
}
Loading