Skip to content

Commit

Permalink
test: add disk-customization test
Browse files Browse the repository at this point in the history
This commit adds a new integration test that checks the disk
customizations. It replaces some of the filesystem tests with
disk customization tests.
  • Loading branch information
mvo5 committed Dec 3, 2024
1 parent 3857e5e commit 89c4475
Show file tree
Hide file tree
Showing 4 changed files with 106 additions and 30 deletions.
72 changes: 51 additions & 21 deletions test/test_build.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ class ImageBuildResult(NamedTuple):
img_arch: str
container_ref: str
rootfs: str
disk_config: str
username: str
password: str
ssh_keyfile_private_path: str
Expand Down Expand Up @@ -203,7 +204,6 @@ def sign_container_image(gpg_conf: GPGConf, registry_conf: RegistryConf, contain
f"docker://{container_ref}",
f"docker://{signed_container_ref}",
]
print(cmd)
subprocess.run(cmd, check=True, env={"GNUPGHOME": gpg_conf.home_dir})


Expand Down Expand Up @@ -280,7 +280,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
# AF_UNIX) is derived from the path
# hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to
# different image type combinations
output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.target_arch))), "x")
output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.disk_config) + str(tc.target_arch))), "x")
output_path.mkdir(exist_ok=True)

# make sure that the test store exists, because podman refuses to start if the source directory for a volume
Expand Down Expand Up @@ -327,7 +327,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
bib_output = bib_output_path.read_text(encoding="utf8")
results.append(ImageBuildResult(
image_type, generated_img, tc.target_arch,
container_ref, tc.rootfs, username, password,
container_ref, tc.rootfs, tc.disk_config,
username, password,
ssh_keyfile_private_path, kargs, bib_output, journal_output))

# generate new keyfile
Expand Down Expand Up @@ -368,12 +369,14 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
"groups": ["wheel"],
},
],
"filesystem": testutil.create_filesystem_customizations(tc.rootfs),
"kernel": {
"append": kargs,
},
},
}
testutil.maybe_create_filesystem_customizations(cfg, tc)
testutil.maybe_create_disk_customizations(cfg, tc)
print(f"config for {output_path} {tc=}: {cfg=}")

config_json_path = output_path / "config.json"
config_json_path.write_text(json.dumps(cfg), encoding="utf-8")
Expand Down Expand Up @@ -473,7 +476,8 @@ def del_ami():
for image_type in image_types:
results.append(ImageBuildResult(
image_type, artifact[image_type], tc.target_arch,
container_ref, tc.rootfs, username, password,
container_ref, tc.rootfs, tc.disk_config,
username, password,
ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata))
yield results

Expand Down Expand Up @@ -532,19 +536,10 @@ def test_image_boots(image_type):
# XXX: read the fully yaml instead?
assert f"image: {image_type.container_ref}" in output

# check the minsize specified in the build configuration for each mountpoint against the sizes in the image
# TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint
exit_status, output = test_vm.run("df --output=target,size", user="root",
keyfile=image_type.ssh_keyfile_private_path)
assert exit_status == 0
# parse the output of 'df' to a mountpoint -> size dict for convenience
mountpoint_sizes = {}
for line in output.splitlines()[1:]:
fields = line.split()
# Note that df output is in 1k blocks, not bytes
mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes

assert_fs_customizations(image_type, mountpoint_sizes)
if image_type.disk_config:
assert_disk_customizations(image_type, test_vm)
else:
assert_fs_customizations(image_type, test_vm)


@pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"])
Expand Down Expand Up @@ -663,17 +658,52 @@ def test_multi_build_request(images):
assert artifacts == expected


def assert_fs_customizations(image_type, mountpoint_sizes):
def assert_fs_customizations(image_type, test_vm):
    """
    Assert that every mountpoint from the build configuration's filesystem
    customizations is present in the booted VM.
    TODO: assert that the size of each filesystem (or partition) also matches the expected size based on the
    customization.
    """
    # Collect mountpoint -> size (bytes) from the running VM.
    # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint
    status, df_output = test_vm.run("df --output=target,size", user="root",
                                    keyfile=image_type.ssh_keyfile_private_path)
    assert status == 0
    sizes_by_mountpoint = {}
    # skip the df header row; df reports sizes in 1 KiB blocks, convert to bytes
    for row in df_output.splitlines()[1:]:
        fields = row.split()
        sizes_by_mountpoint[fields[0]] = int(fields[1]) * 1024

    # Recreate the filesystem customizations that were used for the build.
    cfg = {
        "customizations": {},
    }
    testutil.maybe_create_filesystem_customizations(cfg, image_type)
    for fs in cfg["customizations"]["filesystem"]:
        # on a bootc system "/" shows up as "/sysroot"
        expected = "/sysroot" if fs["mountpoint"] == "/" else fs["mountpoint"]
        assert expected in sizes_by_mountpoint


def assert_disk_customizations(image_type, test_vm):
    """
    Assert that the disk customization from the build configuration
    (image_type.disk_config, "lvm" or "btrfs") is reflected in the
    mount table of the booted VM.

    Raises ValueError for an unsupported disk_config so that a new
    test case cannot silently pass without any assertions.
    """
    exit_status, output = test_vm.run("findmnt --json", user="root",
                                      keyfile=image_type.ssh_keyfile_private_path)
    assert exit_status == 0
    findmnt = json.loads(output)
    if dc := image_type.disk_config:
        # the root of the bootc system is mounted on /sysroot
        mnts = [mnt for mnt in findmnt["filesystems"][0]["children"]
                if mnt["target"] == "/sysroot"]
        assert len(mnts) == 1
        if dc == "lvm":
            assert "/dev/mapper/vg00-rootlv" == mnts[0]["source"]
        elif dc == "btrfs":
            assert "btrfs" == mnts[0]["fstype"]
            # ensure sysroot comes from the "root" subvolume
            assert mnts[0]["source"].endswith("[/root]")
        else:
            # keep in sync with testutil.maybe_create_disk_customizations
            raise ValueError(f"unsupported disk_config {dc}")
3 changes: 3 additions & 0 deletions test/test_manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -572,8 +572,10 @@ def test_manifest_disk_customization_lvm(tmp_path, build_container):
"partitions": [
{
"type": "lvm",
"minsize": "10 GiB",
"logical_volumes": [
{
"minsize": "10 GiB",
"fs_type": "ext4",
"mountpoint": "/",
}
Expand Down Expand Up @@ -606,6 +608,7 @@ def test_manifest_disk_customization_btrfs(tmp_path, build_container):
"partitions": [
{
"type": "btrfs",
"minsize": "10 GiB",
"subvolumes": [
{
"name": "root",
Expand Down
8 changes: 4 additions & 4 deletions test/testcases.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ class TestCase:
rootfs: str = ""
# Sign the container_ref and use the new signed image instead of the original one
sign: bool = False
# use special partition_mode like "lvm"
partition_mode: str = ""
# use special disk_config like "lvm"
disk_config: str = ""

def bib_rootfs_args(self):
if self.rootfs:
Expand Down Expand Up @@ -92,9 +92,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements
# and custom with raw (this is arbitrary, we could do it the
# other way around too
test_cases.append(
TestCaseCentos(image="raw", partition_mode="lvm"))
TestCaseCentos(image="raw", disk_config="lvm"))
test_cases.append(
TestCaseFedora(image="raw", partition_mode="btrfs"))
TestCaseFedora(image="raw", disk_config="btrfs"))
# do a cross arch test too
if platform.machine() == "x86_64":
# TODO: re-enable once
Expand Down
53 changes: 48 additions & 5 deletions test/testutil.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,18 +109,21 @@ def deregister_ami(ami_id):
print(f"Error {err_code}: {err_msg}")


def create_filesystem_customizations(rootfs: str):
if rootfs == "btrfs":
def maybe_create_filesystem_customizations(cfg, tc):
# disk_config and filesystem_customization are mutually exclusive
if tc.disk_config:
return
if tc.rootfs == "btrfs":
# only minimal customizations are supported for btrfs currently
return [
cfg["customizations"]["filesystem"] = [
{
"mountpoint": "/",
"minsize": "12 GiB"
},
]

return
# add some custom mountpoints
return [
cfg["customizations"]["filesystem"] = [
{
"mountpoint": "/",
"minsize": "12 GiB"
Expand All @@ -140,6 +143,46 @@ def create_filesystem_customizations(rootfs: str):
]


def maybe_create_disk_customizations(cfg, tc):
    """
    Add a "disk" customization to the build config *cfg* when the test
    case *tc* requests one via tc.disk_config ("lvm" or "btrfs").

    A falsy tc.disk_config leaves cfg untouched; an unknown value
    raises ValueError.
    """
    if not tc.disk_config:
        return
    known_configs = {
        "lvm": {
            "partitions": [
                {
                    "type": "lvm",
                    # XXX: why is this minsize also needed? should we derive
                    # it from the LVs ?
                    "minsize": "10 GiB",
                    "logical_volumes": [
                        {
                            "minsize": "10 GiB",
                            "fs_type": "xfs",
                            "mountpoint": "/",
                        }
                    ],
                }
            ],
        },
        "btrfs": {
            "partitions": [
                {
                    "type": "btrfs",
                    "minsize": "10 GiB",
                    "subvolumes": [
                        {
                            "name": "varlog",
                            "mountpoint": "/var/log",
                        }
                    ],
                }
            ],
        },
    }
    if tc.disk_config not in known_configs:
        raise ValueError(f"unsupported disk_config {tc.disk_config}")
    cfg["customizations"]["disk"] = known_configs[tc.disk_config]


# podman_run_common has the common prefix for the podman run invocations
podman_run_common = [
"podman", "run", "--rm",
Expand Down

0 comments on commit 89c4475

Please sign in to comment.