diff --git a/bib/go.mod b/bib/go.mod
index 0e77c098..7c7fb03c 100644
--- a/bib/go.mod
+++ b/bib/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/cheggaaa/pb/v3 v3.1.5
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-version v1.7.0
-	github.com/osbuild/images v0.100.1-0.20241122142352-ec6496521e7b
+	github.com/osbuild/images v0.103.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
diff --git a/bib/go.sum b/bib/go.sum
index 76fdc24c..1f68a855 100644
--- a/bib/go.sum
+++ b/bib/go.sum
@@ -227,6 +227,8 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/osbuild/images v0.100.1-0.20241122142352-ec6496521e7b h1:zwLef4NPjW6Q0HAjFdwOmsQ5XPNkVRCRm42RPzNBEwM=
 github.com/osbuild/images v0.100.1-0.20241122142352-ec6496521e7b/go.mod h1:4bNmMQOVadIKVC1q8zsLO8tdEQFH90zIp+MQBQUnCiE=
+github.com/osbuild/images v0.103.0 h1:GePI65RK/DPUEjoNqTqvZTdWJtrj+s2NyiLzdNoQYnM=
+github.com/osbuild/images v0.103.0/go.mod h1:4bNmMQOVadIKVC1q8zsLO8tdEQFH90zIp+MQBQUnCiE=
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
diff --git a/test/test_build.py b/test/test_build.py
index 7c94616d..7c8ffd13 100644
--- a/test/test_build.py
+++ b/test/test_build.py
@@ -38,6 +38,7 @@ class ImageBuildResult(NamedTuple):
     img_arch: str
     container_ref: str
     rootfs: str
+    disk_config: str
     username: str
     password: str
     ssh_keyfile_private_path: str
@@ -203,7 +204,6 @@ def sign_container_image(gpg_conf: GPGConf, registry_conf: RegistryConf, contain
         f"docker://{container_ref}",
         f"docker://{signed_container_ref}",
     ]
-    print(cmd)
     subprocess.run(cmd, check=True, env={"GNUPGHOME": gpg_conf.home_dir})


@@ -280,7 +280,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
     # AF_UNIX) is derived from the path
     # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to
     # different image type combinations
-    output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.target_arch))), "x")
+    output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.disk_config) + str(tc.target_arch))), "x")
     output_path.mkdir(exist_ok=True)

     # make sure that the test store exists, because podman refuses to start if the source directory for a volume
@@ -327,7 +327,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
         bib_output = bib_output_path.read_text(encoding="utf8")
         results.append(ImageBuildResult(
             image_type, generated_img, tc.target_arch,
-            container_ref, tc.rootfs, username, password,
+            container_ref, tc.rootfs, tc.disk_config,
+            username, password,
             ssh_keyfile_private_path, kargs, bib_output, journal_output))

     # generate new keyfile
@@ -368,12 +369,14 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_
                     "groups": ["wheel"],
                 },
             ],
-            "filesystem": testutil.create_filesystem_customizations(tc.rootfs),
             "kernel": {
                 "append": kargs,
             },
         },
     }
+    testutil.maybe_create_filesystem_customizations(cfg, tc)
+    testutil.maybe_create_disk_customizations(cfg, tc)
+    print(f"config for {output_path} {tc=}: {cfg=}")
     config_json_path = output_path / "config.json"
     config_json_path.write_text(json.dumps(cfg), encoding="utf-8")

@@ -473,7 +476,8 @@ def del_ami():
     for image_type in image_types:
         results.append(ImageBuildResult(
             image_type, artifact[image_type], tc.target_arch,
-            container_ref, tc.rootfs, username, password,
+            container_ref, tc.rootfs, tc.disk_config,
+            username, password,
             ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata))

     yield results
@@ -532,19 +536,10 @@ def test_image_boots(image_type):
         # XXX: read the fully yaml instead?
         assert f"image: {image_type.container_ref}" in output

-        # check the minsize specified in the build configuration for each mountpoint against the sizes in the image
-        # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint
-        exit_status, output = test_vm.run("df --output=target,size", user="root",
-                                          keyfile=image_type.ssh_keyfile_private_path)
-        assert exit_status == 0
-        # parse the output of 'df' to a mountpoint -> size dict for convenience
-        mountpoint_sizes = {}
-        for line in output.splitlines()[1:]:
-            fields = line.split()
-            # Note that df output is in 1k blocks, not bytes
-            mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10  # in bytes
-
-        assert_fs_customizations(image_type, mountpoint_sizes)
+        if image_type.disk_config:
+            assert_disk_customizations(image_type, test_vm)
+        else:
+            assert_fs_customizations(image_type, test_vm)


 @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"])
@@ -663,17 +658,52 @@ def test_multi_build_request(images):
     assert artifacts == expected


-def assert_fs_customizations(image_type, mountpoint_sizes):
+def assert_fs_customizations(image_type, test_vm):
     """
     Asserts that each mountpoint that appears in the build configuration also appears in mountpoint_sizes.

     TODO: assert that the size of each filesystem (or partition) also matches the expected size
     based on the customization.
""" - fs_customizations = testutil.create_filesystem_customizations(image_type.rootfs) - for fs in fs_customizations: + # check the minsize specified in the build configuration for each mountpoint against the sizes in the image + # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint + exit_status, output = test_vm.run("df --output=target,size", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 + # parse the output of 'df' to a mountpoint -> size dict for convenience + mountpoint_sizes = {} + for line in output.splitlines()[1:]: + fields = line.split() + # Note that df output is in 1k blocks, not bytes + mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes + + cfg = { + "customizations": {}, + } + testutil.maybe_create_filesystem_customizations(cfg, image_type) + for fs in cfg["customizations"]["filesystem"]: mountpoint = fs["mountpoint"] if mountpoint == "/": # / is actually /sysroot mountpoint = "/sysroot" assert mountpoint in mountpoint_sizes + + +def assert_disk_customizations(image_type, test_vm): + exit_status, output = test_vm.run("findmnt --json", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 + findmnt = json.loads(output) + if dc := image_type.disk_config: + if dc == "lvm": + mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] + if mnt["target"] == "/sysroot"] + assert len(mnts) == 1 + assert "/dev/mapper/vg00-rootlv" == mnts[0]["source"] + elif dc == "btrfs": + mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] + if mnt["target"] == "/sysroot"] + assert len(mnts) == 1 + assert "btrfs" == mnts[0]["fstype"] + # ensure sysroot comes from the "root" subvolume + assert mnts[0]["source"].endswith("[/root]") diff --git a/test/test_manifest.py b/test/test_manifest.py index 8badfd9e..c869bdb4 100644 --- a/test/test_manifest.py +++ b/test/test_manifest.py @@ -572,8 +572,10 @@ def test_manifest_disk_customization_lvm(tmp_path, build_container): "partitions": [ { "type": "lvm", + "minsize": "10 GiB", "logical_volumes": [ { + "minsize": "10 GiB", "fs_type": "ext4", "mountpoint": "/", } @@ -606,6 +608,7 @@ def test_manifest_disk_customization_btrfs(tmp_path, build_container): "partitions": [ { "type": "btrfs", + "minsize": "10 GiB", "subvolumes": [ { "name": "root", diff --git a/test/testcases.py b/test/testcases.py index 0cf51e1d..ed0ee45e 100644 --- a/test/testcases.py +++ b/test/testcases.py @@ -25,8 +25,8 @@ class TestCase: rootfs: str = "" # Sign the container_ref and use the new signed image instead of the original one sign: bool = False - # use special partition_mode like "lvm" - partition_mode: str = "" + # use special disk_config like "lvm" + disk_config: str = "" def bib_rootfs_args(self): if self.rootfs: @@ -92,9 +92,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements # and custom with raw (this is arbitrary, we could do it the # other way around too test_cases.append( - TestCaseCentos(image="raw", partition_mode="lvm")) + TestCaseCentos(image="raw", disk_config="lvm")) test_cases.append( - TestCaseFedora(image="raw", partition_mode="btrfs")) + TestCaseFedora(image="raw", disk_config="btrfs")) # do a cross arch test too if platform.machine() == "x86_64": # TODO: re-enable once diff --git a/test/testutil.py b/test/testutil.py index b853c613..8ee8682d 100644 --- a/test/testutil.py +++ b/test/testutil.py @@ -109,18 +109,21 @@ def deregister_ami(ami_id): print(f"Error {err_code}: {err_msg}") -def 
-    if rootfs == "btrfs":
+def maybe_create_filesystem_customizations(cfg, tc):
+    # disk_config and filesystem_customization are mutually exclusive
+    if tc.disk_config:
+        return
+    if tc.rootfs == "btrfs":
         # only minimal customizations are supported for btrfs currently
-        return [
+        cfg["customizations"]["filesystem"] = [
             {
                 "mountpoint": "/",
                 "minsize": "12 GiB"
             },
         ]
-
+        return
     # add some custom mountpoints
-    return [
+    cfg["customizations"]["filesystem"] = [
         {
             "mountpoint": "/",
             "minsize": "12 GiB"
@@ -140,6 +143,46 @@ def create_filesystem_customizations(rootfs: str):
     ]


+def maybe_create_disk_customizations(cfg, tc):
+    if not tc.disk_config:
+        return
+    if tc.disk_config == "lvm":
+        cfg["customizations"]["disk"] = {
+            "partitions": [
+                {
+                    "type": "lvm",
+                    # XXX: why is this minsize also needed? should we derive
+                    # it from the LVs?
+                    "minsize": "10 GiB",
+                    "logical_volumes": [
+                        {
+                            "minsize": "10 GiB",
+                            "fs_type": "xfs",
+                            "mountpoint": "/",
+                        }
+                    ]
+                }
+            ]
+        }
+    elif tc.disk_config == "btrfs":
+        cfg["customizations"]["disk"] = {
+            "partitions": [
+                {
+                    "type": "btrfs",
+                    "minsize": "10 GiB",
+                    "subvolumes": [
+                        {
+                            "name": "varlog",
+                            "mountpoint": "/var/log",
+                        }
+                    ]
+                }
+            ]
+        }
+    else:
+        raise ValueError(f"unsupported disk_config {tc.disk_config}")
+
+
 # podman_run_common has the common prefix for the podman run invocations
 podman_run_common = [
     "podman", "run", "--rm",