diff --git a/.github/workflows/bibtests.yaml b/.github/workflows/bibtests.yaml
new file mode 100644
index 00000000..79212a61
--- /dev/null
+++ b/.github/workflows/bibtests.yaml
@@ -0,0 +1,118 @@
+---
+name: Bib tests
+
+on:
+  pull_request:
+    branches:
+      - "*"
+  push:
+    branches:
+      - "main"
+  # for merge queue
+  merge_group:
+
+env:
+  GO_VERSION: 1.23
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  collect_tests:
+    runs-on: ubuntu-latest
+    outputs:
+      test_files: ${{ steps.collect.outputs.test_files }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v6
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Collect test files
+        id: collect
+        run: |
+          TEST_FILES=$(ls test/bib/test_*.py | sort)
+          JSON_FILES=$(echo "${TEST_FILES}" | jq -R | jq -cs)
+          echo "test_files=${JSON_FILES}" >> $GITHUB_OUTPUT
+
+  integration:
+    name: "Integration"
+    runs-on: ubuntu-24.04
+    needs: collect_tests
+    strategy:
+      matrix:
+        test_file: ${{ fromJson(needs.collect_tests.outputs.test_files) }}
+    steps:
+      - uses: actions/checkout@v6
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Set up python
+        uses: actions/setup-python@v6
+      - name: Apt update
+        run: sudo apt update
+      - name: Install test dependencies
+        run: |
+          sudo apt install -y python3-pytest python3-boto3 flake8 pylint libosinfo-bin squashfs-tools sshpass
+      - name: Diskspace (before)
+        run: |
+          df -h
+          sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh
+      - name: Free Disk Space
+        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be
+        with:
+          tool-cache: true
+          # The following line runs apt remove which is slow
+          large-packages: false
+      - name: Workaround podman issues in GH actions
+        run: |
+          # see https://github.com/osbuild/bootc-image-builder/issues/446
+          sudo rm -rf /var/lib/containers/storage
+          sudo mkdir -p /etc/containers
+          echo -e "[storage]\ndriver = \"overlay\"\nrunroot = \"/run/containers/storage\"\ngraphroot = \"/var/lib/containers/storage\"" | sudo tee /etc/containers/storage.conf
+      - name: Update qemu-user
+        run: |
+          # Get qemu-9 with the openat2 patches via qemu-user-static; it
+          # has no dependencies, so just install it.
+          # XXX: remove once ubuntu ships qemu-9.1
+          sudo apt install -y software-properties-common
+          sudo apt-add-repository -y ppa:mvo/qemu
+          sudo apt install --no-install-recommends -y qemu-user-static
+          # Now remove the ppa again, its metadata confuses apt. Then install
+          # qemu-system-* from the regular repo again.
+          sudo apt-add-repository --remove -y ppa:mvo/qemu
+          sudo apt install -y qemu-system-arm qemu-system-x86 qemu-efi-aarch64
+      - name: Install python test deps
+        run: |
+          # make sure test deps are available for root
+          sudo -E pip install --user -r test/bib/requirements.txt
+      - name: Workarounds for GH runner diskspace
+        run: |
+          # use a custom basetemp here because /var/tmp is on a smaller disk
+          # than /mnt
+          sudo mkdir -p /mnt/var/tmp/bib-tests
+          # on GH runners /mnt has 70G free space, use that for our container
+          # storage
+          sudo mkdir -p /mnt/var/lib/containers
+          sudo mount -o bind /mnt/var/lib/containers /var/lib/containers
+      - run: |
+          mkdir -p /var/tmp/osbuild-test-store
+      - name: Cache osbuild env
+        uses: actions/cache@v4
+        with:
+          path: /var/tmp/osbuild-test-store
+          key: no-key-needed-here
+      - name: Run tests
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+        run: |
+          # podman needs (parts of) the environment but will break when
+          # XDG_RUNTIME_DIR is set.
+          # TODO: figure out what exactly podman needs
+          sudo -E XDG_RUNTIME_DIR= PYTHONPATH=. pytest-3 -v --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }}
+      - name: Diskspace (after)
+        if: ${{ always() }}
+        run: |
+          df -h
+          sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh
diff --git a/Containerfile.bib b/Containerfile.bib
new file mode 100644
index 00000000..bd155188
--- /dev/null
+++ b/Containerfile.bib
@@ -0,0 +1,46 @@
+FROM registry.fedoraproject.org/fedora:43 AS builder
+RUN dnf install -y git-core golang gpgme-devel libassuan-devel libvirt-devel && mkdir -p /build/bib
+COPY go.mod go.sum /build/bib/
+ARG GOPROXY=https://proxy.golang.org,direct
+RUN go env -w GOPROXY=$GOPROXY
+RUN cd /build/bib && go mod download
+# Copy the entire dir to avoid having to conditionally include ".git" as that
+# will not be available when tests are run under tmt
+COPY . /build
+WORKDIR /build
+# keep in sync with:
+# https://github.com/containers/podman/blob/2981262215f563461d449b9841741339f4d9a894/Makefile#L51
+# disable cgo as
+# a) gcc crashes on fedora41/arm64 regularly
+# b) we don't really need it
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    CGO_ENABLED=0 go build -tags "containers_image_openpgp exclude_graphdriver_btrfs exclude_graphdriver_devicemapper" ./cmd/image-builder
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    for arch in amd64 arm64; do \
+        [ "$arch" = "$(go env GOARCH)" ] && continue; \
+        GOARCH="$arch" go build -ldflags="-s -w" -o ../bin/bib-canary-"$arch" ./cmd/cross-arch/; \
+    done
+
+
+FROM registry.fedoraproject.org/fedora:43
+# Fast-track osbuild so we don't depend on the "slow" Fedora release process to implement new features in bib
+RUN dnf install -y dnf-plugins-core \
+    && dnf copr enable -y @osbuild/osbuild \
+    && dnf install -y libxcrypt-compat wget osbuild osbuild-ostree osbuild-depsolve-dnf osbuild-lvm2 openssl subscription-manager libvirt-libs \
+    && dnf clean all
+
+# copy as bootc-image-builder
+COPY --from=builder /build/image-builder /usr/bin/bootc-image-builder
+
+ENTRYPOINT ["/usr/bin/bootc-image-builder"]
+VOLUME /output
+WORKDIR /output
+VOLUME /store
+VOLUME /rpmmd
+VOLUME /var/lib/containers/storage
+
+LABEL description="This tool builds and deploys disk images from bootc container inputs."
+LABEL io.k8s.description="This tool builds and deploys disk images from bootc container inputs."
+LABEL io.k8s.display-name="Bootc Image Builder"
+LABEL io.openshift.tags="base fedora43"
+LABEL summary="A container to create disk images from bootc container inputs"
diff --git a/cmd/cross-arch/canary.go b/cmd/cross-arch/canary.go
new file mode 100644
index 00000000..07a48394
--- /dev/null
+++ b/cmd/cross-arch/canary.go
@@ -0,0 +1,5 @@
+package main
+
+func main() {
+	println("ok")
+}
diff --git a/test/bib/README.md b/test/bib/README.md
new file mode 100644
index 00000000..bd501281
--- /dev/null
+++ b/test/bib/README.md
@@ -0,0 +1,21 @@
+Integration tests for bootc-image-builder
+----------------------------------------------
+
+This directory contains integration tests for bootc-image-builder.
+
+They can be run in two ways:
+1. On the local machine:
+   Run `sudo pytest -s -v` in the _top level folder_ of the project (where `Containerfile.bib` is).
+   If you have set up `pip` only for your user, you may want to run the tests with elevated privileges
+   instead: `sudo -E $(which pytest) -s -v`. A single test file can be selected the usual pytest way,
+   e.g. `sudo -E $(which pytest) -s -v test/bib/test_manifest.py`.
+2. Via `tmt` [0], which will spin up a clean VM and run the tests inside:
+
+       tmt run -vvv
+
+[0] https://github.com/teemtee/tmt
+
+To install `tmt` on Fedora, at least these packages are needed:
+
+```shell
+sudo dnf install tmt tmt+provision-virtual
+```
diff --git a/test/bib/conftest.py b/test/bib/conftest.py
new file mode 100644
index 00000000..acdfb393
--- /dev/null
+++ b/test/bib/conftest.py
@@ -0,0 +1,29 @@
+import pytest
+
+# pylint: disable=wrong-import-order
+from testcases import TestCase
+from vmtest.util import get_free_port
+
+
+def pytest_addoption(parser):
+    parser.addoption("--force-aws-upload", action="store_true", default=False,
+                     help=("Force AWS upload when building AMI, failing if credentials are not set. "
+                           "If not set, the upload will be performed only when credentials are available."))
+
+
+@pytest.fixture(name="force_aws_upload", scope="session")
+def force_aws_upload_fixture(request):
+    return request.config.getoption("--force-aws-upload")
+
+
+# see https://hackebrot.github.io/pytest-tricks/param_id_func/ and
+# https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.hookspec.pytest_make_parametrize_id
+def pytest_make_parametrize_id(config, val):  # pylint: disable=W0613
+    if isinstance(val, TestCase):
+        return f"{val}"
+    return None
+
+
+@pytest.fixture(name="free_port")
+def free_port_fixture():
+    return get_free_port()
diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py
new file mode 100644
index 00000000..02d5d1d2
--- /dev/null
+++ b/test/bib/containerbuild.py
@@ -0,0 +1,148 @@
+import os
+import platform
+import random
+import string
+import subprocess
+import textwrap
+from contextlib import contextmanager
+
+import pytest
+
+
+@contextmanager
+def make_container(container_path, arch=None):
+    # BIB only supports container tags, not hashes
+    container_tag = "bib-test-" + "".join(random.choices(string.digits, k=12))
+
+    if not arch:
+        # Always provide an architecture here: without it, the default
+        # behavior is to build for whatever arch was pulled for this image
+        # ref last, but we want "native" if nothing else is specified.
+        #
+        # Note: podman seems to translate kernel arches to Go arches
+        # automatically.
+        arch = platform.uname().machine
+
+    subprocess.check_call([
+        "podman", "build",
+        "--cache-ttl=1h",
+        "-t", container_tag,
+        "--arch", arch,
+        container_path], encoding="utf8")
+    yield container_tag
+    subprocess.check_call(["podman", "rmi", container_tag])
+
+
+@pytest.fixture(name="build_container", scope="session")
+def build_container_fixture():
+    """Build a container from the Containerfile and return the name"""
+    if tag_from_env := os.getenv("BIB_TEST_BUILD_CONTAINER_TAG"):
+        return tag_from_env
+
+    container_tag = "bootc-image-builder-test"
+    subprocess.check_call([
+        "podman", "build",
+        "--cache-ttl=1h",
+        "-f", "Containerfile.bib",
+        "-t", container_tag,
+        ".",
+    ])
+    return container_tag
+
+
+@pytest.fixture(name="build_fake_container", scope="session")
+def build_fake_container_fixture(tmpdir_factory, build_container):
+    """Build a container with a fake osbuild and return the name"""
+    tmp_path = tmpdir_factory.mktemp("build-fake-container")
+
+    # see https://github.com/osbuild/osbuild/blob/main/osbuild/testutil/__init__.py#L91
+    tracing_podman_path = tmp_path / "tracing-podman"
+    tracing_podman_path.write_text(textwrap.dedent("""\
+    #!/bin/sh -e
+
+    TRACE_PATH=/output/"$(basename $0)".log
+    for arg in "$@"; do
+        echo "$arg" >> "$TRACE_PATH"
+    done
+    # extra separator to differentiate between calls
+    echo >> "$TRACE_PATH"
+    exec "$0".real "$@"
+    """), encoding="utf8")
+
+    fake_osbuild_path = tmp_path / "fake-osbuild"
+    fake_osbuild_path.write_text(textwrap.dedent("""\
+    #!/bin/bash -e
+
+    # ingest the generated manifest from the images library; if we do not
+    # do this, images may fail with "broken pipe" errors
+    cat - >/dev/null
+
+    mkdir -p /output/qcow2
+    echo "fake-disk.qcow2" > /output/qcow2/disk.qcow2
+
+    """), encoding="utf8")
+
+    cntf_path = tmp_path / "Containerfile"
+
+    cntf_path.write_text(textwrap.dedent(f"""\n
+    FROM {build_container}
+    COPY fake-osbuild /usr/bin/osbuild
+    RUN chmod 755 /usr/bin/osbuild
+    COPY --from={build_container} /usr/bin/podman /usr/bin/podman.real
+    COPY tracing-podman /usr/bin/podman
+    RUN chmod 755 /usr/bin/podman
+    """), encoding="utf8")
+
+    container_tag = "bootc-image-builder-test-faked-osbuild"
+    subprocess.check_call([
+        "podman", "build",
+        "-t", container_tag,
+        tmp_path,
+    ])
+    return container_tag
+
+
+@pytest.fixture(name="build_erroring_container", scope="session")
+def build_erroring_container_fixture(tmpdir_factory, build_container):
+    """Build a container with an erroring osbuild and return the name"""
+    tmp_path = tmpdir_factory.mktemp("build-erroring-container")
+
+    # this ensures there are messages from osbuild itself that
+    # we can reliably test for
+    wrapping_osbuild_path = tmp_path / "wrapping-osbuild"
+    wrapping_osbuild_path.write_text(textwrap.dedent("""\
+    #!/bin/sh -e
+    echo "output-from-osbuild-stdout"
+    >&2 echo "output-from-osbuild-stderr"
+
+    exec /usr/bin/osbuild.real "$@"
+    """), encoding="utf8")
+
+    # this ensures we have a failing stage and failure messages
+    bad_stage_path = tmp_path / "bad-stage"
+    bad_stage_path.write_text(textwrap.dedent("""\
+    #!/bin/sh -e
+    echo osbuild-stage-stdout-output
+    >&2 echo osbuild-stage-stderr-output
+    exit 112
+    """), encoding="utf8")
+
+    cntf_path = tmp_path / "Containerfile"
+    cntf_path.write_text(textwrap.dedent(f"""\n
+    FROM {build_container}
+    # ensure there is osbuild output
+    COPY --from={build_container} /usr/bin/osbuild /usr/bin/osbuild.real
+    COPY wrapping-osbuild /usr/bin/osbuild
+    RUN chmod 755 /usr/bin/osbuild
+
+    # we break org.osbuild.selinux as it runs early and is used everywhere
+    COPY bad-stage /usr/lib/osbuild/stages/org.osbuild.selinux
+    RUN chmod +x /usr/lib/osbuild/stages/org.osbuild.selinux
+    """), encoding="utf8")
+
+    container_tag = "bootc-image-builder-test-erroring-osbuild"
+    subprocess.check_call([
+        "podman", "build",
+        "-t", container_tag,
+        tmp_path,
+    ])
+    return container_tag
diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt
new file mode 100644
index 00000000..5a58554d
--- /dev/null
+++ b/test/bib/requirements.txt
@@ -0,0 +1,6 @@
+pytest==7.4.3
+flake8==6.1.0
+boto3==1.33.13
+qmp==1.1.0
+pylint==3.2.5
+vmtest @ git+https://github.com/osbuild/images.git
diff --git a/test/bib/test_build_cross.py b/test/bib/test_build_cross.py
new file mode 100644
index 00000000..12b89eeb
--- /dev/null
+++ b/test/bib/test_build_cross.py
@@ -0,0 +1,23 @@
+import platform
+
+import pytest
+
+from testcases import gen_testcases
+
+from test_build_disk import (  # pylint: disable=unused-import
+    assert_disk_image_boots,
+    build_container_fixture,
+    gpg_conf_fixture,
+    image_type_fixture,
+    registry_conf_fixture,
+    shared_tmpdir_fixture,
+)
+
+
+# This testcase is not part of "test_build_disk.py:test_image_boots"
+# because it takes ~30min on the GH runners; moving it into a
+# separate file ensures it is run in parallel on GH.
+@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now")
+@pytest.mark.parametrize("image_type", gen_testcases("qemu-cross"), indirect=["image_type"])
+def test_image_boots_cross(image_type):
+    assert_disk_image_boots(image_type)
diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py
new file mode 100644
index 00000000..7672bd80
--- /dev/null
+++ b/test/bib/test_build_disk.py
@@ -0,0 +1,730 @@
+import json
+import os
+import pathlib
+import platform
+import random
+import re
+import shutil
+import string
+import subprocess
+import tempfile
+import uuid
+from contextlib import contextmanager, ExitStack
+from typing import NamedTuple
+from dataclasses import dataclass
+
+import pytest
+# local test utils
+import testutil
+from containerbuild import build_container_fixture  # pylint: disable=unused-import
+from testcases import CLOUD_BOOT_IMAGE_TYPES, DISK_IMAGE_TYPES, gen_testcases
+import vmtest.util
+from vmtest.vm import AWS_REGION, AWS, QEMU
+
+if not testutil.has_executable("podman"):
+    pytest.skip("no podman, skipping integration tests that require podman", allow_module_level=True)
+
+if not testutil.can_start_rootful_containers():
+    pytest.skip("tests require the ability to run rootful containers (try: sudo)", allow_module_level=True)
+
+# building an ELN image needs x86_64-v3 to work; we use avx2 as a proxy
+# to detect if we have x86_64-v3 (not perfect but should be good enough)
+if platform.system() == "Linux" and platform.machine() == "x86_64" and not testutil.has_x86_64_v3_cpu():
+    pytest.skip("need x86_64-v3 capable CPU", allow_module_level=True)
+
+
+class ImageBuildResult(NamedTuple):
+    img_type: str
+    img_path: str
+    img_arch: str
+    container_ref: str
+    build_container_ref: str
+    rootfs: str
+    disk_config: str
+    username: str
+    password: str
+    ssh_keyfile_private_path: str
+    kargs: str
+    bib_output: str
+    journal_output: str
+    metadata: dict = {}
+
+
+@dataclass
+class GPGConf:
+    email: str
+    key_length: str
+    home_dir: str
+    pub_key_file: str
+    key_params: str
+
+
+@dataclass
+class RegistryConf:
+    local_registry: str
+    sigstore_dir: str
+    registries_d_dir: str
+    policy_file: str
+    lookaside_conf_file: str
+    lookaside_conf: str
+
+
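+# NOTE: the fixtures below are deliberately session-scoped: expensive
+# artifacts (the GPG key, the local registry container, built disk images)
+# are created once and then shared by all tests in a run.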
+@pytest.fixture(name="shared_tmpdir", scope='session')
+def shared_tmpdir_fixture(tmpdir_factory):
+    tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared"))
+    yield tmp_path
+
+
+@pytest.fixture(name="gpg_conf", scope='session')
+def gpg_conf_fixture(shared_tmpdir):
+    key_params_tmpl = """
+    %no-protection
+    Key-Type: RSA
+    Key-Length: {key_length}
+    Key-Usage: sign
+    Name-Real: Bootc Image Builder Tests
+    Name-Email: {email}
+    Expire-Date: 0
+    """
+    email = "bib-tests@redhat.com"
+    key_length = "3072"
+    home_dir = f"{shared_tmpdir}/.gnupg"
+    pub_key_file = f"{shared_tmpdir}/GPG-KEY-bib-tests"
+    key_params = key_params_tmpl.format(key_length=key_length, email=email)
+
+    os.makedirs(home_dir, mode=0o700, exist_ok=False)
+    subprocess.run(
+        ["gpg", "--gen-key", "--batch"],
+        check=True, env={"GNUPGHOME": home_dir},
+        input=key_params,
+        text=True)
+    subprocess.run(
+        ["gpg", "--output", pub_key_file,
+         "--armor", "--export", email],
+        check=True, env={"GNUPGHOME": home_dir})
+
+    yield GPGConf(email=email, home_dir=home_dir,
+                  key_length=key_length, pub_key_file=pub_key_file, key_params=key_params)
+
+
+@pytest.fixture(name="registry_conf", scope='session')
+def registry_conf_fixture(shared_tmpdir, request):
+    lookaside_conf_tmpl = """
+    docker:
+        {local_registry}:
+            lookaside: file:///{sigstore_dir}
+    """
+    registry_port = vmtest.util.get_free_port()
+    # We cannot use localhost as we need to access the registry from both
+    # the host system and the bootc-image-builder container.
+    default_ip = testutil.get_ip_from_default_route()
+    local_registry = f"{default_ip}:{registry_port}"
+    sigstore_dir = f"{shared_tmpdir}/sigstore"
+    registries_d_dir = f"{shared_tmpdir}/registries.d"
+    policy_file = f"{shared_tmpdir}/policy.json"
+    lookaside_conf_file = f"{registries_d_dir}/lookaside.yaml"
+    lookaside_conf = lookaside_conf_tmpl.format(
+        local_registry=local_registry,
+        sigstore_dir=sigstore_dir
+    )
+    os.makedirs(registries_d_dir, mode=0o700, exist_ok=True)
+    os.makedirs(sigstore_dir, mode=0o700, exist_ok=True)
+
+    registry_container_name = f"registry_{registry_port}"
+
+    registry_container_running = subprocess.run([
+        "podman", "ps", "-a", "--filter", f"name={registry_container_name}", "--format", "{{.Names}}"
+    ], check=True, capture_output=True, text=True).stdout.strip()
+    if registry_container_running != registry_container_name:
+        subprocess.run([
+            "podman", "run", "-d",
+            "-p", f"{registry_port}:5000",
+            "--restart", "always",
+            "--name", registry_container_name,
+            # We use a copy of the docker.io registry to avoid running into docker.io pull rate limits
+            "ghcr.io/osbuild/bootc-image-builder/registry:2"
+        ], check=True)
+
+    registry_container_state = subprocess.run([
+        "podman", "ps", "-a", "--filter", f"name={registry_container_name}", "--format", "{{.State}}"
+    ], check=True, capture_output=True, text=True).stdout.strip()
+
+    if registry_container_state in ("paused", "exited"):
+        subprocess.run([
+            "podman", "start", registry_container_name
+        ], check=True)
+
+    def remove_registry():
+        subprocess.run([
+            "podman", "rm", "--force", registry_container_name
+        ], check=True)
+
+    request.addfinalizer(remove_registry)
+    yield RegistryConf(
+        local_registry=local_registry,
+        sigstore_dir=sigstore_dir,
+        registries_d_dir=registries_d_dir,
+        policy_file=policy_file,
+        lookaside_conf=lookaside_conf,
+        lookaside_conf_file=lookaside_conf_file,
+    )
+
+
+def get_signed_container_ref(local_registry: str, container_ref: str):
+    container_ref_path = container_ref[container_ref.index('/'):]
+    return f"{local_registry}{container_ref_path}"
+
+
+def sign_container_image(gpg_conf: GPGConf, registry_conf: RegistryConf, container_ref):
+    registry_policy = {
+        "default": [{"type": "insecureAcceptAnything"}],
+        "transports": {
+            "docker": {
+                f"{registry_conf.local_registry}": [
+                    {
+                        "type": "signedBy",
+                        "keyType": "GPGKeys",
+                        "keyPath": f"{gpg_conf.pub_key_file}"
+                    }
+                ]
+            },
+            "docker-daemon": {
+                "": [{"type": "insecureAcceptAnything"}]
+            }
+        }
+    }
+    with open(registry_conf.policy_file, mode="w", encoding="utf-8") as f:
+        f.write(json.dumps(registry_policy))
+
+    with open(registry_conf.lookaside_conf_file, mode="w", encoding="utf-8") as f:
+        f.write(registry_conf.lookaside_conf)
+
+    signed_container_ref = get_signed_container_ref(registry_conf.local_registry, container_ref)
+    cmd = [
+        "skopeo", "--registries.d", registry_conf.registries_d_dir,
+        "copy", "--dest-tls-verify=false", "--remove-signatures",
+        "--sign-by", gpg_conf.email,
+        f"docker://{container_ref}",
+        f"docker://{signed_container_ref}",
+    ]
+    subprocess.run(cmd, check=True, env={"GNUPGHOME": gpg_conf.home_dir})
+
+
+@pytest.fixture(name="image_type", scope="session")
+# pylint: disable=too-many-arguments
+def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf):
+    """
+    Build an image inside the passed build_container and return an
+    ImageBuildResult with the resulting image path and user/password.
+    In the case an image is being built from a local container, the
+    function will build the required local container for the test.
+    """
+    testutil.pull_container(request.param.container_ref, request.param.target_arch)
+
+    with build_images(shared_tmpdir, build_container,
+                      request, force_aws_upload, gpg_conf, registry_conf) as build_results:
+        yield build_results[0]
+
+
+@pytest.fixture(name="images", scope="session")
+# pylint: disable=too-many-arguments
+def images_fixture(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf):
+    """
+    Build one or more images inside the passed build_container and return an
+    ImageBuildResult array with the resulting image path and user/password.
+    """
+    testutil.pull_container(request.param.container_ref, request.param.target_arch)
+    with build_images(shared_tmpdir, build_container,
+                      request, force_aws_upload, gpg_conf, registry_conf) as build_results:
+        yield build_results
+
+
+# XXX: refactor
+# pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-arguments
+@contextmanager
+def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf):
+    """
+    Build all available image types if necessary and return the results for
+    the image types that were requested via :request:.
+
+    Will return cached results of previous build requests.
+
+    :request.param: has the form "container_url,img_type1+img_type2,arch,local"
+    """
+    # the testcases.TestCase comes from the request parameter
+    tc = request.param
+
+    # images might be multiple --type args
+    # split and check each one
+    image_types = request.param.image.split("+")
+
+    username = "test"
+    # use 18 char random password
+    password = "".join(
+        random.choices(string.ascii_uppercase + string.digits, k=18))
+    kargs = "systemd.journald.forward_to_console=1"
+
+    container_ref = tc.container_ref
+
+    if tc.sign:
+        container_ref = get_signed_container_ref(registry_conf.local_registry, tc.container_ref)
+
+    # params can be long and the qmp socket (AF_UNIX paths have a limit of
+    # ~100 chars) is derived from the path, so hash container_ref,
+    # disk_config and target_arch; the image_type is excluded so that the
+    # output path is shared between calls for different image type combinations
+    output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.disk_config) + str(tc.target_arch))), "x")
+    output_path.mkdir(exist_ok=True)
+
+    # make sure that the test store exists, because podman refuses to start if the source directory for a volume
+    # doesn't exist
+    pathlib.Path("/var/tmp/osbuild-test-store").mkdir(exist_ok=True, parents=True)
+
+    journal_log_path = output_path / "journal.log"
+    bib_output_path = output_path / "bib-output.log"
+
+    ssh_keyfile_private_path = output_path / "ssh-keyfile"
+    ssh_keyfile_public_path = ssh_keyfile_private_path.with_suffix(".pub")
+
+    artifact = {
+        "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2",
+        "ami": pathlib.Path(output_path) / "image/disk.raw",
+        "raw": pathlib.Path(output_path) / "image/disk.raw",
+        "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk",
+        "vhd": pathlib.Path(output_path) / "vpc/disk.vhd",
+        "gce": pathlib.Path(output_path) / "gce/image.tar.gz",
+        "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso",
+    }
+    assert len(artifact) == len(set(tc.image for tc in gen_testcases("all"))), \
+        "please keep artifact mapping and supported images in sync"
+
+    # this helper checks the cache
+    results = []
+    for image_type in image_types:
+        # TODO: properly cache amis here. The issue right now is that
+        # ami and raw are the same image on disk which means that if a test
+        # like "boots_in_aws" requests an ami it will get the raw file on
+        # disk. However that is not sufficient because part of the ami test
+        # is the upload to AWS and the generated metadata. The fix could be
+        # to make the boot-in-aws a new image type like "ami-aws" where we
+        # cache the metadata instead of the disk image. Alternatively we
+        # could stop testing ami locally at all and just skip any ami tests
+        # if there are no AWS credentials.
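+        # Until that is sorted out, cloud-boot image types skip the cache
+        # lookup below and are always rebuilt.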
+        if image_type in CLOUD_BOOT_IMAGE_TYPES:
+            continue
+        generated_img = artifact[image_type]
+        print(f"Checking for cached image {image_type} -> {generated_img}")
+        if generated_img.exists():
+            print(f"NOTE: reusing cached image {generated_img}")
+            journal_output = journal_log_path.read_text(encoding="utf8")
+            bib_output = bib_output_path.read_text(encoding="utf8")
+            results.append(ImageBuildResult(
+                image_type, generated_img, tc.target_arch,
+                container_ref, tc.build_container_ref, tc.rootfs, tc.disk_config,
+                username, password,
+                ssh_keyfile_private_path, kargs, bib_output, journal_output))
+
+    # generate new keyfile
+    if not ssh_keyfile_private_path.exists():
+        subprocess.run([
+            "ssh-keygen",
+            "-N", "",
+            # be very conservative with keys for paramiko
+            "-b", "2048",
+            "-t", "rsa",
+            "-f", os.fspath(ssh_keyfile_private_path),
+        ], check=True)
+    ssh_pubkey = ssh_keyfile_public_path.read_text(encoding="utf8").strip()
+
+    # Because we always build all image types, regardless of what was requested, we should either have 0 results
+    # or all should be available, so if we found at least one result but not all of them, this is a problem with
+    # our setup
+    assert not results or len(results) == len(image_types), \
+        f"unexpected number of results found: requested {image_types} but got {results}"
+
+    if results:
+        yield results
+        return
+
+    print(f"Requested {len(image_types)} images but found {len(results)} cached images. Building...")
+
+    # not all requested image types are available - build them
+    cfg = {
+        "customizations": {
+            "user": [
+                {
+                    "name": "root",
+                    "key": ssh_pubkey,
+                    # cannot use the default /root as it is on a read-only place
+                    "home": "/var/roothome",
+                }, {
+                    "name": username,
+                    "password": password,
+                    "groups": ["wheel"],
+                },
+            ],
+            "kernel": {
+                "append": kargs,
+            },
+            "files": [
+                {
+                    "path": "/etc/some-file",
+                    "data": "some-data",
+                },
+            ],
+            "directories": [
+                {
+                    "path": "/etc/some-dir",
+                },
+            ],
+        },
+    }
+    testutil.maybe_create_filesystem_customizations(cfg, tc)
+    testutil.maybe_create_disk_customizations(cfg, tc)
+    # if we build an iso we cannot have the "home" customization for
+    # user root or images will panic(), cf.
+    # https://github.com/osbuild/images/pull/1806
+    if not image_types[0] in DISK_IMAGE_TYPES:
+        del cfg["customizations"]["user"][0]["home"]
+
+    config_json_path = output_path / "config.json"
+    config_json_path.write_text(json.dumps(cfg), encoding="utf-8")
+    # mask pw
+    for user in cfg["customizations"]["user"]:
+        user["password"] = "***"
+    print(f"config for {output_path} {tc=}: {cfg=}")
+
+    cursor = testutil.journal_cursor()
+
+    upload_args = []
+    creds_args = []
+    target_arch_args = []
+    build_container_args = []
+    if tc.target_arch:
+        target_arch_args = ["--target-arch", tc.target_arch]
+
+    with tempfile.TemporaryDirectory() as tempdir:
+        if "ami" in image_types:
+            creds_file = pathlib.Path(tempdir) / "aws.creds"
+            if testutil.write_aws_creds(creds_file):
+                creds_args = ["-v", f"{creds_file}:/root/.aws/credentials:ro",
+                              "--env", "AWS_PROFILE=default"]
+
+                upload_args = [
+                    f"--aws-ami-name=bootc-image-builder-test-{str(uuid.uuid4())}",
+                    f"--aws-region={AWS_REGION}",
+                    "--aws-bucket=bootc-image-builder-ci",
+                ]
+            elif force_aws_upload:
+                # upload forced but credentials aren't set
+                raise RuntimeError("AWS credentials not available (upload forced)")
+
+        # all disk-image types can be generated via a single build
+        if image_types[0] in DISK_IMAGE_TYPES:
+            types_arg = [f"--type={it}" for it in DISK_IMAGE_TYPES]
+        else:
+            # building an iso
+            types_arg = [f"--type={image_types[0]}"]
+
+        # run container to deploy an image into a bootable disk and upload to a cloud service if applicable
+        cmd = [
+            *testutil.podman_run_common,
+            "-v", f"{config_json_path}:/config.json:ro",
+            "-v", f"{output_path}:/output",
+            "-v", "/var/tmp/osbuild-test-store:/store",  # share the cache between builds
+            "-v", "/var/lib/containers/storage:/var/lib/containers/storage",  # mount the host's containers storage
+        ]
+        if tc.target_arch:
+            # help debug cross-arch issues by making qemu-user print debug output
+            cmd.extend(
+                ["--env", "OSBUILD_EXPERIMENTAL=debug-qemu-user"])
+
+        if tc.podman_terminal:
+            cmd.append("-t")
+
+        if tc.sign:
+            sign_container_image(gpg_conf, registry_conf, tc.container_ref)
+            signed_image_args = [
+                "-v", f"{registry_conf.policy_file}:/etc/containers/policy.json",
+                "-v", f"{registry_conf.lookaside_conf_file}:/etc/containers/registries.d/bib-lookaside.yaml",
+                "-v", f"{registry_conf.sigstore_dir}:{registry_conf.sigstore_dir}",
+                "-v", f"{gpg_conf.pub_key_file}:{gpg_conf.pub_key_file}",
+            ]
+            cmd.extend(signed_image_args)
+
+            # Pull the signed image
+            testutil.pull_container(container_ref, tls_verify=False)
+
+        if tc.build_container_ref:
+            build_container_args = [
+                "--build-container", tc.build_container_ref,
+            ]
+
+        cmd.extend([
+            *creds_args,
+            build_container,
+            container_ref,
+            *build_container_args,
+            *types_arg,
+            *upload_args,
+            *target_arch_args,
+            *tc.bib_rootfs_args(),
+            f"--use-librepo={tc.use_librepo}",
+        ])
+
+        # print the build command for easier tracing
+        print(" ".join(cmd))
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
+        # not using subprocess.check_output() to ensure we get live output
+        # during the test
+        bib_output = ""
+        while True:
+            line = p.stdout.readline()
+            if not line:
+                break
+            print(line, end="")
+            bib_output += line
+        rc = p.wait(timeout=10)
+        assert rc == 0, f"bootc-image-builder failed with return code {rc}"
+
+        journal_output = testutil.journal_after_cursor(cursor)
+        metadata = {}
+        if "ami" in image_types and upload_args:
+            metadata["ami_id"] = parse_ami_id_from_log(journal_output)
+
+            def del_ami():
+                testutil.deregister_ami(metadata["ami_id"], AWS_REGION)
+            request.addfinalizer(del_ami)
+
+    journal_log_path.write_text(journal_output, encoding="utf8")
+    bib_output_path.write_text(bib_output, encoding="utf8")
+
+    results = []
+    for image_type in image_types:
+        results.append(ImageBuildResult(
+            image_type, artifact[image_type], tc.target_arch,
+            container_ref, tc.build_container_ref, tc.rootfs, tc.disk_config,
+            username, password,
+            ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata))
+    yield results
+
+    # Try to cache as much as possible
+    for image_type in image_types:
+        img = artifact[image_type]
+        print(f"Checking disk usage for {img}")
+        if os.path.exists(img):
+            # might already be removed if we're deleting 'raw' and 'ami'
+            disk_usage = shutil.disk_usage(img)
+            print(f"NOTE: disk usage after {img}: {disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}")
+            if disk_usage.free < 1_000_000_000:
+                print(f"WARNING: running low on disk space, removing {img}")
+                img.unlink()
+        else:
+            print("does not exist")
+    subprocess.run(["podman", "rmi", container_ref], check=False)
+    return
+
+
+def test_container_builds(build_container):
+    output = subprocess.check_output([
+        "podman", "images", "-n", build_container], encoding="utf-8")
+    assert build_container in output
+
+
+@pytest.mark.parametrize("image_type", gen_testcases("multidisk"), indirect=["image_type"])
+def test_image_is_generated(image_type):
+    assert image_type.img_path.exists(), "output file missing, dir "\
+        f"content: {os.listdir(os.fspath(image_type.img_path.parent))}"
+
+
+@pytest.mark.parametrize("image_type", gen_testcases("build-container"), indirect=["image_type"])
+def test_build_container_works(image_type):
+    assert image_type.img_path.exists(), "output file missing, dir "\
+        f"content: {os.listdir(os.fspath(image_type.img_path.parent))}"
+
+
+def assert_kernel_args(test_vm, image_type):
+    ret = test_vm.run(["cat", "/proc/cmdline"], user=image_type.username, password=image_type.password)
+    kcmdline = ret.stdout
+    # the kernel arg string must have a space as the prefix and either a space
+    # as suffix or be the last element of the kernel commandline
+    assert re.search(f" {re.escape(image_type.kargs)}( |$)", kcmdline)
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now")
+@pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"])
+def test_image_boots(image_type):
+    assert_disk_image_boots(image_type)
+
+
+def assert_disk_image_boots(image_type):
+    with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm:
+        # user/password login works
+        test_vm.run("true", user=image_type.username, password=image_type.password)
+        # root/ssh login also works
+        ret = test_vm.run("id", user="root", keyfile=image_type.ssh_keyfile_private_path)
+        assert "uid=0" in ret.stdout
+        # check generic image options
+        assert_kernel_args(test_vm, image_type)
+        # ensure bootc points to the right image
+        ret = test_vm.run(["bootc", "status"], user="root", keyfile=image_type.ssh_keyfile_private_path)
+        # XXX: read the full yaml instead?
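+        # (for now a substring match on the human-readable status output
+        # is good enough)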
+        assert f"image: {image_type.container_ref}" in ret.stdout
+
+        if image_type.disk_config:
+            assert_disk_customizations(image_type, test_vm)
+        else:
+            assert_fs_customizations(image_type, test_vm)
+
+        # check file/dir customizations
+        ret = test_vm.run(["stat", "/etc/some-file"], user=image_type.username, password=image_type.password)
+        assert "File: /etc/some-file" in ret.stdout
+        ret = test_vm.run(["stat", "/etc/some-dir"], user=image_type.username, password=image_type.password)
+        assert "File: /etc/some-dir" in ret.stdout
+
+
+@pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"])
+def test_ami_boots_in_aws(image_type, force_aws_upload):
+    if not testutil.write_aws_creds("/dev/null"):  # we don't care about the file, just the variables being there
+        if force_aws_upload:
+            # upload forced but credentials aren't set
+            raise RuntimeError("AWS credentials not available")
+        pytest.skip("AWS credentials not available (upload not forced)")
+
+    # check that upload progress is in the output log. Upload progress looks like:
+    #   4.30 GiB / 10.00 GiB [------------>____________] 43.02% 58.04 MiB p/s
+    assert "] 100.00%" in image_type.bib_output
+    with AWS(image_type.metadata["ami_id"]) as test_vm:
+        test_vm.run("true", user=image_type.username, password=image_type.password)
+        ret = test_vm.run(["echo", "hello"], user=image_type.username, password=image_type.password)
+        assert "hello" in ret.stdout
+
+
+def log_has_osbuild_selinux_denials(log):
+    osbuild_selinux_denials_re = re.compile(r"(?ms)avc:\ +denied.*osbuild")
+    return re.search(osbuild_selinux_denials_re, log)
+
+
+def parse_ami_id_from_log(log_output):
+    ami_id_re = re.compile(r"AMI registered: (?P<ami_id>ami-[a-z0-9]+)\n")
+    ami_ids = ami_id_re.findall(log_output)
+    assert len(ami_ids) > 0
+    return ami_ids[0]
+
+
+def test_osbuild_selinux_denials_re_works():
+    fake_log = (
+        'Dec 05 07:19:39 other log msg\n'
+        'Dec 05 07:19:39 fedora audit: SELINUX_ERR'
+        ' op=security_bounded_transition seresult=denied'
+        ' oldcontext=system_u:system_r:install_t:s0:c42,c355'
+        ' newcontext=system_u:system_r:mount_t:s0:c42,c355\n'
+        'Dec 06 16:00:54 internal audit[14368]: AVC avc: denied '
+        '{ nnp_transition nosuid_transition } for pid=14368 '
+        'comm="org.osbuild.ost" scontext=system_u:system_r:install_t:s0:'
+        'c516,c631 tcontext=system_u:system_r:mount_t:s0:c516,c631 '
+        'tclass=process2 permissive=0'
+    )
+    assert log_has_osbuild_selinux_denials(fake_log)
+    assert not log_has_osbuild_selinux_denials("some\nrandom\nlogs")
+
+
+def has_selinux():
+    return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled", check=False).returncode == 0
+
+
+@pytest.mark.skipif(not has_selinux(), reason="selinux not enabled")
+@pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"])
+def test_image_build_without_se_linux_denials(image_type):
+    pytest.skip("skip until https://github.com/osbuild/bootc-image-builder/issues/645 is resolved")
+
+    # the journal always contains logs from the image building
+    assert image_type.journal_output != ""
+    assert not log_has_osbuild_selinux_denials(image_type.journal_output), \
+        f"denials in log {image_type.journal_output}"
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now")
+@pytest.mark.skipif(not testutil.has_executable("unsquashfs"), reason="need unsquashfs")
+@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"])
+def test_iso_install_img_is_squashfs(tmp_path, image_type):
+    installer_iso_path = image_type.img_path
+    with ExitStack() as cm:
+        mount_point = tmp_path / "cdrom"
+        mount_point.mkdir()
+        subprocess.check_call(["mount", installer_iso_path, os.fspath(mount_point)])
+        cm.callback(subprocess.check_call, ["umount", os.fspath(mount_point)])
+        # ensure install.img is the "flat" squashfs; before PR#777 the content
+        # was an intermediate ext4 image "squashfs-root/LiveOS/rootfs.img"
+        output = subprocess.check_output(["unsquashfs", "-ls", mount_point / "images/install.img"], text=True)
+        assert "usr/bin/bootc" in output
+
+
+@pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"])
+def test_multi_build_request(images):
+    artifacts = set()
+    expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk", "image.tar.gz"}
+    for result in images:
+        filename = os.path.basename(result.img_path)
+        assert result.img_path.exists()
+        artifacts.add(filename)
+    assert artifacts == expected
+
+
+def assert_fs_customizations(image_type, test_vm):
+    """
+    Assert that each mountpoint that appears in the build configuration also appears in mountpoint_sizes.
+
+    TODO: assert that the size of each filesystem (or partition) also matches the expected size based on the
+    customization.
+    """
+    # check the minsize specified in the build configuration for each mountpoint against the sizes in the image
+    # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint
+    ret = test_vm.run(["df", "--all", "--output=target,size"], user="root",
+                      keyfile=image_type.ssh_keyfile_private_path)
+    # parse the output of 'df' into a mountpoint -> size dict for convenience
+    mountpoint_sizes = {}
+    for line in ret.stdout.splitlines()[1:]:
+        fields = line.split()
+        # some filesystems do not report a size with --all
+        if fields[1] == "-":
+            continue
+        # Note that df output is in 1k blocks, not bytes
+        mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10  # in bytes
+
+    cfg = {
+        "customizations": {},
+    }
+    testutil.maybe_create_filesystem_customizations(cfg, image_type)
+    for fs in cfg["customizations"]["filesystem"]:
+        mountpoint = fs["mountpoint"]
+        if mountpoint == "/":
+            # / is actually /sysroot
+            mountpoint = "/sysroot"
+        assert mountpoint in mountpoint_sizes
+
+
+def assert_disk_customizations(image_type, test_vm):
+    ret = test_vm.run(["findmnt", "--json"], user="root",
+                      keyfile=image_type.ssh_keyfile_private_path)
+    findmnt = json.loads(ret.stdout)
+    swapon_ret = test_vm.run(["swapon", "--show"], user="root",
+                             keyfile=image_type.ssh_keyfile_private_path)
+    swapon_output = swapon_ret.stdout
+    if dc := image_type.disk_config:
+        if dc == "lvm":
+            mnts = [mnt for mnt in findmnt["filesystems"][0]["children"]
+                    if mnt["target"] == "/sysroot"]
+            assert len(mnts) == 1
+            assert "/dev/mapper/vg00-rootlv" == mnts[0]["source"]
+            # check swap too
+            assert "7G" in swapon_output
+        elif dc == "btrfs":
+            mnts = [mnt for mnt in findmnt["filesystems"][0]["children"]
+                    if mnt["target"] == "/sysroot"]
+            assert len(mnts) == 1
+            assert "btrfs" == mnts[0]["fstype"]
+            # ensure sysroot comes from the "root" subvolume
+            assert mnts[0]["source"].endswith("[/root]")
+        elif dc == "swap":
+            assert "123M" in swapon_output
diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py
new file mode 100644
index 00000000..8d93603a
--- /dev/null
+++ b/test/bib/test_build_iso.py
@@ -0,0 +1,202 @@
+import os
+import random
+import json
+import platform
+import string
+import subprocess
+import textwrap
+from contextlib import ExitStack
+
+import pytest
+# local test utils
+import testutil
+from containerbuild import build_container_fixture, make_container  # pylint: disable=unused-import
+from testcases import gen_testcases
+from test_build_disk import (
+    assert_kernel_args,
+    ImageBuildResult,
+)
+from test_build_disk import (  # pylint: disable=unused-import
+    gpg_conf_fixture,
+    image_type_fixture,
+    registry_conf_fixture,
+    shared_tmpdir_fixture,
+)
+from vmtest.vm import QEMU
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now")
+@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"])
+def test_iso_installs(image_type):
+    installer_iso_path = image_type.img_path
+    test_disk_path = installer_iso_path.with_name("test-disk.img")
+    with open(test_disk_path, "w", encoding="utf8") as fp:
+        fp.truncate(10_000_000_000)  # 10 GB sparse test disk
+    # install to test disk
+    with QEMU(test_disk_path, cdrom=installer_iso_path) as vm:
+        vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True)
+        vm.force_stop()
+    # boot the test disk and do an extremely simple check
+    with QEMU(test_disk_path) as vm:
+        vm.start(use_ovmf=True)
+        vm.run("true", user=image_type.username, password=image_type.password)
+        assert_kernel_args(vm, image_type)
+
+
+def osinfo_for(it: ImageBuildResult, arch: str) -> str:
+    base = "Media is an installer for OS"
+    if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"):
+        return f"{base} 'CentOS Stream 9 ({arch})'\n"
+    if it.container_ref.endswith("/centos-bootc/centos-bootc:stream10"):
+        return f"{base} 'CentOS Stream 10 ({arch})'\n"
+    if "/fedora/fedora-bootc:" in it.container_ref:
+        ver = it.container_ref.rsplit(":", maxsplit=1)[1]
+        return f"{base} 'Fedora Server {ver} ({arch})'\n"
+    raise ValueError(f"unknown osinfo string for '{it.container_ref}'")
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now")
+@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"])
+def test_iso_os_detection(image_type):
+    installer_iso_path = image_type.img_path
+    arch = image_type.img_arch
+    if not arch:
+        arch = platform.machine()
+    result = subprocess.run([
+        "osinfo-detect",
+        installer_iso_path,
+    ], capture_output=True, text=True, check=True)
+    osinfo_output = result.stdout
+    expected_output = f"Media is bootable.\n{osinfo_for(image_type, arch)}"
+    assert osinfo_output == expected_output
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now")
+@pytest.mark.skipif(not testutil.has_executable("unsquashfs"), reason="need unsquashfs")
+@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"])
+def test_iso_install_img_is_squashfs(tmp_path, image_type):
+    installer_iso_path = image_type.img_path
+    with ExitStack() as cm:
+        mount_point = tmp_path / "cdrom"
+        mount_point.mkdir()
+        subprocess.check_call(["mount", installer_iso_path, os.fspath(mount_point)])
+        cm.callback(subprocess.check_call, ["umount", os.fspath(mount_point)])
+        # ensure install.img is the "flat" squashfs; before PR#777 the content
+        # was an intermediate ext4 image "squashfs-root/LiveOS/rootfs.img"
+        output = subprocess.check_output(["unsquashfs", "-ls", mount_point / "images/install.img"], text=True)
+        assert "usr/bin/bootc" in output
+
+
+@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now")
+@pytest.mark.parametrize("container_ref", [
+    "quay.io/centos-bootc/centos-bootc:stream10",
+    "quay.io/fedora/fedora-bootc:42",
+    "quay.io/centos-bootc/centos-bootc:stream9",
+])
+# pylint: disable=too-many-locals
+def test_bootc_installer_iso_installs(tmp_path, build_container, container_ref):
+    # XXX: duplicated from test_build_disk.py
+    username = "test"
+    password = "".join(
+        random.choices(string.ascii_uppercase + string.digits, k=18))
+    ssh_keyfile_private_path = tmp_path / "ssh-keyfile"
+    ssh_keyfile_public_path = ssh_keyfile_private_path.with_suffix(".pub")
+    if not ssh_keyfile_private_path.exists():
+        subprocess.run([
+            "ssh-keygen",
+            "-N", "",
+            # be very conservative with keys for paramiko
+            "-b", "2048",
+            "-t", "rsa",
+            "-f", os.fspath(ssh_keyfile_private_path),
+        ], check=True)
+    ssh_pubkey = ssh_keyfile_public_path.read_text(encoding="utf8").strip()
+    cfg = {
+        "customizations": {
+            "user": [
+                {
+                    "name": "root",
+                    "key": ssh_pubkey,
+                    # note that we have no "home" here for ISOs
+                }, {
+                    "name": username,
+                    "password": password,
+                    "groups": ["wheel"],
+                },
+            ],
+            "kernel": {
+                # XXX: we need https://github.com/osbuild/images/pull/1786 or no kargs are added to anaconda
+                # XXX2: drop a bunch of the debug flags
+                #
+                # Use console=ttyS0 so that we see output in our debug
+                # logs. By default anaconda prints to the last console=
+                # from the kernel commandline.
+                "append": "systemd.debug-shell=1 rd.systemd.debug-shell=1 inst.debug console=ttyS0",
+            },
+        },
+    }
+    config_json_path = tmp_path / "config.json"
+    config_json_path.write_text(json.dumps(cfg), encoding="utf-8")
+    # create anaconda iso from base
+    cntf_path = tmp_path / "Containerfile"
+    cntf_path.write_text(textwrap.dedent(f"""\n
+    FROM {container_ref}
+    RUN dnf install -y \
+        anaconda-core \
+        anaconda-dracut \
+        anaconda-install-img-deps \
+        biosdevname \
+        grub2-efi-x64-cdboot \
+        net-tools \
+        prefixdevname \
+        python3-mako \
+        lorax-templates-* \
+        squashfs-tools \
+        && dnf clean all
+    # shim-x64 is marked installed but the files are not in the expected
+    # place for https://github.com/osbuild/osbuild/blob/v160/stages/org.osbuild.grub2.iso#L91,
+    # so reinstall it as a workaround. We could add a config option to the
+    # grub2.iso stage to allow a different prefix that would then be used
+    # by anaconda.
+    # If https://github.com/osbuild/osbuild/pull/2204 gets merged we can
+    # update images/ to set the correct efi_src_dirs and this can be
+    # removed (but it's rather ugly).
+    # See also https://bugzilla.redhat.com/show_bug.cgi?id=1750708
+    RUN dnf reinstall -y shim-x64
+    # lorax wants to create a symlink in /mnt which points to /var/mnt
+    # on bootc but /var/mnt does not exist on some images.
+    #
+    # If https://gitlab.com/fedora/bootc/base-images/-/merge_requests/294
+    # gets merged this will no longer be needed.
+    RUN mkdir /var/mnt
+    """), encoding="utf8")
+    output_path = tmp_path / "output"
+    output_path.mkdir()
+    with make_container(tmp_path) as container_tag:
+        cmd = [
+            *testutil.podman_run_common,
+            "-v", f"{config_json_path}:/config.json:ro",
+            "-v", f"{output_path}:/output",
+            "-v", "/var/tmp/osbuild-test-store:/store",  # share the cache between builds
+            "-v", "/var/lib/containers/storage:/var/lib/containers/storage",
+            build_container,
+            "--type", "bootc-installer",
+            "--rootfs", "ext4",
+            "--installer-payload-ref", container_ref,
+            f"localhost/{container_tag}",
+        ]
+        subprocess.check_call(cmd)
+    installer_iso_path = output_path / "bootiso" / "install.iso"
+    test_disk_path = installer_iso_path.with_name("test-disk.img")
+    with open(test_disk_path, "w", encoding="utf8") as fp:
+        fp.truncate(10_000_000_000)  # 10 GB sparse test disk
+    # install to test disk
+    with QEMU(test_disk_path, cdrom=installer_iso_path) as vm:
+        vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True)
+        vm.force_stop()
+    # boot the test disk and do an extremely simple check
+    with QEMU(test_disk_path) as vm:
+        vm.start(use_ovmf=True)
+        vm.run("true", user=username, password=password)
+        ret = vm.run(["bootc", "status"], user="root", keyfile=ssh_keyfile_private_path)
+        assert f"image: {container_ref}" in ret.stdout
diff --git a/test/bib/test_flake8.py b/test/bib/test_flake8.py
new file mode 100644
index 00000000..bfd79219
--- /dev/null
+++ b/test/bib/test_flake8.py
@@ -0,0 +1,11 @@
+import os
+import pathlib
+import subprocess
+
+
+def test_flake8():
+    p = pathlib.Path(__file__).parent
+    # TODO: use all static checks from osbuild instead
+    subprocess.check_call(
+        ["flake8", "--ignore=E402,F811,F401", "--max-line-length=120",
+         os.fspath(p)])
diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py
new file mode 100644
index 00000000..fee8acaa
--- /dev/null
+++ b/test/bib/test_manifest.py
@@ -0,0 +1,1102 @@
+# pylint: disable=too-many-lines
+
+import base64
+import hashlib
+import json
+import pathlib
+import platform
+import subprocess
+import textwrap
+
+import pytest
+
+import testutil
+from containerbuild import build_container_fixture as _
+from containerbuild import make_container
+from testcases import gen_testcases
+
+if not testutil.has_executable("podman"):
+    pytest.skip("no podman, skipping integration tests that require podman", allow_module_level=True)
+if not testutil.can_start_rootful_containers():
+    pytest.skip("tests require the ability to run rootful containers (try: sudo)", allow_module_level=True)
+
+
+def find_image_size_from(manifest_str):
+    manifest = json.loads(manifest_str)
+    for pipl in manifest["pipelines"]:
+        if pipl["name"] == "image":
+            for st in pipl["stages"]:
+                if st["type"] == "org.osbuild.truncate":
+                    return st["options"]["size"]
+    raise ValueError(f"cannot find disk size in manifest:\n{manifest_str}")
+
+
+@pytest.mark.parametrize("tc", gen_testcases("manifest"))
+def test_manifest_smoke(build_container, tc):
+    testutil.pull_container(tc.container_ref, tc.target_arch)
+
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        build_container,
+        "manifest",
+        *tc.bib_rootfs_args(),
+        f"{tc.container_ref}",
+    ])
+    manifest = json.loads(output)
+    # just some basic validation
+    assert manifest["version"] == "2"
+    assert manifest["pipelines"][0]["name"] == "build"
+    disk_size = find_image_size_from(output)
+    # default disk size is 10G
+    assert int(disk_size) == 10 * 1024 * 1024 * 1024
+
+
+@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso"))
+def test_rpm_iso_manifest_smoke(build_container, tc):
+    testutil.pull_container(tc.container_ref, tc.target_arch)
+
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        build_container,
+        "manifest",
+        *tc.bib_rootfs_args(),
+        "--type=anaconda-iso",
+        f"{tc.container_ref}",
+    ])
+    manifest = json.loads(output)
+    # just some basic validation
+    expected_pipeline_names = ["build", "anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"]
+    assert manifest["version"] == "2"
+    assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names
+
+
+def test_bootc_iso_manifest_smoke(build_container):
+    container_ref = "quay.io/centos-bootc/centos-bootc:stream9"
+    # Note that this is not a realistic ref: a generic bootc
+    # image does not contain anaconda, so this won't produce a
+    # working installer. For the purpose of this test, validating
+    # that we get a manifest with the right refs, it is good enough.
+    installer_payload_ref = "quay.io/centos-bootc/centos-bootc:stream10"
+    testutil.pull_container(container_ref)
+    testutil.pull_container(installer_payload_ref)
+
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        build_container,
+        "manifest",
+        "--type=bootc-installer",
+        f"{container_ref}",
+        f"--installer-payload-ref={installer_payload_ref}",
+    ])
+    manifest = json.loads(output)
+    # just some basic validation
+    expected_pipeline_names = ["build", "anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"]
+    assert manifest["version"] == "2"
+    assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names
+
+
+@pytest.mark.parametrize("tc", gen_testcases("manifest"))
+def test_manifest_disksize(tmp_path, build_container, tc):
+    testutil.pull_container(tc.container_ref, tc.target_arch)
+
+    # create a derived container with 6G of silly files to ensure that
+    # bib doubles the size to 12G+
+    cntf_path = tmp_path / "Containerfile"
+    cntf_path.write_text(textwrap.dedent(f"""\n
+    FROM {tc.container_ref}
+    RUN truncate -s 2G /big-file1
+    RUN truncate -s 2G /big-file2
+    RUN truncate -s 2G /big-file3
+    """), encoding="utf8")
+
+    print(f"building big size container from {tc.container_ref}")
+    with make_container(tmp_path) as container_tag:
+        print(f"using {container_tag}")
+        manifest_str = subprocess.check_output([
+            *testutil.podman_run_common,
+            build_container,
+            "manifest",
+            *tc.bib_rootfs_args(),
+            f"localhost/{container_tag}",
+        ], encoding="utf8")
+        # ensure the disk size is bigger than the default 10G
+        disk_size = find_image_size_from(manifest_str)
+        assert int(disk_size) > 11_000_000_000
+
+
+def test_manifest_local_checks_containers_storage_errors(build_container):
+    # note that the
+    # "-v /var/lib/containers/storage:/var/lib/containers/storage"
+    # is missing here
+    res = subprocess.run([
+        # not using *testutil.podman_run_common to test bad usage
+        "podman", "run", "--rm",
+        "--privileged",
+        "--security-opt", "label=type:unconfined_t",
+        build_container,
+        "manifest", "arg-not-used",
+    ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8")
+    assert res.returncode == 1
+    err = 'could not access container storage, ' + \
+          'did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?'
+    assert err in res.stderr
+
+
+@pytest.mark.parametrize("tc", gen_testcases("manifest"))
+def test_manifest_local_checks_containers_storage_works(tmp_path, build_container, tc):
+    testutil.pull_container(tc.container_ref, tc.target_arch)
+
+    cntf_path = tmp_path / "Containerfile"
+    cntf_path.write_text(textwrap.dedent(f"""\n
+    FROM {tc.container_ref}
+    """), encoding="utf8")
+
+    with make_container(tmp_path) as container_tag:
+        subprocess.run([
+            *testutil.podman_run_common,
+            build_container,
+            "manifest",
+            *tc.bib_rootfs_args(),
+            f"localhost/{container_tag}",
+        ], check=True, encoding="utf8")
+
+
+@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross build test only runs on x86")
+def test_manifest_cross_arch_check(tmp_path, build_container):
+    cntf_path = tmp_path / "Containerfile"
+    cntf_path.write_text(textwrap.dedent("""\n
+    # build for x86_64 only
+    FROM quay.io/centos-bootc/centos-bootc:stream9
+    """), encoding="utf8")
+
+    with make_container(tmp_path, arch="x86_64") as container_tag:
+        with pytest.raises(subprocess.CalledProcessError) as exc:
+            subprocess.run([
+                *testutil.podman_run_common,
+                build_container,
+                "manifest", "--target-arch=aarch64",
+                f"localhost/{container_tag}"
+            ], check=True, capture_output=True, encoding="utf8")
+        assert ('cannot generate manifest: requested bootc arch "aarch64" '
+                'does not match available arches [x86_64]') in exc.value.stderr
+
+
+def find_rootfs_type_from(manifest_str):
+    manifest = json.loads(manifest_str)
+    for pipl in manifest["pipelines"]:
+        if pipl["name"] == "image":
+            for st in pipl["stages"]:
+                if st["type"].startswith("org.osbuild.mkfs."):
+                    if st.get("options", {}).get("label") == "root":
+                        return st["type"].rpartition(".")[2]
+    raise ValueError(f"cannot find rootfs type in manifest:\n{manifest_str}")
+
+
+@pytest.mark.parametrize("tc", gen_testcases("default-rootfs"))
+def test_manifest_rootfs_respected(build_container, tc):
+    # TODO: derive container and fake "bootc install print-configuration"?
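+    # (the expected rootfs type below is bib's built-in default for the
+    # distro, used when the container itself does not configure one)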
+    testutil.pull_container(tc.container_ref)
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        build_container,
+        "manifest", f"{tc.container_ref}",
+    ])
+    rootfs_type = find_rootfs_type_from(output)
+    match tc.container_ref:
+        case "quay.io/centos-bootc/centos-bootc:stream9":
+            assert rootfs_type == "xfs"
+        case _:
+            pytest.fail(f"unknown container_ref {tc.container_ref}, please update test")
+
+
+def test_manifest_rootfs_override(build_container):
+    # no need to parameterize this test, --rootfs behaves the same for all containers
+    container_ref = "quay.io/centos-bootc/centos-bootc:stream9"
+
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        build_container,
+        "manifest", "--rootfs", "btrfs", f"{container_ref}",
+    ])
+    rootfs_type = find_rootfs_type_from(output)
+    assert rootfs_type == "btrfs"
+
+
+def find_user_stage_from(manifest_str):
+    manifest = json.loads(manifest_str)
+    for pipl in manifest["pipelines"]:
+        if pipl["name"] == "image":
+            for st in pipl["stages"]:
+                if st["type"] == "org.osbuild.users":
+                    return st
+    raise ValueError(f"cannot find users stage in manifest:\n{manifest_str}")
+
+
+def test_manifest_user_customizations_toml(tmp_path, build_container):
+    # no need to parameterize this test, toml is the same for all containers
+    container_ref = "quay.io/centos-bootc/centos-bootc:stream9"
+    testutil.pull_container(container_ref)
+
+    config_toml_path = tmp_path / "config.toml"
+    config_toml_path.write_text(textwrap.dedent("""\
+    [[customizations.user]]
+    name = "alice"
+    password = "$5$xx$aabbccddeeffgghhiijj"  # notsecret
+    key = "ssh-rsa AAA ... user@email.com"
+    groups = ["wheel"]
+    """))
+    output = subprocess.check_output([
+        *testutil.podman_run_common,
+        "-v", f"{config_toml_path}:/config.toml:ro",
+        build_container,
+        "manifest", f"{container_ref}",
+    ])
+    user_stage = find_user_stage_from(output)
+    assert user_stage["options"]["users"].get("alice") == {
+        # use a very fake password here; if it looks too real the
+        # infosec "leak detect" gets very nervous
+        "password": "$5$xx$aabbccddeeffgghhiijj",  # notsecret
+        "key": "ssh-rsa AAA ... user@email.com",
user@email.com", + "groups": ["wheel"], + } + + +def test_manifest_installer_customizations(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [customizations.installer.kickstart] + contents = \"\"\" + autopart --type=lvm + \"\"\" + """)) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_toml_path}:/config.toml:ro", + build_container, + "manifest", "--type=anaconda-iso", f"{container_ref}", + ]) + manifest = json.loads(output) + + # expected values for the following inline file contents + ks_content = textwrap.dedent("""\ + %include /run/install/repo/osbuild-base.ks + autopart --type=lvm + """).encode("utf8") + expected_data = base64.b64encode(ks_content).decode() + expected_content_hash = hashlib.sha256(ks_content).hexdigest() + expected_content_id = f"sha256:{expected_content_hash}" # hash with algo prefix + + # check the inline source for the custom kickstart contents + assert expected_content_id in manifest["sources"]["org.osbuild.inline"]["items"] + assert manifest["sources"]["org.osbuild.inline"]["items"][expected_content_id]["data"] == expected_data + + +def test_mount_ostree_error(tmpdir_factory, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "filesystem": [ + { + "mountpoint": "/", + "minsize": "12GiB" + }, + { + "mountpoint": "/var/log", + "minsize": "1GiB" + }, + { + "mountpoint": "/ostree", + "minsize": "10GiB" + }, + ] + }, + }, + } + + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + with pytest.raises(subprocess.CalledProcessError) as exc: + subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{output_path}:/output", + build_container, + "manifest", f"{container_ref}", + "--config", "/output/config.json", + ], stderr=subprocess.PIPE, encoding="utf8") + assert 'the following errors occurred while validating custom mountpoints:\npath "/ostree" is not allowed' \ + in exc.value.stderr + + +@pytest.mark.parametrize( + "container_ref,should_error,expected_error", + [ + ("quay.io/centos/centos:stream9", True, "image quay.io/centos/centos:stream9 is not a bootc image"), + ("quay.io/centos-bootc/centos-bootc:stream9", False, None), + ], +) +def test_manifest_checks_build_container_is_bootc(build_container, container_ref, should_error, expected_error): + def check_image_ref(): + testutil.pull_container(container_ref) + subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + container_ref, + ], stderr=subprocess.PIPE, encoding="utf8") + if should_error: + with pytest.raises(subprocess.CalledProcessError) as exc: + check_image_ref() + assert expected_error in exc.value.stderr + else: + check_image_ref() + + +@pytest.mark.parametrize("tc", gen_testcases("target-arch-smoke")) +def test_manifest_target_arch_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + + # TODO: actually build an image too + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + *tc.bib_rootfs_args(), + 
f"--target-arch={tc.target_arch}", + tc.container_ref, + ]) + manifest = json.loads(output) + # just minimal validation, we could in theory look at the partition + # table be beside this there is relatively little that is different + assert manifest["version"] == "2" + assert manifest["pipelines"][0]["name"] == "build" + + +def find_image_anaconda_stage(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "anaconda-tree": + for st in pipl["stages"]: + if st["type"] == "org.osbuild.anaconda": + return st + raise ValueError(f"cannot find disk size in manifest:\n{manifest_str}") + + +@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) +def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + + cfg = { + "customizations": { + "installer": { + "modules": { + "enable": [ + "org.fedoraproject.Anaconda.Modules.Localization", + # disable takes precedence + "org.fedoraproject.Anaconda.Modules.Timezone", + ], + "disable": [ + # defaults can be disabled as well + "org.fedoraproject.Anaconda.Modules.Users", + # disable takes precedence + "org.fedoraproject.Anaconda.Modules.Timezone", + ] + }, + }, + }, + } + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{output_path}:/output", + build_container, + "manifest", + "--config", "/output/config.json", + *tc.bib_rootfs_args(), + "--type=anaconda-iso", tc.container_ref, + ]) + st = find_image_anaconda_stage(output) + assert "org.fedoraproject.Anaconda.Modules.Localization" in st["options"]["activatable-modules"] + assert "org.fedoraproject.Anaconda.Modules.Users" not in st["options"]["activatable-modules"] + assert "org.fedoraproject.Anaconda.Modules.Timezone" not in st["options"]["activatable-modules"] + + +def find_fs_mount_info_from(manifest_str): + manifest = json.loads(manifest_str) + mount_stages = [] + # normally there should be only one swap partition, but there's no technical reason you can't have multiple + swap_stages = [] + for pipeline in manifest["pipelines"]: + # the mount unit stages in cross-arch manifests are in the "ostree-deployment" pipeline + if pipeline["name"] in ("image", "ostree-deployment"): + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.systemd.unit.create": + options = st["options"] + if options["filename"].endswith(".mount"): + mount_stages.append(st) + elif options["filename"].endswith(".swap"): + swap_stages.append(st) + + if not mount_stages: + raise ValueError(f"cannot find mount unit creation stages in manifest:\n{manifest_str}") + + mounts = [] + for stage in mount_stages: + options = stage["options"]["config"] + mounts.append(options["Mount"]) + + swaps = [] + for stage in swap_stages: + options = stage["options"]["config"] + swaps.append(options["Swap"]) + + return mounts, swaps + + +@pytest.mark.parametrize("fscustomizations,rootfs", [ + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "xfs"), + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB", "/var/data": "42 GiB"}, "ext4"), + ({"/": "2 GiB"}, "btrfs"), + ({}, "ext4"), + ({}, "xfs"), + ({}, "btrfs"), +]) +def test_manifest_fs_customizations(tmp_path, 
build_container, fscustomizations, rootfs): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config = { + "customizations": { + "filesystem": [{"mountpoint": mnt, "minsize": minsize} for mnt, minsize in fscustomizations.items()], + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + "manifest", f"{container_ref}", + ]) + assert_fs_customizations(fscustomizations, rootfs, output) + + +def test_manifest_fs_customizations_smoke_toml(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + rootfs = "xfs" + + expected_fs_customizations = { + "/": 10 * 1024 * 1024 * 1024, + "/var/data": 20 * 1024 * 1024 * 1024, + } + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [[customizations.filesystem]] + mountpoint = "/" + minsize = "10 GiB" + + [[customizations.filesystem]] + mountpoint = "/var/data" + minsize = "20 GiB" + """)) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_toml_path}:/config.toml:ro", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + "manifest", f"{container_ref}", + ]) + assert_fs_customizations(expected_fs_customizations, rootfs, output) + + +def assert_fs_customizations(customizations, fstype, manifest): + mounts, _ = find_fs_mount_info_from(manifest) + + manifest_mountpoints = set() + for mount in mounts: + manifest_mountpoints.add(mount["Where"]) + if mount["Where"] == "/boot/efi": + assert mount["Type"] == "vfat" + continue + + if fstype == "btrfs" and mount["Where"] == "/boot": + # /boot keeps its default fstype when using btrfs + assert mount["Type"] == "ext4" + continue + + assert mount["Type"] == fstype, f"incorrect filesystem type for {mount['Where']}" + + # check that all fs customizations appear in the manifest + for custom_mountpoint in customizations: + assert custom_mountpoint in manifest_mountpoints + + +@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross arch test only runs on x86") +@pytest.mark.parametrize("fscustomizations,rootfs", [ + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "xfs"), + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB", "/var/data": "42 GiB"}, "ext4"), + ({"/": "2 GiB"}, "btrfs"), + ({}, "ext4"), + ({}, "xfs"), + ({}, "btrfs"), +]) +def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomizations, rootfs): + target_arch = "aarch64" + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref, target_arch) + + config = { + "customizations": { + "filesystem": [{"mountpoint": mnt, "minsize": minsize} for mnt, minsize in fscustomizations.items()], + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + f"--target-arch={target_arch}", + "manifest", f"{container_ref}", + ]) + + 
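+    # note: assert_fs_customizations() below also covers the cross-arch
+    # case, because find_fs_mount_info_from() scans the "ostree-deployment"
+    # pipeline where the mount units of --target-arch manifests end up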
assert_fs_customizations(fscustomizations, rootfs, output) + + +def find_grub2_iso_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + for st in pipl["stages"]: + if st["type"] == "org.osbuild.grub2.iso": + return st + raise ValueError(f"cannot find grub2.iso stage in manifest:\n{manifest_str}") + + +def test_manifest_fips_customization(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config = { + "customizations": { + "fips": True, + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + # XXX: test for qcow2 too + "--type=anaconda-iso", + "manifest", f"{container_ref}", + ], text=True) + st = find_grub2_iso_stage_from(output) + assert "fips=1" in st["options"]["kernel"]["opts"] + + +def find_bootc_install_to_fs_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipeline in manifest["pipelines"]: + # the fstab stage in cross-arch manifests is in the "ostree-deployment" pipeline + if pipeline["name"] == "image": + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.bootc.install-to-filesystem": + return st + raise ValueError(f"cannot find bootc.install-to-filesystem stage in manifest:\n{manifest_str}") + + +def test_manifest_disk_customization_lvm(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config = textwrap.dedent("""\ + [[customizations.disk.partitions]] + type = "lvm" + minsize = "10 GiB" + + [[customizations.disk.partitions.logical_volumes]] + minsize = "10 GiB" + fs_type = "ext4" + mountpoint = "/" + """) + config_path = tmp_path / "config.toml" + config_path.write_text(config) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.toml:ro", + build_container, + "manifest", f"{container_ref}", + ]) + st = find_bootc_install_to_fs_stage_from(output) + assert st["devices"]["rootlv"]["type"] == "org.osbuild.lvm2.lv" + + +def test_manifest_disk_customization_dos(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config = textwrap.dedent("""\ + [customizations.disk] + type = "dos" + """) + config_path = tmp_path / "config.toml" + config_path.write_text(config) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.toml:ro", + build_container, + "manifest", f"{container_ref}", + ]) + st = find_stage_options_from(output, "org.osbuild.sfdisk") + assert st["label"] == "dos" + + +def test_manifest_disk_customization_btrfs(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "type": "btrfs", + "minsize": "10 GiB", + "subvolumes": [ + { + "name": "root", + "mountpoint": "/", + } + ] + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + 
build_container, + "manifest", f"{container_ref}", + ]) + st = find_bootc_install_to_fs_stage_from(output) + assert st["mounts"][0]["type"] == "org.osbuild.btrfs" + assert st["mounts"][0]["target"] == "/" + + +def find_mkswap_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipeline in manifest["pipelines"]: + if pipeline["name"] == "image": + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.mkswap": + return st + raise ValueError(f"cannot find mkswap stage in manifest:\n{manifest_str}") + + +def test_manifest_disk_customization_swap(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "minsize": "2 GiB", + "fs_type": "swap", + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + mkswap_stage = find_mkswap_stage_from(output) + assert mkswap_stage["options"].get("uuid") + swap_uuid = mkswap_stage["options"]["uuid"] + _, swaps = find_fs_mount_info_from(output) + what_node = f"/dev/disk/by-uuid/{swap_uuid}" + assert { + "What": what_node, + "Options": "defaults", + } in swaps + + +def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "type": "lvm", + "minsize": "10 GiB", + "logical_volumes": [ + { + "minsize": "2 GiB", + "fs_type": "swap", + } + ] + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + mkswap_stage = find_mkswap_stage_from(output) + assert mkswap_stage["options"].get("uuid") + swap_uuid = mkswap_stage["options"]["uuid"] + _, swaps = find_fs_mount_info_from(output) + what_node = f"/dev/disk/by-uuid/{swap_uuid}" + assert { + "What": what_node, + "Options": "defaults", + } in swaps + # run osbuild schema validation, see gh#748 + if not testutil.has_executable("osbuild"): + pytest.skip("no osbuild executable") + osbuild_manifest_path = tmp_path / "manifest.json" + osbuild_manifest_path.write_bytes(output) + subprocess.run(["osbuild", osbuild_manifest_path.as_posix()], check=True) + + +@pytest.mark.parametrize("use_librepo", [False, True]) +def test_iso_manifest_use_librepo(build_container, use_librepo): + # no need to parameterize this test, --use-librepo behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + "--type=anaconda-iso", + container_ref, + f"--use-librepo={use_librepo}", + ]) + manifest = json.loads(output) + if use_librepo: + assert "org.osbuild.librepo" in manifest["sources"] + else: + assert "org.osbuild.curl" in manifest["sources"] + + +def test_manifest_customization_custom_file_smoke(tmp_path, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = 
"quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "files": [ + { + "path": "/etc/custom_file", + "data": "hello world" + }, + ], + "directories": [ + { + "path": "/etc/custom_dir", + }, + ], + }, + }, + } + + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{output_path}:/output", + build_container, + "manifest", f"{container_ref}", + "--config", "/output/config.json", + ], stderr=subprocess.PIPE, encoding="utf8") + json.loads(output) + assert '"to":"tree:///etc/custom_file"' in output + assert ('{"type":"org.osbuild.mkdir","options":{"paths":' + '[{"path":"/etc/custom_dir","exist_ok":true}]},' + '"devices":{"disk":{"type":"org.osbuild.loopback"' + ',"options":{"filename":"disk.raw"') in output + + +def find_stage_options_from(manifest_str, stage_type): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + for st in pipl["stages"]: + if st["type"] == stage_type: + return st["options"] + raise ValueError(f"cannot find {stage_type} stage manifest:\n{manifest_str}") + + +def test_manifest_image_customize_filesystem(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "filesystem": [ + { + "mountpoint": "/boot", + "minsize": "3GiB" + } + ] + }, + }, + } + + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + # create derrived container with filesystem customization + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY config.json /usr/lib/bootc-image-builder/ + """), encoding="utf8") + + print(f"building filesystem customize container from {container_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + sfdisk_options = find_stage_options_from(manifest_str, "org.osbuild.sfdisk") + assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 + + +def test_manifest_image_customize_disk(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "disk": { + "partitions": [ + { + "label": "var", + "mountpoint": "/var", + "fs_type": "ext4", + "minsize": "3 GiB", + }, + ], + }, + }, + }, + } + + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + # create derrived container with disk customization + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY config.json /usr/lib/bootc-image-builder/ + """), encoding="utf8") + + print(f"building filesystem customize container from {container_ref}") + with 
make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + sfdisk_options = find_stage_options_from(manifest_str, "org.osbuild.sfdisk") + assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 + + +def test_manifest_image_disk_yaml(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + disk_yaml = textwrap.dedent("""--- + #enabled once https://github.com/osbuild/images/pull/1834 is in + #mount_configuration: none + partition_table: + size: '8589934592' + partitions: + - bootable: true + size: 1 MiB + type: 21686148-6449-6E6F-744E-656564454649 + uuid: fac7f1fb-3e8d-4137-a512-961de09a5549 + - bootable: false + label: efi + payload: + label: ESP + mountpoint: /boot/efi + type: vfat + payload_type: filesystem + size: '104857600' + type: c12a7328-f81f-11d2-ba4b-00a0c93ec93b + uuid: 68b2905b-df3e-4fb3-80fa-49d1e773aa33 + - label: ukiboot_a + size: '134217728' + type: df331e4d-be00-463f-b4a7-8b43e18fb53a + uuid: CD3B4BE3-0139-4A63-8060-658554C7273B + payload_type: raw + payload: + source_path: /usr/lib/modules/5.0-x86_64/aboot.img + - label: ukiboot_b + size: '134217728' + type: df331e4d-be00-463f-b4a7-8b43e18fb53a + uuid: E4D4DA50-7050-41AE-A5F9-DEF12B94DFB5 + - label: ukibootctl + size: '1048576' + type: fefd9070-346f-4c9a-85e6-17f07f922773 + uuid: 5A6F3ADE-EEB0-11EF-A838-E89C256C3906 + - label: root + payload: + label: root + mountpoint: / + type: ext4 + payload_type: filesystem + type: b921b045-1df0-41c3-af44-4c6f280d3fae + uuid: 6264d520-3fb9-423f-8ab8-7a0a8e3d3562 + """) + + disk_yaml_path = tmp_path / "disk.yaml" + disk_yaml_path.write_text(disk_yaml, encoding="utf-8") + + testdata_path = tmp_path / "fake-aboot.img" + testdata_path.write_text("fake aboot.img content", encoding="utf-8") + + # Create derived container with the custom partitioning with an aboot + # partition and a kernel module dir with an aboot.img file + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY disk.yaml /usr/lib/bootc-image-builder/ + # add a preditable aboot.img for the write-device tes + RUN mkdir -p -m 0755 /usr/lib/modules/5.0-x86_64/ + COPY fake-aboot.img /usr/lib/modules/5.0-x86_64/aboot.img + """), encoding="utf8") + + print(f"building filesystem customize container from {container_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + write_device_options = find_stage_options_from(manifest_str, "org.osbuild.write-device") + assert write_device_options["from"] == "input://tree/usr/lib/modules/5.0-x86_64/aboot.img" + + +@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) +def test_ova_manifest_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + *tc.bib_rootfs_args(), + "--type=ova", + f"{tc.container_ref}", + ]) + # just some basic validation that we generate a ova + assert 
find_stage_options_from(output, "org.osbuild.tar") == { + "filename": "image.ova", + "format": "ustar", + "paths": [ + "image.ovf", + "image.mf", + "image.vmdk" + ] + } + + +def test_manifest_warns_on_unsupported(tmp_path, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [[customizations.repositories]] + id = "foo" + """)) + res = subprocess.run([ + *testutil.podman_run_common, + "-v", f"{config_toml_path}:/config.toml:ro", + build_container, + "manifest", f"{container_ref}", + ], check=True, capture_output=True, text=True) + assert ('blueprint validation failed for image type "qcow2": ' + 'customizations.repositories: not supported' in res.stderr) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py new file mode 100644 index 00000000..c12a8a76 --- /dev/null +++ b/test/bib/test_opts.py @@ -0,0 +1,181 @@ +import os +import platform +import subprocess + +import pytest +import testutil +# pylint: disable=unused-import +from containerbuild import build_container_fixture, build_fake_container_fixture + + +@pytest.fixture(name="container_storage", scope="session") +def container_storage_fixture(tmp_path_factory): + # share systemwide storage when running as root, this makes the GH + # tests faster because they already have the test images used here + if os.getuid() == 0: + return "/var/lib/containers/storage" + return tmp_path_factory.mktemp("storage") + + +@pytest.mark.parametrize("chown_opt,expected_uid_gid", [ + ([], (0, 0)), + (["--chown", "1000:1000"], (1000, 1000)), + (["--chown", "1000"], (1000, 0)), +]) +def test_bib_chown_opts(tmp_path, container_storage, build_fake_container, chown_opt, expected_uid_gid): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + container_ref, + ] + chown_opt) + expected_output_disk = output_path / "qcow2/disk.qcow2" + for p in output_path, expected_output_disk: + assert p.exists() + assert p.stat().st_uid == expected_uid_gid[0] + assert p.stat().st_gid == expected_uid_gid[1] + + +@pytest.mark.parametrize("target_arch_opt, expected_err", [ + ([], ""), + (["--target-arch=amd64"], ""), + (["--target-arch=x86_64"], ""), + (["--target-arch=arm64"], "cannot build iso for different target arches yet"), +]) +@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross build test only runs on x86") +def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_arch_opt, expected_err): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "--type=iso", + container_ref, + ] + target_arch_opt, check=False, capture_output=True, text=True) + if expected_err == "": + assert 
res.returncode == 0 + else: + assert res.returncode != 0 + assert expected_err in res.stderr + + +@pytest.mark.parametrize("with_debug", [False, True]) +def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, with_debug): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + log_debug = ["--log-level", "debug"] if with_debug else [] + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + *log_debug, + container_ref, + ], check=True, capture_output=True, text=True) + assert ('level=debug' in res.stderr) == with_debug + + +def test_bib_help_hides_config(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "manifest", "--help", + ], check=True, capture_output=True, text=True) + # --config should not be user visible + assert '--config' not in res.stdout + # but other options should be + assert '--log-level' in res.stdout + + +def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "localhost/no-such-image", + ], check=False, capture_output=True, text=True) + needle = "cannot build manifest: failed to inspect the image:" + assert res.stderr.count(needle) == 1 + + +@pytest.mark.parametrize("version_argument", ["version", "--version"]) +def test_bib_version(tmp_path, container_storage, build_fake_container, version_argument): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + version_argument, + ], check=True, capture_output=True, text=True) + + expected_rev = "unknown" + git_res = subprocess.run( + ["git", "describe", "--always"], + capture_output=True, text=True, check=False) + if git_res.returncode == 0: + expected_rev = git_res.stdout.strip() + assert f"build_revision: {expected_rev}" in res.stdout + assert "build_time: " in res.stdout + assert "build_tainted: " in res.stdout + # we have a final newline + assert res.stdout[-1] == "\n" + + +def test_bib_no_outside_container_warning_in_container(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + container_ref, + ], 
check=True, capture_output=True, text=True)
+    assert "running outside a container" not in res.stderr
diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py
new file mode 100644
index 00000000..678685d1
--- /dev/null
+++ b/test/bib/test_progress.py
@@ -0,0 +1,100 @@
+import subprocess
+
+import pytest
+
+import testutil
+# pylint: disable=unused-import,duplicate-code
+from test_opts import container_storage_fixture
+from containerbuild import (
+    build_container_fixture,
+    build_erroring_container_fixture,
+    build_fake_container_fixture,
+)
+
+
+def test_progress_debug(tmp_path, build_fake_container):
+    container_ref = "quay.io/centos-bootc/centos-bootc:stream9"
+    testutil.pull_container(container_ref)
+
+    output_path = tmp_path / "output"
+    output_path.mkdir(exist_ok=True)
+
+    cmdline = [
+        *testutil.podman_run_common,
+        build_fake_container,
+        "build",
+        "--progress=debug",
+        container_ref,
+    ]
+    res = subprocess.run(cmdline, capture_output=True, check=True, text=True)
+    assert res.stderr.count("Start progressbar") == 1
+    assert res.stderr.count("Manifest generation step") == 1
+    assert res.stderr.count("Disk image building step") == 1
+    assert res.stderr.count("Build complete") == 1
+    assert res.stderr.count("Stop progressbar") == 1
+    assert res.stdout.strip() == ""
+
+
+def test_progress_term_works_without_tty(tmp_path, build_fake_container):
+    container_ref = "quay.io/centos-bootc/centos-bootc:stream9"
+    testutil.pull_container(container_ref)
+
+    output_path = tmp_path / "output"
+    output_path.mkdir(exist_ok=True)
+
+    cmdline = [
+        *testutil.podman_run_common,
+        # note that "-t" is missing here
+        build_fake_container,
+        "build",
+        # explicitly selecting term progress works even when there is no tty
+        # (i.e. we just need ansi terminal support)
+        "--progress=term",
+        container_ref,
+    ]
+    res = subprocess.run(cmdline, capture_output=True, text=True, check=False)
+    assert res.returncode == 0
+    assert "[|] Manifest generation step" in res.stderr
+
+
+def test_progress_term_autoselect(tmp_path, build_fake_container):
+    output_path = tmp_path / "output"
+    output_path.mkdir(exist_ok=True)
+
+    cmdline = [
+        *testutil.podman_run_common,
+        # we have a terminal
+        "-t",
+        build_fake_container,
+        "build",
+        # note that we do not select a --progress here so auto-select is used
+        "quay.io/centos-bootc/centos-bootc:stream9",
+    ]
+    res = subprocess.run(cmdline, capture_output=True, text=True, check=False)
+    assert res.returncode == 0
+    # it's curious that we get the output on stdout here, podman weirdness?
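+    # (a plausible explanation: with "-t" podman allocates a pty for the
+    # container, and a pty has a single output stream, so anything the
+    # container writes to stderr arrives on stdout here)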
+ assert "[|] Manifest generation step" in res.stdout + + +@pytest.mark.skipif(not testutil.can_start_rootful_containers, reason="require a rootful containers (try: sudo)") +@pytest.mark.parametrize("progress", ["term", "verbose"]) +def test_progress_error_reporting(tmp_path, build_erroring_container, progress): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + cmdline = [ + *testutil.podman_run_common, + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + # we have a terminal + "-t", + build_erroring_container, + "build", + f"--progress={progress}", + "quay.io/centos-bootc/centos-bootc:stream9", + ] + res = subprocess.run(cmdline, capture_output=True, text=True, check=False) + assert "osbuild-stage-stdout-output" in res.stdout + assert "osbuild-stage-stderr-output" in res.stdout + assert "output-from-osbuild-stdout" in res.stdout + assert "output-from-osbuild-stderr" in res.stdout + assert res.returncode == 1 diff --git a/test/bib/test_pylint.py b/test/bib/test_pylint.py new file mode 100644 index 00000000..eb1c386b --- /dev/null +++ b/test/bib/test_pylint.py @@ -0,0 +1,18 @@ +import pathlib +import subprocess + + +def test_pylint(): + p = pathlib.Path(__file__).parent + subprocess.check_call( + ["pylint", + "--disable=fixme", + "--disable=missing-class-docstring", + "--disable=missing-module-docstring", + "--disable=missing-function-docstring", + "--disable=too-many-instance-attributes", + # false positive because of "if yield else yield" in + # the "build_container" fixture, see + # https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/contextmanager-generator-missing-cleanup.html + "--disable=contextmanager-generator-missing-cleanup", + "--max-line-length=120"] + list(p.glob("*.py"))) diff --git a/test/bib/testcases.py b/test/bib/testcases.py new file mode 100644 index 00000000..97326158 --- /dev/null +++ b/test/bib/testcases.py @@ -0,0 +1,149 @@ +import dataclasses +import inspect +import os +import platform + +# disk image types can be build from a single manifest +DISK_IMAGE_TYPES = ["qcow2", "raw", "vmdk", "vhd", "gce"] + +# supported images that can be booted in a cloud +CLOUD_BOOT_IMAGE_TYPES = ["ami"] + + +@dataclasses.dataclass +class TestCase: + # container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 + container_ref: str = "" + # optional build_container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 + build_container_ref: str = "" + # image is the image type, e.g. "ami" + image: str = "" + # target_arch is the target archicture, empty means current arch + target_arch: str = "" + # rootfs to use (e.g. ext4), some containers like fedora do not + # have a default rootfs. If unset the container default is used. 
+ rootfs: str = "" + # Sign the container_ref and use the new signed image instead of the original one + sign: bool = False + # use special disk_config like "lvm" + disk_config: str = "" + # use librepo for the downloading + use_librepo: bool = False + # podman_terminal enables the podman -t option to get progress + podman_terminal: bool = False + + def bib_rootfs_args(self): + if self.rootfs: + return ["--rootfs", self.rootfs] + return [] + + def __str__(self): + return ",".join([ + f"{name}={attr}" + for name, attr in inspect.getmembers(self) + if not name.startswith("_") and not callable(attr) and attr + ]) + + +@dataclasses.dataclass +class TestCaseFedora(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:42" + rootfs: str = "btrfs" + use_librepo: bool = True + + +@dataclasses.dataclass +class TestCaseFedora43(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:43" + rootfs: str = "btrfs" + use_librepo: bool = True + + +@dataclasses.dataclass +class TestCaseC9S(TestCase): + container_ref: str = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/centos-bootc:stream9") + use_librepo: bool = True + use_terminal: bool = True + + +@dataclasses.dataclass +class TestCaseC10S(TestCase): + container_ref: str = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/centos-bootc:stream10") + use_librepo: bool = True + + +def test_testcase_nameing(): + """ + Ensure the testcase naming does not change without us knowing as those + are visible when running "pytest --collect-only" + """ + tc = TestCaseFedora() + expected = "container_ref=quay.io/fedora/fedora-bootc:40,rootfs=btrfs" + assert f"{tc}" == expected, f"{tc} != {expected}" + + +def gen_testcases(what): # pylint: disable=too-many-return-statements + if what == "manifest": + return [TestCaseC9S(), TestCaseFedora(), TestCaseC10S()] + if what == "default-rootfs": + # Fedora doesn't have a default rootfs + return [TestCaseC9S()] + if what == "ami-boot": + return [TestCaseC9S(image="ami"), TestCaseFedora(image="ami")] + if what == "anaconda-iso": + return [ + TestCaseFedora(image="anaconda-iso", sign=True), + TestCaseC9S(image="anaconda-iso"), + TestCaseC10S(image="anaconda-iso"), + ] + if what == "qemu-cross": + test_cases = [] + if platform.machine() == "x86_64": + # 2025-09-19: disabled because CI hangs, see + # https://github.com/osbuild/bootc-image-builder/actions/runs/17821609665 + # test_cases.append( + # TestCaseC9S(image="raw", target_arch="arm64")) + pass + elif platform.machine() == "arm64": + # TODO: add arm64->x86_64 cross build test too + pass + return test_cases + if what == "qemu-boot": + return [ + # test default partitioning + TestCaseFedora(image="qcow2"), + # test with custom disk configs + TestCaseC9S(image="qcow2", disk_config="swap"), + TestCaseFedora43(image="raw", disk_config="btrfs"), + TestCaseC9S(image="raw", disk_config="lvm"), + ] + if what == "all": + return [ + klass(image=img) + for klass in (TestCaseC9S, TestCaseFedora) + for img in CLOUD_BOOT_IMAGE_TYPES + DISK_IMAGE_TYPES + ["anaconda-iso"] + ] + if what == "multidisk": + # single test that specifies all image types + image = "+".join(DISK_IMAGE_TYPES) + return [ + TestCaseC9S(image=image), + TestCaseFedora(image=image), + ] + # Smoke test that all supported --target-arch architecture can + # create a manifest + if what == "target-arch-smoke": + return [ + TestCaseC9S(target_arch="arm64"), + TestCaseFedora(target_arch="ppc64le"), + TestCaseFedora(target_arch="s390x"), + ] + if what == "build-container": + 
return [ + TestCaseC9S(build_container_ref="quay.io/centos-bootc/centos-bootc:stream10", image="qcow2"), + ] + raise ValueError(f"unknown test-case type {what}") diff --git a/test/bib/testutil.py b/test/bib/testutil.py new file mode 100644 index 00000000..096d8f66 --- /dev/null +++ b/test/bib/testutil.py @@ -0,0 +1,206 @@ +import os +import pathlib +import platform +import shutil +import subprocess + +import boto3 +from botocore.exceptions import ClientError + + +def run_journalctl(*args): + pre = [] + if platform.system() == "Darwin": + pre = ["podman", "machine", "ssh"] + cmd = pre + ["journalctl"] + list(args) + return subprocess.check_output(cmd, encoding="utf-8").strip() + + +def journal_cursor(): + output = run_journalctl("-n0", "--show-cursor") + cursor = output.rsplit("\n", maxsplit=1)[-1] + return cursor.split("cursor: ")[-1] + + +def journal_after_cursor(cursor): + output = run_journalctl(f"--after-cursor={cursor}") + return output + + +def has_executable(name): + return shutil.which(name) is not None + + +def has_x86_64_v3_cpu(): + # x86_64-v3 has multiple features, see + # https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels + # but "avx2" is probably a good enough proxy + return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text("utf8") + + +def can_start_rootful_containers(): + system = platform.system() + if system == "Linux": + # on linux we need to run "podman" with sudo to get full + # root containers + return os.getuid() == 0 + if system == "Darwin": + # on darwin a container is root if the podman machine runs + # in "rootful" mode, i.e. no need to run "podman" as root + # as it's just proxying to the VM + res = subprocess.run([ + "podman", "machine", "inspect", "--format={{.Rootful}}", + ], capture_output=True, encoding="utf8", check=True) + return res.stdout.strip() == "true" + raise ValueError(f"unknown platform {system}") + + +def write_aws_creds(path): + key_id = os.environ.get("AWS_ACCESS_KEY_ID") + secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + if not key_id or not secret_key: + return False + + with open(path, mode="w", encoding="utf-8") as creds_file: + creds_file.write("[default]\n") + creds_file.write(f"aws_access_key_id = {key_id}\n") + creds_file.write(f"aws_secret_access_key = {secret_key}\n") + + return True + + +def deregister_ami(ami_id, aws_region): + ec2 = boto3.resource("ec2", region_name=aws_region) + try: + print(f"Deregistering image {ami_id}") + ami = ec2.Image(ami_id) + ami.deregister() + print("Image deregistered") + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + print(f"Couldn't deregister image {ami_id}.") + print(f"Error {err_code}: {err_msg}") + + +def maybe_create_filesystem_customizations(cfg, tc): + # disk_config and filesystem_customization are mutually exclusive + if tc.disk_config: + return + if tc.rootfs == "btrfs": + # only minimal customizations are supported for btrfs currently + cfg["customizations"]["filesystem"] = [ + { + "mountpoint": "/", + "minsize": "12 GiB" + }, + ] + return + # add some custom mountpoints + cfg["customizations"]["filesystem"] = [ + { + "mountpoint": "/", + "minsize": "12 GiB" + }, + { + "mountpoint": "/var/data", + "minsize": "3 GiB" + }, + { + "mountpoint": "/var/data/test", + "minsize": "1 GiB" + }, + { + "mountpoint": "/var/opt", + "minsize": "2 GiB" + }, + ] + + +def maybe_create_disk_customizations(cfg, tc): + if not tc.disk_config: + return + if tc.disk_config == "lvm": + cfg["customizations"]["disk"] = { + 
"partitions": [ + { + "type": "lvm", + # XXX: why is this minsize also needed? should we derrive + # it from the LVs ? + "minsize": "10 GiB", + "logical_volumes": [ + { + "fs_type": "xfs", + "minsize": "1 GiB", + "mountpoint": "/var/log", + }, + { + "minsize": "7 GiB", + "fs_type": "swap", + } + ] + } + ] + } + elif tc.disk_config == "btrfs": + cfg["customizations"]["disk"] = { + "partitions": [ + { + "type": "btrfs", + "minsize": "10 GiB", + "subvolumes": [ + { + "name": "varlog", + "mountpoint": "/var/log", + } + ] + } + ] + } + elif tc.disk_config == "swap": + cfg["customizations"]["disk"] = { + "partitions": [ + { + "minsize": "123 MiB", + "fs_type": "swap", + } + ] + } + else: + raise ValueError(f"unsupported disk_config {tc.disk_config}") + + +# podman_run_common has the common prefix for the podman run invocations +podman_run_common = [ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", + # ensure we run in reasonable memory limits + "--memory=8g", "--memory-swap=8g", +] + + +def get_ip_from_default_route(): + default_route = subprocess.run([ + "ip", + "route", + "list", + "default" + ], check=True, capture_output=True, text=True).stdout + return default_route.split()[8] + + +def pull_container(container_ref, target_arch="", tls_verify=True): + if target_arch == "": + target_arch = platform.machine() + + if target_arch not in ["x86_64", "amd64", "aarch64", "arm64", "s390x", "ppc64le"]: + raise RuntimeError(f"unknown host arch: {target_arch}") + + subprocess.run([ + "podman", "pull", + "--arch", target_arch, + "--tls-verify" if tls_verify else "--tls-verify=false", + container_ref, + ], check=True)