From 8247f91e5431e1ff1ed8514365d39434948793d1 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Fri, 24 Nov 2023 17:56:07 +0100 Subject: [PATCH 001/279] github: add workflow with linters golangci-lint on the odc directory and shellcheck for the whole project. --- .github.com/workflows/bibtests.yaml | 58 +++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github.com/workflows/bibtests.yaml diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml new file mode 100644 index 00000000..0bdcfad6 --- /dev/null +++ b/.github.com/workflows/bibtests.yaml @@ -0,0 +1,58 @@ +--- +name: Tests + +on: + pull_request: + branches: + - "*" + push: + branches: + - main + # for merge queue + merge_group: + +jobs: + lint: + name: "⌨ Lint" + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.19 + uses: actions/setup-go@v4 + with: + go-version: 1.19 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Apt update + run: sudo apt update + + # This is needed for the container upload dependencies + - name: Install libgpgme devel package + run: sudo apt install -y libgpgme-dev + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: v1.54.2 + args: --timeout 5m0s + working-directory: odc + + shellcheck: + name: "🐚 Shellcheck" + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 + with: + ignore: vendor # We don't want to fix the code in vendored dependencies + env: + # don't check /etc/os-release sourcing, allow useless cats to live inside our codebase, and + # allow seemingly unreachable commands + SHELLCHECK_OPTS: -e SC1091 -e SC2002 -e SC2317 From 8e0e94abd71317b91c415921517c496e644cd224 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 5 Dec 2023 00:16:49 +0100 Subject: [PATCH 002/279] rename odc to bib Following the change from the last commit, let's also use the new abbreviation "bib" instead of "odc". --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 0bdcfad6..4358c162 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -39,7 +39,7 @@ jobs: with: version: v1.54.2 args: --timeout 5m0s - working-directory: odc + working-directory: bib shellcheck: name: "🐚 Shellcheck" From 62250377b8bd83bc91bcd4e9c54a2f78dbe09696 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 4 Dec 2023 17:30:03 +0100 Subject: [PATCH 003/279] tests: add basic integration testing This commit adds basic integration testing for the project. It is pytest based and can run both locally or via `tmt` [0] which will spin up a clean VM and run the tests inside. 
[0] https://github.com/teemtee/tmt --- .github.com/workflows/bibtests.yaml | 20 +++++++++ test/bib/README.md | 12 ++++++ test/bib/test_flake8.py | 11 +++++ test/bib/test_smoke.py | 67 +++++++++++++++++++++++++++++ test/bib/testutil.py | 17 ++++++++ 5 files changed, 127 insertions(+) create mode 100644 test/bib/README.md create mode 100644 test/bib/test_flake8.py create mode 100644 test/bib/test_smoke.py create mode 100644 test/bib/testutil.py diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 4358c162..e548fc58 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -56,3 +56,23 @@ jobs: # don't check /etc/os-release sourcing, allow useless cats to live inside our codebase, and # allow seemingly unreachable commands SHELLCHECK_OPTS: -e SC1091 -e SC2002 -e SC2317 + + integration: + # TODO: run this also via tmt/testing-farm + name: "Integration" + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Setup up python + uses: actions/setup-python@v4 + - name: Install test dependencies + run: | + sudo apt install -y podman python3-pytest flake8 + - name: Run tests + run: | + # podman needs (parts of) the environment but will break when + # XDG_RUNTIME_DIR is set. + # TODO: figure out what exactly podman needs + sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv diff --git a/test/bib/README.md b/test/bib/README.md new file mode 100644 index 00000000..f6092806 --- /dev/null +++ b/test/bib/README.md @@ -0,0 +1,12 @@ +Integration tests for bootc-image-builder +---------------------------------------------- + +This directory contans integration tests for bootc-image-builder. + +They can be run in two ways: +1. On the local machine by just running `sudo pytest -s -v` +2. 
Via `tmt` [0] which will spin up a clean VM and run the tests inside: + + tmt run -vvv + +[0] https://github.com/teemtee/tmt diff --git a/test/bib/test_flake8.py b/test/bib/test_flake8.py new file mode 100644 index 00000000..5cb61a7b --- /dev/null +++ b/test/bib/test_flake8.py @@ -0,0 +1,11 @@ +import os +import pathlib +import subprocess + + +def test_flake8(): + p = pathlib.Path(__file__).parent + # TODO: use all static checks from osbuild instead + subprocess.check_call( + ["flake8", "--ignore=E402", "--max-line-length=120", + os.fspath(p)]) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py new file mode 100644 index 00000000..7bed2287 --- /dev/null +++ b/test/bib/test_smoke.py @@ -0,0 +1,67 @@ +import json +import os +import pathlib +import subprocess + +import pytest + +# local test utils +import testutil + + +@pytest.fixture(name="output_path") +def output_path_fixture(tmp_path): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + return output_path + + +@pytest.fixture(name="config_json") +def config_json_fixture(output_path): + CFG = { + "blueprint": { + "customizations": { + "user": [ + { + "name": "test", + "password": "password", + "groups": ["wheel"], + }, + ], + }, + }, + } + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(CFG), encoding="utf-8") + return config_json_path + + +@pytest.mark.skipif(os.getuid() != 0, reason="needs root") +@pytest.mark.skipif(not testutil.has_executable("podman"), reason="need podman") +def test_smoke(output_path, config_json): + # build local container + subprocess.check_call([ + "podman", "build", + "-f", "Containerfile", + "-t", "bootc-image-builder-test", + ]) + cursor = testutil.journal_cursor() + # and run container to deploy an image into output/disk.qcow2 + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{output_path}:/output", + "bootc-image-builder-test", + 
"quay.io/centos-bootc/centos-bootc:stream9", + "--config", "/output/config.json", + ]) + # check that there are no denials + # TODO: actually check this once https://github.com/osbuild/images/pull/287 + # is merged + journal_output = testutil.journal_after_cursor(cursor) + assert journal_output != "" + generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" + assert generated_img.exists(), f"output file missing, dir content: {os.listdir(os.fspath(output_path))}" + # TODO: boot and do basic checks, see + # https://github.com/osbuild/bootc-image-builder/compare/main...mvo5:integration-test?expand=1 diff --git a/test/bib/testutil.py b/test/bib/testutil.py new file mode 100644 index 00000000..47da18e0 --- /dev/null +++ b/test/bib/testutil.py @@ -0,0 +1,17 @@ +import shutil +import subprocess + + +def journal_cursor(): + output = subprocess.check_output(["journalctl", "-n0", "--show-cursor"], encoding="utf-8").strip() + cursor = output.split("\n")[-1] + return cursor.split("cursor: ")[-1] + + +def journal_after_cursor(cursor): + output = subprocess.check_output(["journalctl", f"--after-cursor={cursor}"]) + return output + + +def has_executable(name): + return shutil.which(name) is not None From b0771dc50151f65509fb137e7ba53c3ed8c7b8c0 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 5 Dec 2023 07:28:11 +0100 Subject: [PATCH 004/279] tests: check that there are no selinux denials This test ensure that we build images without selinux denials in the logs. 
--- test/bib/test_smoke.py | 37 ++++++++++++++++++++++++++++++++----- test/bib/testutil.py | 2 +- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 7bed2287..954f525c 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -1,6 +1,7 @@ import json import os import pathlib +import re import subprocess import pytest @@ -36,6 +37,28 @@ def config_json_fixture(output_path): return config_json_path +def log_has_osbuild_selinux_denials(log): + OSBUID_SELINUX_DENIALS_RE = re.compile(r"(?ms)avc:\ +denied.*osbuild") + return re.search(OSBUID_SELINUX_DENIALS_RE, log) + + +def test_osbuild_selinux_denails_re_works(): + fake_log = ( + 'Dec 05 07:19:39 other log msg\n' + 'Dec 05 07:19:39 fedora audit: SELINUX_ERR' + ' op=security_bounded_transition seresult=denied' + ' oldcontext=system_u:system_r:install_t:s0:c42,c355' + ' newcontext=system_u:system_r:mount_t:s0:c42,c355\n' + 'Dec 06 16:00:54 internal audit[14368]: AVC avc: denied ' + '{ nnp_transition nosuid_transition } for pid=14368 ' + 'comm="org.osbuild.ost" scontext=system_u:system_r:install_t:s0:' + 'c516,c631 tcontext=system_u:system_r:mount_t:s0:c516,c631 ' + 'tclass=process2 permissive=0' + ) + assert log_has_osbuild_selinux_denials(fake_log) + assert not log_has_osbuild_selinux_denials("some\nrandom\nlogs") + + @pytest.mark.skipif(os.getuid() != 0, reason="needs root") @pytest.mark.skipif(not testutil.has_executable("podman"), reason="need podman") def test_smoke(output_path, config_json): @@ -56,12 +79,16 @@ def test_smoke(output_path, config_json): "quay.io/centos-bootc/centos-bootc:stream9", "--config", "/output/config.json", ]) - # check that there are no denials - # TODO: actually check this once https://github.com/osbuild/images/pull/287 - # is merged - journal_output = testutil.journal_after_cursor(cursor) - assert journal_output != "" generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" assert generated_img.exists(), 
f"output file missing, dir content: {os.listdir(os.fspath(output_path))}" + + # check that there are no selinux denials + journal_output = testutil.journal_after_cursor(cursor) + assert journal_output != "" + if testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled").returncode == 0: + assert not log_has_osbuild_selinux_denials(journal_output), f"denials in log {journal_output}" + else: + print("WARNING: selinux not enabled, cannot check for denials") + # TODO: boot and do basic checks, see # https://github.com/osbuild/bootc-image-builder/compare/main...mvo5:integration-test?expand=1 diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 47da18e0..e1fc954b 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -9,7 +9,7 @@ def journal_cursor(): def journal_after_cursor(cursor): - output = subprocess.check_output(["journalctl", f"--after-cursor={cursor}"]) + output = subprocess.check_output(["journalctl", f"--after-cursor={cursor}"], encoding="utf8") return output From e28102f7cf038b4a3ee59329090b439836409dd8 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 5 Dec 2023 07:51:16 +0100 Subject: [PATCH 005/279] test: boot generated VM and wait for ssh port Do some more testing of the generated image by booting it and checking that ssh comes up. There will be a followup that will actually login into the VM and ensure that also works. 
--- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_smoke.py | 9 ++-- test/bib/testutil.py | 23 ++++++++++ test/bib/testutil_test.py | 27 ++++++++++++ test/bib/vm.py | 66 +++++++++++++++++++++++++++++ 5 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 test/bib/testutil_test.py create mode 100644 test/bib/vm.py diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index e548fc58..0e97b1a9 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -69,7 +69,7 @@ jobs: uses: actions/setup-python@v4 - name: Install test dependencies run: | - sudo apt install -y podman python3-pytest flake8 + sudo apt install -y podman python3-pytest flake8 qemu-system-x86 - name: Run tests run: | # podman needs (parts of) the environment but will break when diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 954f525c..f47075cf 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -8,6 +8,7 @@ # local test utils import testutil +from vm import VM @pytest.fixture(name="output_path") @@ -76,7 +77,7 @@ def test_smoke(output_path, config_json): "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", "bootc-image-builder-test", - "quay.io/centos-bootc/centos-bootc:stream9", + "quay.io/centos-bootc/fedora-bootc:eln", "--config", "/output/config.json", ]) generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" @@ -90,5 +91,7 @@ def test_smoke(output_path, config_json): else: print("WARNING: selinux not enabled, cannot check for denials") - # TODO: boot and do basic checks, see - # https://github.com/osbuild/bootc-image-builder/compare/main...mvo5:integration-test?expand=1 + with VM(generated_img) as test_vm: + # TODO: replace with 'test_vm.run("true")' once user creation via + # blueprints works + test_vm.wait_ssh_ready() diff --git a/test/bib/testutil.py b/test/bib/testutil.py index e1fc954b..fda40f28 100644 --- a/test/bib/testutil.py +++ 
b/test/bib/testutil.py @@ -1,5 +1,7 @@ +import socket import shutil import subprocess +import time def journal_cursor(): @@ -15,3 +17,24 @@ def journal_after_cursor(cursor): def has_executable(name): return shutil.which(name) is not None + + +def get_free_port() -> int: + # this is racy but there is no race-free way to do better with the qemu CLI + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("localhost", 0)) + return s.getsockname()[1] + + +def wait_ssh_ready(port, sleep, max_wait_sec): + for i in range(int(max_wait_sec / sleep)): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.settimeout(sleep) + try: + s.connect(("localhost", port)) + data = s.recv(256) + if b"OpenSSH" in data: + return + except (ConnectionRefusedError, TimeoutError): + time.sleep(sleep) + raise ConnectionRefusedError(f"cannot connect to port {port} after {max_wait_sec}s") diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py new file mode 100644 index 00000000..026b10a2 --- /dev/null +++ b/test/bib/testutil_test.py @@ -0,0 +1,27 @@ +import contextlib +import subprocess +import time +from unittest.mock import call, patch + +import pytest + +from testutil import has_executable, get_free_port, wait_ssh_ready + + +def test_get_free_port(): + port_nr = get_free_port() + assert port_nr > 1024 and port_nr < 65535 + + +@pytest.mark.skipif(not has_executable("nc"), reason="needs nc") +@patch("time.sleep", wraps=time.sleep) +def test_wait_ssh_ready(mocked_sleep): + port = get_free_port() + with pytest.raises(ConnectionRefusedError): + wait_ssh_ready(port, sleep=0.1, max_wait_sec=0.35) + assert mocked_sleep.call_args_list == [call(0.1), call(0.1), call(0.1)] + # now make port ready + with contextlib.ExitStack() as cm: + p = subprocess.Popen(f"echo OpenSSH | nc -l {port}", shell=True) + cm.callback(p.kill) + wait_ssh_ready(port, sleep=0.1, max_wait_sec=10) diff --git a/test/bib/vm.py b/test/bib/vm.py new file mode 100644 index 00000000..78938e13 --- 
/dev/null +++ b/test/bib/vm.py @@ -0,0 +1,66 @@ +import pathlib +import subprocess +import sys + +from testutil import get_free_port, wait_ssh_ready + + +class VM: + MEM = "2000" + QEMU = "qemu-system-x86_64" + + def __init__(self, img, snapshot=True): + self._img = pathlib.Path(img) + self._qemu_p = None + self._ssh_port = None + self._snapshot = snapshot + + def __del__(self): + self.force_stop() + + def start(self): + if self._qemu_p is not None: + return + log_path = self._img.with_suffix(".serial-log") + self._ssh_port = get_free_port() + qemu_cmdline = [ + self.QEMU, "-enable-kvm", + "-m", self.MEM, + # get "illegal instruction" inside the VM otherwise + "-cpu", "host", + "-nographic", + "-serial", "stdio", + "-monitor", "none", + "-netdev", f"user,id=net.0,hostfwd=tcp::{self._ssh_port}-:22", + "-device", "rtl8139,netdev=net.0", + ] + if self._snapshot: + qemu_cmdline.append("-snapshot") + qemu_cmdline.append(self._img) + self._log(f"vm starting, log available at {log_path}") + + # XXX: use systemd-run to ensure cleanup? + self._qemu_p = subprocess.Popen( + qemu_cmdline, stdout=sys.stdout, stderr=sys.stderr) + # XXX: also check that qemu is working and did not crash + self.wait_ssh_ready() + self._log(f"vm ready at port {self._ssh_port}") + + def _log(self, msg): + # XXX: use a proper logger + sys.stdout.write(msg.rstrip("\n") + "\n") + + def wait_ssh_ready(self): + wait_ssh_ready(self._ssh_port, sleep=1, max_wait_sec=600) + + def force_stop(self): + if self._qemu_p: + self._qemu_p.kill() + self._qemu_p = None + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, tb): + self.force_stop() From 2002c9ef83db8c2c32cf1e1a7389a3f55508f02f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 04:43:35 +0000 Subject: [PATCH 006/279] build(deps): bump actions/setup-go from 4 to 5 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 0e97b1a9..b169fc75 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go 1.19 - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: 1.19 id: go From 4b2f0aade65549cd8161cfb7fed523cc27c1f83a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 7 Dec 2023 18:46:15 +0100 Subject: [PATCH 007/279] tests: actually log into the generated disk image This commit adds an integration test that ensures that the generated disk can be logged into via ssh by the blueprint user. 
--- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_smoke.py | 8 +++++--- test/bib/vm.py | 26 ++++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index b169fc75..d7f4433c 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -69,7 +69,7 @@ jobs: uses: actions/setup-python@v4 - name: Install test dependencies run: | - sudo apt install -y podman python3-pytest flake8 qemu-system-x86 + sudo apt install -y podman python3-pytest python3-paramiko flake8 qemu-system-x86 - name: Run tests run: | # podman needs (parts of) the environment but will break when diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index f47075cf..2d6f9834 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -92,6 +92,8 @@ def test_smoke(output_path, config_json): print("WARNING: selinux not enabled, cannot check for denials") with VM(generated_img) as test_vm: - # TODO: replace with 'test_vm.run("true")' once user creation via - # blueprints works - test_vm.wait_ssh_ready() + exit_status, _ = test_vm.run("true", user="test", password="password") + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user="test", password="password") + assert exit_status == 0 + assert "hello" in output diff --git a/test/bib/vm.py b/test/bib/vm.py index 78938e13..51bb7682 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,10 +1,14 @@ import pathlib import subprocess import sys +from io import StringIO from testutil import get_free_port, wait_ssh_ready +from paramiko.client import AutoAddPolicy, SSHClient + + class VM: MEM = "2000" QEMU = "qemu-system-x86_64" @@ -64,3 +68,25 @@ def __enter__(self): def __exit__(self, type, value, tb): self.force_stop() + + def run(self, cmd, user, password): + if not self._qemu_p: + self.start() + client = SSHClient() + client.set_missing_host_key_policy(AutoAddPolicy) + 
client.connect( + "localhost", self._ssh_port, user, password, + allow_agent=False, look_for_keys=False) + chan = client.get_transport().open_session() + chan.get_pty() + chan.exec_command(cmd) + stdout_f = chan.makefile() + output = StringIO() + while True: + out = stdout_f.readline() + if not out: + break + self._log(out) + output.write(out) + exit_status = stdout_f.channel.recv_exit_status() + return exit_status, output.getvalue() From 8300bd12873353240257bd943f458ab765c1c624 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Dec 2023 12:56:59 +0100 Subject: [PATCH 008/279] test: fix race in wait_ssh_ready() and add test The wait_ssh_ready() had a subtle bug - when ssh was starting up and not yet producing the expected output (e.g. just empty output) the loop that sleeps would not wait but just loop very quickly which could lead to a confuging: ``` ConnectionRefusedError: cannot connect to port 39973 after 600s ``` even though clearly 600s had not passed. This commit fixes this (subtle) bug and adds a test that will generate an unexpected output and ensures there that the expected number of sleeps happend. 
--- test/bib/testutil.py | 5 +++-- test/bib/testutil_test.py | 45 +++++++++++++++++++++++++++++++-------- 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index fda40f28..f99b1a59 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -35,6 +35,7 @@ def wait_ssh_ready(port, sleep, max_wait_sec): data = s.recv(256) if b"OpenSSH" in data: return - except (ConnectionRefusedError, TimeoutError): - time.sleep(sleep) + except (ConnectionRefusedError, ConnectionResetError, TimeoutError): + pass + time.sleep(sleep) raise ConnectionRefusedError(f"cannot connect to port {port} after {max_wait_sec}s") diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index 026b10a2..7ba4f025 100644 --- a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -1,6 +1,5 @@ import contextlib import subprocess -import time from unittest.mock import call, patch import pytest @@ -13,15 +12,43 @@ def test_get_free_port(): assert port_nr > 1024 and port_nr < 65535 -@pytest.mark.skipif(not has_executable("nc"), reason="needs nc") -@patch("time.sleep", wraps=time.sleep) -def test_wait_ssh_ready(mocked_sleep): - port = get_free_port() +@pytest.fixture(name="free_port") +def free_port_fixture(): + return get_free_port() + + +@patch("time.sleep") +def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): with pytest.raises(ConnectionRefusedError): - wait_ssh_ready(port, sleep=0.1, max_wait_sec=0.35) + wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=0.35) assert mocked_sleep.call_args_list == [call(0.1), call(0.1), call(0.1)] - # now make port ready + + +def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): + with contextlib.ExitStack() as cm: + p = subprocess.Popen( + f"echo not-ssh | nc -v -l {free_port}", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + cm.callback(p.kill) + # wait for nc to be ready + while True: + if "Listening " in 
p.stdout.readline(): + break + # now connect + with patch("time.sleep") as mocked_sleep: + with pytest.raises(ConnectionRefusedError): + wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=0.55) + assert mocked_sleep.call_args_list == [ + call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] + + +@pytest.mark.skipif(not has_executable("nc"), reason="needs nc") +def test_wait_ssh_ready_integration(free_port, tmp_path): with contextlib.ExitStack() as cm: - p = subprocess.Popen(f"echo OpenSSH | nc -l {port}", shell=True) + p = subprocess.Popen(f"echo OpenSSH | nc -l {free_port}", shell=True) cm.callback(p.kill) - wait_ssh_ready(port, sleep=0.1, max_wait_sec=10) + wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=10) From d7c154858fa127f97cf14c19bc0a91e800a0d7a7 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Dec 2023 16:21:40 +0100 Subject: [PATCH 009/279] test: skip qemu test when running on non-x86_64-v3 CPUs Building ELN requires a CPU that is capable of runing `x86_64-v3` instructions [0]. Most CPUs after 2015 do support this. However it happend that the test got scheduled inside a lab that had older CPUs. In this case skip the qemu test. This is okay because the test is also run in GH runners that are more current. To skip the `avx2` cpu feature is used as a proxy instead of testing all of them. This should be a reasonable approximation but if we run into issues it can always be make more accurate. 
[0] https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels --- test/bib/test_smoke.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 2d6f9834..0983c95e 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -91,9 +91,14 @@ def test_smoke(output_path, config_json): else: print("WARNING: selinux not enabled, cannot check for denials") - with VM(generated_img) as test_vm: - exit_status, _ = test_vm.run("true", user="test", password="password") - assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user="test", password="password") - assert exit_status == 0 - assert "hello" in output + # building an ELN image needs x86_64-v3 to work, we use avx2 as a proxy + # to detect if we have x86-64-v3 (not perfect but should be good enough) + if " avx2 " not in pathlib.Path("/proc/cpuinfo").read_text(): + print("WARNING: no x86_64-v3 cpu detected, skipping VM boot test") + else: + with VM(generated_img) as test_vm: + exit_status, _ = test_vm.run("true", user="test", password="password") + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user="test", password="password") + assert exit_status == 0 + assert "hello" in output From f9ec161fe9b4f56f1a7a2941afd4af0d71c4b4d4 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 12 Dec 2023 09:19:45 +0100 Subject: [PATCH 010/279] test: add missing check for "nc" to test_wait_ssh_ready_sleeps_wrong_reply --- test/bib/testutil_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index 7ba4f025..bd887112 100644 --- a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -24,6 +24,7 @@ def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): assert mocked_sleep.call_args_list == [call(0.1), call(0.1), call(0.1)] +@pytest.mark.skipif(not has_executable("nc"), reason="needs nc") def 
test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): with contextlib.ExitStack() as cm: p = subprocess.Popen( From 2211a32f25366a6f4278fe8018d0172a8aaaf391 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Dec 2023 17:54:59 +0100 Subject: [PATCH 011/279] test: refactor monolithic `test_smoke` into multiple tests The existing test_smoke was becoming a bit too big and especially the fact that part have to be skipped depending on the environment is really not nice. This commit refactors it to be more module. For this it uses the pytest session scope to ensure that the build container and the test image are only build once and then shared accross the various individual tests. --- test/bib/test_smoke.py | 136 +++++++++++++++++++++++++---------------- test/bib/testutil.py | 8 +++ 2 files changed, 92 insertions(+), 52 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 0983c95e..4d496d0e 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -1,3 +1,4 @@ +import collections import json import os import pathlib @@ -11,31 +12,94 @@ from vm import VM -@pytest.fixture(name="output_path") -def output_path_fixture(tmp_path): - output_path = tmp_path / "output" - output_path.mkdir(exist_ok=True) - return output_path +if not testutil.has_executable("podman"): + pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) + +if os.getuid() != 0: + pytest.skip("tests require root to run", allow_module_level=True) + +# building an ELN image needs x86_64-v3 to work, we use avx2 as a proxy +# to detect if we have x86-64-v3 (not perfect but should be good enough) +if not testutil.has_x86_64_v3_cpu(): + pytest.skip("need x86_64-v3 capable CPU", allow_module_level=True) + + +@pytest.fixture(name="build_container", scope="session") +def build_container_fixture(): + """Build a container from the Containerfile and returns the name""" + container_tag = "bootc-image-builder-test" + 
subprocess.check_call([ + "podman", "build", + "-f", "Containerfile", + "-t", container_tag, + ]) + return container_tag -@pytest.fixture(name="config_json") -def config_json_fixture(output_path): +@pytest.fixture(name="build_image", scope="session") +def build_image_fixture(tmpdir_factory, build_container): + """ + Build an image inside the passed build_container and return a + named tuple with the resulting image path and user/password + """ + username = "test" + password = "password" CFG = { "blueprint": { "customizations": { "user": [ { - "name": "test", - "password": "password", + "name": username, + "password": password, "groups": ["wheel"], }, ], }, }, } + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(CFG), encoding="utf-8") - return config_json_path + + cursor = testutil.journal_cursor() + # run container to deploy an image into output/qcow2/disk.qcow2 + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{output_path}:/output", + build_container, + "quay.io/centos-bootc/fedora-bootc:eln", + "--config", "/output/config.json", + ]) + journal_output = testutil.journal_after_cursor(cursor) + generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" + + ImageFixtureResult = collections.namedtuple( + "BuildImage", ["img_path", "username", "password", "journal_output"]) + return ImageFixtureResult(generated_img, username, password, journal_output) + + +def test_container_builds(build_container): + output = subprocess.check_output([ + "podman", "images", "-n", build_container], encoding="utf-8") + assert build_container in output + + +def test_image_is_generated(build_image): + assert build_image.img_path.exists(), "output file missing, dir "\ + f"content: {os.listdir(os.fspath(build_image.img_path))}" + + +def test_image_boots(build_image): + 
with VM(build_image.img_path) as test_vm: + exit_status, _ = test_vm.run("true", user=build_image.username, password=build_image.password) + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user="test", password="password") + assert exit_status == 0 + assert "hello" in output def log_has_osbuild_selinux_denials(log): @@ -43,7 +107,7 @@ def log_has_osbuild_selinux_denials(log): return re.search(OSBUID_SELINUX_DENIALS_RE, log) -def test_osbuild_selinux_denails_re_works(): +def test_osbuild_selinux_denials_re_works(): fake_log = ( 'Dec 05 07:19:39 other log msg\n' 'Dec 05 07:19:39 fedora audit: SELINUX_ERR' @@ -60,45 +124,13 @@ def test_osbuild_selinux_denails_re_works(): assert not log_has_osbuild_selinux_denials("some\nrandom\nlogs") -@pytest.mark.skipif(os.getuid() != 0, reason="needs root") -@pytest.mark.skipif(not testutil.has_executable("podman"), reason="need podman") -def test_smoke(output_path, config_json): - # build local container - subprocess.check_call([ - "podman", "build", - "-f", "Containerfile", - "-t", "bootc-image-builder-test", - ]) - cursor = testutil.journal_cursor() - # and run container to deploy an image into output/disk.qcow2 - subprocess.check_call([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", f"{output_path}:/output", - "bootc-image-builder-test", - "quay.io/centos-bootc/fedora-bootc:eln", - "--config", "/output/config.json", - ]) - generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" - assert generated_img.exists(), f"output file missing, dir content: {os.listdir(os.fspath(output_path))}" +def has_selinux(): + return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled").returncode == 0 - # check that there are no selinux denials - journal_output = testutil.journal_after_cursor(cursor) - assert journal_output != "" - if testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled").returncode == 0: - assert 
not log_has_osbuild_selinux_denials(journal_output), f"denials in log {journal_output}" - else: - print("WARNING: selinux not enabled, cannot check for denials") - - # building an ELN image needs x86_64-v3 to work, we use avx2 as a proxy - # to detect if we have x86-64-v3 (not perfect but should be good enough) - if " avx2 " not in pathlib.Path("/proc/cpuinfo").read_text(): - print("WARNING: no x86_64-v3 cpu detected, skipping VM boot test") - else: - with VM(generated_img) as test_vm: - exit_status, _ = test_vm.run("true", user="test", password="password") - assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user="test", password="password") - assert exit_status == 0 - assert "hello" in output + +@pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") +def test_image_build_without_se_linux_denials(build_image): + # the journal always contains logs from the image building + assert build_image.journal_output != "" + assert not log_has_osbuild_selinux_denials(build_image.journal_output), \ + f"denials in log {build_image.journal_output}" diff --git a/test/bib/testutil.py b/test/bib/testutil.py index f99b1a59..3bd59c84 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -1,3 +1,4 @@ +import pathlib import socket import shutil import subprocess @@ -39,3 +40,10 @@ def wait_ssh_ready(port, sleep, max_wait_sec): pass time.sleep(sleep) raise ConnectionRefusedError(f"cannot connect to port {port} after {max_wait_sec}s") + + +def has_x86_64_v3_cpu(): + # x86_64-v3 has multiple features, see + # https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels + # but "avx2" is probably a good enough proxy + return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text() From 37cf6278549957c4f3ab33b67c7c23f8ee798b68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 04:43:39 +0000 Subject: [PATCH 012/279] build(deps): bump actions/setup-python from 4 to 5 Bumps 
[actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index d7f4433c..dad7a8a0 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -66,7 +66,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup up python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Install test dependencies run: | sudo apt install -y podman python3-pytest python3-paramiko flake8 qemu-system-x86 From 23ff973a74db9a49a8e9228c51f99972e690b44c Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 13 Dec 2023 16:47:19 +0100 Subject: [PATCH 013/279] test: build and boot both qcow2 and ami It's not possible to have a parameterised fixture in pytest so instead we: - Separate the build function out of the fixture and add the image type argument. - Create two build fixtures, one for each image type. - Use both build fixtures in the image creation (test_image_is_generated()) and boot (test_image_boots()) tests. 
--- test/bib/test_smoke.py | 57 ++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 4d496d0e..4ba17fbb 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -11,7 +11,6 @@ import testutil from vm import VM - if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) @@ -36,8 +35,7 @@ def build_container_fixture(): return container_tag -@pytest.fixture(name="build_image", scope="session") -def build_image_fixture(tmpdir_factory, build_container): +def build_image(build_container, output_path, image_type): """ Build an image inside the passed build_container and return a named tuple with the resulting image path and user/password @@ -57,8 +55,6 @@ def build_image_fixture(tmpdir_factory, build_container): }, }, } - output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" - output_path.mkdir(exist_ok=True) config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(CFG), encoding="utf-8") @@ -73,13 +69,34 @@ def build_image_fixture(tmpdir_factory, build_container): build_container, "quay.io/centos-bootc/fedora-bootc:eln", "--config", "/output/config.json", + "--type", image_type, ]) journal_output = testutil.journal_after_cursor(cursor) - generated_img = pathlib.Path(output_path) / "qcow2/disk.qcow2" - ImageFixtureResult = collections.namedtuple( - "BuildImage", ["img_path", "username", "password", "journal_output"]) - return ImageFixtureResult(generated_img, username, password, journal_output) + artifact = { + "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", + "ami": pathlib.Path(output_path) / "image/disk.raw", + } + generated_img = artifact[image_type] + ImageBuildResult = collections.namedtuple( + "ImageBuildResult", ["img_path", "username", "password", "journal_output"]) + return ImageBuildResult(generated_img, 
username, password, journal_output) + + +@pytest.fixture(name="build_image_qcow2", scope="session") +def build_qcow2_fixture(tmpdir_factory, build_container): + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + + return build_image(build_container, output_path, "qcow2") + + +@pytest.fixture(name="build_image_ami", scope="session") +def build_ami_fixture(tmpdir_factory, build_container): + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + + return build_image(build_container, output_path, "ami") def test_container_builds(build_container): @@ -88,18 +105,20 @@ def test_container_builds(build_container): assert build_container in output -def test_image_is_generated(build_image): - assert build_image.img_path.exists(), "output file missing, dir "\ - f"content: {os.listdir(os.fspath(build_image.img_path))}" +def test_image_is_generated(build_image_qcow2, build_image_ami): + for image in [build_image_qcow2, build_image_ami]: + assert image.img_path.exists(), "output file missing, dir "\ + f"content: {os.listdir(os.fspath(image.img_path))}" -def test_image_boots(build_image): - with VM(build_image.img_path) as test_vm: - exit_status, _ = test_vm.run("true", user=build_image.username, password=build_image.password) - assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user="test", password="password") - assert exit_status == 0 - assert "hello" in output +def test_image_boots(build_image_qcow2, build_image_ami): + for image in [build_image_qcow2, build_image_ami]: + with VM(image.img_path) as test_vm: + exit_status, _ = test_vm.run("true", user=image.username, password=image.password) + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user="test", password="password") + assert exit_status == 0 + assert "hello" in output def log_has_osbuild_selinux_denials(log): From 3107ecd7264d02ecb4e78b17b9ac725a63f0b17b Mon Sep 17 
00:00:00 2001 From: Achilleas Koutsou Date: Wed, 13 Dec 2023 16:49:41 +0100 Subject: [PATCH 014/279] test: keep the cache between builds By creating the /store volume it will persist between builds and the source cache should be reused between the two builds in the tests. --- test/bib/test_smoke.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 4ba17fbb..a1056d7c 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -66,6 +66,7 @@ def build_image(build_container, output_path, image_type): "--privileged", "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", + "-v", "/store", # share the cache between builds build_container, "quay.io/centos-bootc/fedora-bootc:eln", "--config", "/output/config.json", From 9dbfcda3a4e3d70ddce643a772967f4454035348 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 13 Dec 2023 20:57:35 +0100 Subject: [PATCH 015/279] test: update test_image_build_without_se_linux_denials() --- test/bib/test_smoke.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index a1056d7c..0063282c 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -149,8 +149,9 @@ def has_selinux(): @pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") -def test_image_build_without_se_linux_denials(build_image): - # the journal always contains logs from the image building - assert build_image.journal_output != "" - assert not log_has_osbuild_selinux_denials(build_image.journal_output), \ - f"denials in log {build_image.journal_output}" +def test_image_build_without_se_linux_denials(build_image_qcow2, build_image_ami): + for build_image in [build_image_qcow2, build_image_ami]: + # the journal always contains logs from the image building + assert build_image.journal_output != "" + assert not log_has_osbuild_selinux_denials(build_image.journal_output), \ + f"denials in log 
{build_image.journal_output}" From 7d57cd5d47ce2951eb928e46692d64b981530fe2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Thu, 21 Dec 2023 14:58:19 +0100 Subject: [PATCH 016/279] README: General documentation on building and testing --- test/bib/README.md | 15 ++++++++++++--- test/bib/requirements.txt | 3 +++ 2 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 test/bib/requirements.txt diff --git a/test/bib/README.md b/test/bib/README.md index f6092806..bd501281 100644 --- a/test/bib/README.md +++ b/test/bib/README.md @@ -1,12 +1,21 @@ Integration tests for bootc-image-builder ---------------------------------------------- -This directory contans integration tests for bootc-image-builder. +This directory contains integration tests for bootc-image-builder. -They can be run in two ways: -1. On the local machine by just running `sudo pytest -s -v` +They can be run in two ways +1. On the local machine: + By just running `sudo pytest -s -v` in the _top level folder_ of the project (where `Containerfile` is) + If you have set up `pip` only for your user, you might just want to run the test with elevated privileges + `sudo -E $(which pytest) -s -v` 2. Via `tmt` [0] which will spin up a clean VM and run the tests inside: tmt run -vvv [0] https://github.com/teemtee/tmt + +To install `tmt` on fedora at least those packages are needed: + +```shell +sudo dnf install tmt tmt+provision-virtual +``` diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt new file mode 100644 index 00000000..5d0fbeed --- /dev/null +++ b/test/bib/requirements.txt @@ -0,0 +1,3 @@ +pytest +paramiko +flake8 \ No newline at end of file From 3af18a8c315fde362976cb06d5a79e13545d8c45 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 2 Jan 2024 09:49:40 +0100 Subject: [PATCH 017/279] tests: support `netcat-traditional` as well There are multiple variants of netcat with slightly incompatible syntax. 
The least common denominator seems to be `netcat-traditional` so use that syntax in the tests. --- test/bib/testutil_test.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index bd887112..f1e8bea4 100644 --- a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -28,7 +28,7 @@ def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): with contextlib.ExitStack() as cm: p = subprocess.Popen( - f"echo not-ssh | nc -v -l {free_port}", + f"echo not-ssh | nc -v -l -p {free_port}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -37,7 +37,9 @@ def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): cm.callback(p.kill) # wait for nc to be ready while True: - if "Listening " in p.stdout.readline(): + # netcat traditional uses "listening", others "Listening" + # so just omit the first char + if "istening " in p.stdout.readline(): break # now connect with patch("time.sleep") as mocked_sleep: @@ -50,6 +52,6 @@ def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): @pytest.mark.skipif(not has_executable("nc"), reason="needs nc") def test_wait_ssh_ready_integration(free_port, tmp_path): with contextlib.ExitStack() as cm: - p = subprocess.Popen(f"echo OpenSSH | nc -l {free_port}", shell=True) + p = subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) cm.callback(p.kill) wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=10) From 0b6c01dbebbc04ddc4b726085e0f4d4267112bed Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 1 Jan 2024 21:25:08 +0100 Subject: [PATCH 018/279] test: use indirect `pytest.mark.parametrize` to build image This commit tweaks the image tests to use indirect fixtures when creating the images. This allows one to write something like: ``` sudo pytest-3 -k 'test_image_is_generated[ami]' ``` to select only specific images to test.
--- test/bib/test_smoke.py | 102 +++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 0063282c..1b58d683 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -1,9 +1,9 @@ -import collections import json import os import pathlib import re import subprocess +from typing import NamedTuple import pytest @@ -35,13 +35,47 @@ def build_container_fixture(): return container_tag -def build_image(build_container, output_path, image_type): +# image types to test +SUPPORTED_IMAGE_TYPES = ["qcow2", "ami"] + + +class ImageBuildResult(NamedTuple): + img_path: str + username: str + password: str + journal_output: str + + +@pytest.fixture(name="image_type", scope="session") +def image_type_fixture(tmpdir_factory, build_container, request): """ - Build an image inside the passed build_container and return a - named tuple with the resulting image path and user/password + Build an image inside the passed build_container and return an + ImageBuildResult with the resulting image path and user/password """ + # image_type is passed via special pytest parameter fixture + image_type = request.param + username = "test" password = "password" + + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + + journal_log_path = output_path / "journal.log" + artifact = { + "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", + "ami": pathlib.Path(output_path) / "image/disk.raw", + } + assert len(artifact) == len(SUPPORTED_IMAGE_TYPES), \ + "please keep artifact mapping and supported images in sync" + generated_img = artifact[image_type] + + # if the fixture already ran and generated an image, use that + if generated_img.exists(): + journal_output = journal_log_path.read_text(encoding="utf8") + return ImageBuildResult(generated_img, username, password, journal_output) + + # no image yet, build it CFG = { "blueprint": { 
"customizations": { @@ -73,53 +107,31 @@ def build_image(build_container, output_path, image_type): "--type", image_type, ]) journal_output = testutil.journal_after_cursor(cursor) + journal_log_path.write_text(journal_output, encoding="utf8") - artifact = { - "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", - "ami": pathlib.Path(output_path) / "image/disk.raw", - } - generated_img = artifact[image_type] - ImageBuildResult = collections.namedtuple( - "ImageBuildResult", ["img_path", "username", "password", "journal_output"]) return ImageBuildResult(generated_img, username, password, journal_output) -@pytest.fixture(name="build_image_qcow2", scope="session") -def build_qcow2_fixture(tmpdir_factory, build_container): - output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" - output_path.mkdir(exist_ok=True) - - return build_image(build_container, output_path, "qcow2") - - -@pytest.fixture(name="build_image_ami", scope="session") -def build_ami_fixture(tmpdir_factory, build_container): - output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" - output_path.mkdir(exist_ok=True) - - return build_image(build_container, output_path, "ami") - - def test_container_builds(build_container): output = subprocess.check_output([ "podman", "images", "-n", build_container], encoding="utf-8") assert build_container in output -def test_image_is_generated(build_image_qcow2, build_image_ami): - for image in [build_image_qcow2, build_image_ami]: - assert image.img_path.exists(), "output file missing, dir "\ - f"content: {os.listdir(os.fspath(image.img_path))}" +@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +def test_image_is_generated(image_type): + assert image_type.img_path.exists(), "output file missing, dir "\ + f"content: {os.listdir(os.fspath(image_type.img_path))}" -def test_image_boots(build_image_qcow2, build_image_ami): - for image in [build_image_qcow2, build_image_ami]: - with VM(image.img_path) as 
test_vm: - exit_status, _ = test_vm.run("true", user=image.username, password=image.password) - assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user="test", password="password") - assert exit_status == 0 - assert "hello" in output +@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +def test_image_boots(image_type): + with VM(image_type.img_path) as test_vm: + exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) + assert exit_status == 0 + assert "hello" in output def log_has_osbuild_selinux_denials(log): @@ -149,9 +161,9 @@ def has_selinux(): @pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") -def test_image_build_without_se_linux_denials(build_image_qcow2, build_image_ami): - for build_image in [build_image_qcow2, build_image_ami]: - # the journal always contains logs from the image building - assert build_image.journal_output != "" - assert not log_has_osbuild_selinux_denials(build_image.journal_output), \ - f"denials in log {build_image.journal_output}" +@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +def test_image_build_without_se_linux_denials(image_type): + # the journal always contains logs from the image building + assert image_type.journal_output != "" + assert not log_has_osbuild_selinux_denials(image_type.journal_output), \ + f"denials in log {image_type.journal_output}" From 05469c3793c6c4208a08eed03b527f2a5ceb6cca Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 5 Dec 2023 08:22:51 +0100 Subject: [PATCH 019/279] tests: run integration test on macos as well --- .github.com/workflows/bibtests.yaml | 22 ++++++++++++++++++++ test/bib/requirements.txt | 6 +++--- test/bib/test_smoke.py | 8 +++++--- test/bib/testutil.py | 32 +++++++++++++++++++++++++++-- 
test/bib/testutil_test.py | 4 +++- test/bib/vm.py | 1 + 6 files changed, 64 insertions(+), 9 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index dad7a8a0..70e1024e 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -76,3 +76,25 @@ jobs: # XDG_RUNTIME_DIR is set. # TODO: figure out what exactly podman needs sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv + + integration-macos: + name: "Integration macos" + # needed to get latest cpu + runs-on: macos-13 + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Setup up python + uses: actions/setup-python@v5 + with: + cache: 'pip' + - run: pip install -r test/requirements.txt + - name: Setup up podman + run: | + brew install podman netcat + podman machine init --rootful + podman machine start + - name: Run tests + run: | + pytest -rs -s -vv diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt index 5d0fbeed..8f0230e3 100644 --- a/test/bib/requirements.txt +++ b/test/bib/requirements.txt @@ -1,3 +1,3 @@ -pytest -paramiko -flake8 \ No newline at end of file +pytest==7.4.3 +flake8==6.1.0 +paramiko==2.12.0 diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 1b58d683..97c990c1 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -1,6 +1,7 @@ import json import os import pathlib +import platform import re import subprocess from typing import NamedTuple @@ -14,12 +15,12 @@ if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) -if os.getuid() != 0: - pytest.skip("tests require root to run", allow_module_level=True) +if not testutil.can_start_rootful_containers(): + pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) # building an ELN image needs x86_64-v3 to work, we use avx2 as a proxy # to detect if we have 
x86-64-v3 (not perfect but should be good enough) -if not testutil.has_x86_64_v3_cpu(): +if platform.system() == "Linux" and platform.machine() == "x86_64" and not testutil.has_x86_64_v3_cpu(): pytest.skip("need x86_64-v3 capable CPU", allow_module_level=True) @@ -124,6 +125,7 @@ def test_image_is_generated(image_type): f"content: {os.listdir(os.fspath(image_type.img_path))}" +@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) def test_image_boots(image_type): with VM(image_type.img_path) as test_vm: diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 3bd59c84..ae991aed 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -1,18 +1,28 @@ +import os import pathlib +import platform import socket import shutil import subprocess import time +def run_journalctl(*args): + pre = [] + if platform.system() == "Darwin": + pre = ["podman", "machine", "ssh"] + cmd = pre + ["journalctl"] + list(args) + return subprocess.check_output(cmd, encoding="utf-8").strip() + + def journal_cursor(): - output = subprocess.check_output(["journalctl", "-n0", "--show-cursor"], encoding="utf-8").strip() + output = run_journalctl("-n0", "--show-cursor") cursor = output.split("\n")[-1] return cursor.split("cursor: ")[-1] def journal_after_cursor(cursor): - output = subprocess.check_output(["journalctl", f"--after-cursor={cursor}"], encoding="utf8") + output = run_journalctl(f"--after-cursor={cursor}") return output @@ -47,3 +57,21 @@ def has_x86_64_v3_cpu(): # https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels # but "avx2" is probably a good enough proxy return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text() + + +def can_start_rootful_containers(): + match platform.system(): + case "Linux": + # on linux we need to run "podman" with sudo to get full + # root containers + return os.getuid() == 0 + case "Darwin": + # on darwin a 
container is root if the podman machine runs + # in "rootful" mode, i.e. no need to run "podman" as root + # as it's just proxying to the VM + res = subprocess.run([ + "podman", "machine", "inspect", "--format={{.Rootful}}", + ], capture_output=True, encoding="utf8", check=True) + return res.stdout.strip() == "true" + case unknown: + raise ValueError(f"unknown platform {unknown}") diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index f1e8bea4..9403f8bd 100644 --- a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -1,4 +1,5 @@ import contextlib +import platform import subprocess from unittest.mock import call, patch @@ -28,7 +29,7 @@ def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): with contextlib.ExitStack() as cm: p = subprocess.Popen( - f"echo not-ssh | nc -v -l -p {free_port}", + f"echo not-ssh | nc -vv -l -p {free_port}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -49,6 +50,7 @@ def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] +@pytest.mark.skipif(platform.system() == "Darwin", reason="hangs on macOS") @pytest.mark.skipif(not has_executable("nc"), reason="needs nc") def test_wait_ssh_ready_integration(free_port, tmp_path): with contextlib.ExitStack() as cm: diff --git a/test/bib/vm.py b/test/bib/vm.py index 51bb7682..44c691b0 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -11,6 +11,7 @@ class VM: MEM = "2000" + # TODO: support qemu-system-aarch64 too :) QEMU = "qemu-system-x86_64" def __init__(self, img, snapshot=True): From d652759c4fc85d4388d8798342bb6ade4acce767 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jan 2024 10:48:40 +0100 Subject: [PATCH 020/279] tests: run macos test on self hosted runners For reference the runner was setup as described in: https://github.com/actions/runner/issues/1056#issuecomment-1237426462 to workaround 
the issue described in the bugreport https://github.com/actions/runner/issues/1056 --- .github.com/workflows/bibtests.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 70e1024e..97fac326 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,8 +79,9 @@ jobs: integration-macos: name: "Integration macos" - # needed to get latest cpu - runs-on: macos-13 + # disabled GH runner as it takes ~50min to run this test + #runs-on: macos-13 # needed to get latest cpu + runs-on: self-hosted steps: - uses: actions/checkout@v4 with: From fe4b42e61a1aa7ec392cc710f8a64a3ef30f6fa5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jan 2024 10:50:46 +0100 Subject: [PATCH 021/279] workflow: tweak macos setup of self hosted runner --- .github.com/workflows/bibtests.yaml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 97fac326..0e530b31 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,7 +79,8 @@ jobs: integration-macos: name: "Integration macos" - # disabled GH runner as it takes ~50min to run this test + # disabled GH runner as it takes ~50min to run this test, self-hosted + # is much faster (~15min) #runs-on: macos-13 # needed to get latest cpu runs-on: self-hosted steps: @@ -90,12 +91,16 @@ jobs: uses: actions/setup-python@v5 with: cache: 'pip' - - run: pip install -r test/requirements.txt + - run: python3 -m pip install -r test/requirements.txt - name: Setup up podman run: | brew install podman netcat - podman machine init --rootful - podman machine start + if ! 
podman machine inspect; then + podman machine init --rootful + fi + if [ "$(podman machine inspect --format='{{.State}}')" != "running" ]; then + podman machine start + fi - name: Run tests run: | pytest -rs -s -vv From b47fb1147bf42dc4d753d02bf0024685bede1cda Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 9 Jan 2024 12:27:35 +0100 Subject: [PATCH 022/279] github: add AWS secrets to env vars for integration tests --- .github.com/workflows/bibtests.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 0e530b31..dd8989c0 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -71,6 +71,9 @@ jobs: run: | sudo apt install -y podman python3-pytest python3-paramiko flake8 qemu-system-x86 - name: Run tests + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set. @@ -102,5 +105,8 @@ jobs: podman machine start fi - name: Run tests + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | pytest -rs -s -vv From 8dce900e0f0708cc455a08f0f54de448955c1243 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 5 Jan 2024 10:47:23 +0100 Subject: [PATCH 023/279] test: add `BIB_TEST_{BUILD,BOOTC}_CONTAINER_TAG` to customize tests There is a desire to reuse our tests to also test when new bootc containers are generated that they still build and boot. To do this we need a way to customize the tests further so that: a) we can omit building our code and instead use a "stable" container that contains bootc-image-builder b) we can specify what bootc (target) image to test so that the bootc building team can point to their staging area This is currently done via environment variables.
It might be worth exploring the pytest commandline option support [0] but that can be done in a followup. [0] https://docs.pytest.org/en/7.1.x/example/simple.html --- test/bib/test_smoke.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 97c990c1..03e9fd85 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -27,6 +27,9 @@ @pytest.fixture(name="build_container", scope="session") def build_container_fixture(): """Build a container from the Containerfile and returns the name""" + if tag_from_env := os.getenv("BIB_TEST_BUILD_CONTAINER_TAG"): + return tag_from_env + container_tag = "bootc-image-builder-test" subprocess.check_call([ "podman", "build", @@ -53,6 +56,13 @@ @pytest.fixture(name="image_type", scope="session") def image_type_fixture(tmpdir_factory, build_container, request): """ Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password """ + # TODO: make this another indirect fixture input, e.g. by making + # "image_type" an "image" tuple (type, container_ref_to_test) + container_to_build_ref = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/fedora-bootc:eln", + ) + # image_type is passed via special pytest parameter fixture image_type = request.param @@ -103,7 +113,7 @@ "-v", f"{output_path}:/output", "-v", "/store", # share the cache between builds build_container, - "quay.io/centos-bootc/fedora-bootc:eln", + container_to_build_ref, "--config", "/output/config.json", "--type", image_type, ]) From 2f3bb0b247f7ba9886df2d2df8116199ba2a73a3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Jan 2024 12:02:28 -0500 Subject: [PATCH 024/279] ci: Ensure we `apt update` before `apt install` Otherwise we can operate on cached metadata and fail.
--- .github.com/workflows/bibtests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index dd8989c0..8d66fc44 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -69,6 +69,7 @@ jobs: uses: actions/setup-python@v5 - name: Install test dependencies run: | + sudo apt update sudo apt install -y podman python3-pytest python3-paramiko flake8 qemu-system-x86 - name: Run tests env: From f7f1635ef5d18252b04f05ca810282083bb00edd Mon Sep 17 00:00:00 2001 From: Simon de Vlieger Date: Wed, 10 Jan 2024 14:57:10 +0100 Subject: [PATCH 025/279] ci: apt update before install --- .github.com/workflows/bibtests.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 8d66fc44..c426468c 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -67,6 +67,8 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - name: Setup up python uses: actions/setup-python@v5 + - name: Apt update + run: sudo apt update - name: Install test dependencies run: | sudo apt update From fe2998a9474857b82e8cfe1031c208b13ca1a007 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 08:53:45 +0100 Subject: [PATCH 026/279] test: make QEMU subclass of VM class Split out the qemu bits from the VM class and make a subclass called QEMU. The base VM class will be subclassed to make more implementations for other virtualisation environments and cloud platforms. 
--- test/bib/test_smoke.py | 4 +-- test/bib/testutil.py | 6 ++-- test/bib/testutil_test.py | 8 ++--- test/bib/vm.py | 71 +++++++++++++++++++++++++++------------ 4 files changed, 59 insertions(+), 30 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 03e9fd85..f9d3cf60 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -10,7 +10,7 @@ # local test utils import testutil -from vm import VM +from vm import QEMU if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) @@ -138,7 +138,7 @@ def test_image_is_generated(image_type): @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) def test_image_boots(image_type): - with VM(image_type.img_path) as test_vm: + with QEMU(image_type.img_path) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index ae991aed..5067c5a9 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -1,8 +1,8 @@ import os import pathlib import platform -import socket import shutil +import socket import subprocess import time @@ -37,12 +37,12 @@ def get_free_port() -> int: return s.getsockname()[1] -def wait_ssh_ready(port, sleep, max_wait_sec): +def wait_ssh_ready(address, port, sleep, max_wait_sec): for i in range(int(max_wait_sec / sleep)): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.settimeout(sleep) try: - s.connect(("localhost", port)) + s.connect((address, port)) data = s.recv(256) if b"OpenSSH" in data: return diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index 9403f8bd..783e0f0a 100644 --- 
a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -5,7 +5,7 @@ import pytest -from testutil import has_executable, get_free_port, wait_ssh_ready +from testutil import get_free_port, has_executable, wait_ssh_ready def test_get_free_port(): @@ -21,7 +21,7 @@ def free_port_fixture(): @patch("time.sleep") def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): with pytest.raises(ConnectionRefusedError): - wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=0.35) + wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.35) assert mocked_sleep.call_args_list == [call(0.1), call(0.1), call(0.1)] @@ -45,7 +45,7 @@ def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): # now connect with patch("time.sleep") as mocked_sleep: with pytest.raises(ConnectionRefusedError): - wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=0.55) + wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55) assert mocked_sleep.call_args_list == [ call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] @@ -56,4 +56,4 @@ def test_wait_ssh_ready_integration(free_port, tmp_path): with contextlib.ExitStack() as cm: p = subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) cm.callback(p.kill) - wait_ssh_ready(free_port, sleep=0.1, max_wait_sec=10) + wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10) diff --git a/test/bib/vm.py b/test/bib/vm.py index 44c691b0..565d24f8 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,33 +1,74 @@ +import abc import pathlib import subprocess import sys from io import StringIO +from paramiko.client import AutoAddPolicy, SSHClient + from testutil import get_free_port, wait_ssh_ready -from paramiko.client import AutoAddPolicy, SSHClient +class VM(abc.ABC): + + def __init__(self): + self._ssh_port = None + self._address = None + + def __del__(self): + self.force_stop() + + @abc.abstractmethod + def start(self): + """ + Start the VM. 
This method will be called automatically if it is not called explicitly before calling run(). + """ + def _log(self, msg): + # XXX: use a proper logger + sys.stdout.write(msg.rstrip("\n") + "\n") + + def wait_ssh_ready(self): + wait_ssh_ready(self._address, self._ssh_port, sleep=1, max_wait_sec=600) + + @abc.abstractmethod + def force_stop(self): + """ + Stop the VM and clean up any resources that were created when setting up and starting the machine. + """ + + @abc.abstractmethod + def run(self, cmd, user, password): + """ + Run a command on the VM via SSH using the provided credentials. + """ + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, tb): + self.force_stop() + + +class QEMU(VM): -class VM: MEM = "2000" # TODO: support qemu-system-aarch64 too :) QEMU = "qemu-system-x86_64" def __init__(self, img, snapshot=True): + super().__init__() self._img = pathlib.Path(img) self._qemu_p = None - self._ssh_port = None self._snapshot = snapshot - def __del__(self): - self.force_stop() - def start(self): if self._qemu_p is not None: return log_path = self._img.with_suffix(".serial-log") self._ssh_port = get_free_port() + self._address = "localhost" qemu_cmdline = [ self.QEMU, "-enable-kvm", "-m", self.MEM, @@ -51,24 +92,12 @@ def start(self): self.wait_ssh_ready() self._log(f"vm ready at port {self._ssh_port}") - def _log(self, msg): - # XXX: use a proper logger - sys.stdout.write(msg.rstrip("\n") + "\n") - - def wait_ssh_ready(self): - wait_ssh_ready(self._ssh_port, sleep=1, max_wait_sec=600) - def force_stop(self): if self._qemu_p: self._qemu_p.kill() self._qemu_p = None - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, tb): - self.force_stop() + self._address = None + self._ssh_port = None def run(self, cmd, user, password): if not self._qemu_p: @@ -76,7 +105,7 @@ def run(self, cmd, user, password): client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy) client.connect( - 
"localhost", self._ssh_port, user, password, + self._address, self._ssh_port, user, password, allow_agent=False, look_for_keys=False) chan = client.get_transport().open_session() chan.get_pty() From 25eb6ee10305399fc6794aedc478b42edc27fbf9 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 08:58:49 +0100 Subject: [PATCH 027/279] test/vm: rename __exit__ args Linter complains about redefining the `type` built-in and `tb` not conforming to naming style. Renamed to match the names as they are in the docs. --- test/bib/vm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/vm.py b/test/bib/vm.py index 565d24f8..ddf3dbdd 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -47,7 +47,7 @@ def __enter__(self): self.start() return self - def __exit__(self, type, value, tb): + def __exit__(self, exc_type, exc_value, traceback): self.force_stop() From ab2ecab9c7cf08605ae4e62f46d07aa2a1642e6a Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 11:10:21 +0100 Subject: [PATCH 028/279] test: add boto3 to requirements.txt and test deps Add the AWS python SDK to the test requirements. 
--- .github.com/workflows/bibtests.yaml | 2 +- test/bib/requirements.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index c426468c..a83f9ea3 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -72,7 +72,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y podman python3-pytest python3-paramiko flake8 qemu-system-x86 + sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 - name: Run tests env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt index 8f0230e3..55b5dbca 100644 --- a/test/bib/requirements.txt +++ b/test/bib/requirements.txt @@ -1,3 +1,4 @@ pytest==7.4.3 flake8==6.1.0 paramiko==2.12.0 +boto3==1.33.13 From 216d2d94d0ff359ea0dab2a3471fa0f412c88f44 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 13:17:17 +0100 Subject: [PATCH 029/279] vm: make AWS subclass of VM class The AWS class manages a single EC2 instance from a given AMI. Entering the context defined by the class (i.e. with AWS(id) ...) performs the following actions (defined in start()): - Create a security group that allows SSH logins from any IP. - Create an instance from the given AMI ID and include the new security group. - Block until the instance is running. On exit, the instance is terminated and the security group deleted (in that order). Credentials aren't managed by the class. They are assumed to be managed externally (env vars). 
Based heavily on examples found in the aws sdk docs: https://github.com/awsdocs/aws-doc-sdk-examples/blob/afb3309b385db90e6ff01c0268429cde9610f53e/python/example_code/ec2/ --- test/bib/testutil.py | 2 + test/bib/vm.py | 122 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 1 deletion(-) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 5067c5a9..b5da8095 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -6,6 +6,8 @@ import subprocess import time +AWS_REGION = "us-east-1" + def run_journalctl(*args): pre = [] diff --git a/test/bib/vm.py b/test/bib/vm.py index ddf3dbdd..e65efeca 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -2,11 +2,14 @@ import pathlib import subprocess import sys +import uuid from io import StringIO +import boto3 +from botocore.exceptions import ClientError from paramiko.client import AutoAddPolicy, SSHClient -from testutil import get_free_port, wait_ssh_ready +from testutil import AWS_REGION, get_free_port, wait_ssh_ready class VM(abc.ABC): @@ -120,3 +123,120 @@ def run(self, cmd, user, password): output.write(out) exit_status = stdout_f.channel.recv_exit_status() return exit_status, output.getvalue() + + +class AWS(VM): + + _instance_type = "t3.medium" # set based on architecture when we add arm tests + + def __init__(self, ami_id): + super().__init__() + self._ssh_port = 22 + self._ami_id = ami_id + self._ec2_instance = None + self._ec2_security_group = None + self._ec2_resource = boto3.resource("ec2", region_name=AWS_REGION) + + def start(self): + sec_group_ids = [] + if not self._ec2_security_group: + self._set_ssh_security_group() + sec_group_ids = [self._ec2_security_group.id] + try: + self._log(f"Creating ec2 instance from {self._ami_id}") + instances = self._ec2_resource.create_instances( + ImageId=self._ami_id, + InstanceType=self._instance_type, + SecurityGroupIds=sec_group_ids, + MinCount=1, MaxCount=1 + ) + self._ec2_instance = instances[0] + self._log(f"Waiting for 
instance {self._ec2_instance.id} to start") + self._ec2_instance.wait_until_running() + self._ec2_instance.reload() # make sure the instance info is up to date + self._address = self._ec2_instance.public_ip_address + self._log("Instance is running") + self.wait_ssh_ready() + self._log("SSH is ready") + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + self._log(f"Couldn't create instance with image {self._ami_id} and type {self._instance_type}.") + self._log(f"Error {err_code}: {err_msg}") + raise + + def _set_ssh_security_group(self): + group_name = f"bootc-image-builder-test-{str(uuid.uuid4())}" + group_desc = "bootc-image-builder test security group: SSH rule" + try: + self._log(f"Creating security group {group_name}") + self._ec2_security_group = self._ec2_resource.create_security_group(GroupName=group_name, + Description=group_desc) + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": self._ssh_port, + "ToPort": self._ssh_port, + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], + } + ] + self._log(f"Authorizing inbound rule for {group_name} ({self._ec2_security_group})") + self._ec2_security_group.authorize_ingress(IpPermissions=ip_permissions) + self._log("Security group created") + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + self._log(f"Couldn't create security group {group_name} or authorize inbound rule.") + self._log(f"Error {err_code}: {err_msg}") + raise + + def force_stop(self): + if self._ec2_instance: + self._log(f"Terminating instance {self._ec2_instance.id}") + try: + self._ec2_instance.terminate() + self._ec2_instance.wait_until_terminated() + self._ec2_instance = None + self._address = None + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + self._log(f"Couldn't terminate instance {self._ec2_instance.id}.") + self._log(f"Error {err_code}: 
{err_msg}") + else: + self._log("No EC2 instance defined. Skipping termination.") + + if self._ec2_security_group: + self._log(f"Deleting security group {self._ec2_security_group.id}") + try: + self._ec2_security_group.delete() + self._ec2_security_group = None + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + self._log(f"Couldn't delete security group {self._ec2_security_group.id}.") + self._log(f"Error {err_code}: {err_msg}") + else: + self._log("No security group defined. Skipping deletion.") + + def run(self, cmd, user, password): + if not self._ec2_instance: + self.start() + client = SSHClient() + client.set_missing_host_key_policy(AutoAddPolicy) + client.connect( + self._address, self._ssh_port, user, password, + allow_agent=False, look_for_keys=False) + chan = client.get_transport().open_session() + chan.get_pty() + chan.exec_command(cmd) + stdout_f = chan.makefile() + output = StringIO() + while True: + out = stdout_f.readline() + if not out: + break + self._log(out) + output.write(out) + exit_status = stdout_f.channel.recv_exit_status() + return exit_status, output.getvalue() From 4a4f4b3622edd7caf892326f0370e81033fba86d Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 9 Jan 2024 14:43:57 +0100 Subject: [PATCH 030/279] test: unify vm.run() implementations Add a new running() method to the base class so that each subclass can define their own condition for determining if the VM is running. This and the IP address were the only things that differed in the two run() implementations. --- test/bib/vm.py | 77 ++++++++++++++++++++++---------------------------- 1 file changed, 33 insertions(+), 44 deletions(-) diff --git a/test/bib/vm.py b/test/bib/vm.py index e65efeca..7a8a6674 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -40,11 +40,36 @@ def force_stop(self): Stop the VM and clean up any resources that were created when setting up and starting the machine. 
""" - @abc.abstractmethod def run(self, cmd, user, password): """ Run a command on the VM via SSH using the provided credentials. """ + if not self.running(): + self.start() + client = SSHClient() + client.set_missing_host_key_policy(AutoAddPolicy) + client.connect( + self._address, self._ssh_port, user, password, + allow_agent=False, look_for_keys=False) + chan = client.get_transport().open_session() + chan.get_pty() + chan.exec_command(cmd) + stdout_f = chan.makefile() + output = StringIO() + while True: + out = stdout_f.readline() + if not out: + break + self._log(out) + output.write(out) + exit_status = stdout_f.channel.recv_exit_status() + return exit_status, output.getvalue() + + @abc.abstractmethod + def running(self): + """ + True if the VM is running. + """ def __enter__(self): self.start() @@ -67,7 +92,7 @@ def __init__(self, img, snapshot=True): self._snapshot = snapshot def start(self): - if self._qemu_p is not None: + if self.running(): return log_path = self._img.with_suffix(".serial-log") self._ssh_port = get_free_port() @@ -102,27 +127,8 @@ def force_stop(self): self._address = None self._ssh_port = None - def run(self, cmd, user, password): - if not self._qemu_p: - self.start() - client = SSHClient() - client.set_missing_host_key_policy(AutoAddPolicy) - client.connect( - self._address, self._ssh_port, user, password, - allow_agent=False, look_for_keys=False) - chan = client.get_transport().open_session() - chan.get_pty() - chan.exec_command(cmd) - stdout_f = chan.makefile() - output = StringIO() - while True: - out = stdout_f.readline() - if not out: - break - self._log(out) - output.write(out) - exit_status = stdout_f.channel.recv_exit_status() - return exit_status, output.getvalue() + def running(self): + return self._qemu_p is not None class AWS(VM): @@ -138,6 +144,8 @@ def __init__(self, ami_id): self._ec2_resource = boto3.resource("ec2", region_name=AWS_REGION) def start(self): + if self.running(): + return sec_group_ids = [] if not 
self._ec2_security_group: self._set_ssh_security_group() @@ -219,24 +227,5 @@ def force_stop(self): else: self._log("No security group defined. Skipping deletion.") - def run(self, cmd, user, password): - if not self._ec2_instance: - self.start() - client = SSHClient() - client.set_missing_host_key_policy(AutoAddPolicy) - client.connect( - self._address, self._ssh_port, user, password, - allow_agent=False, look_for_keys=False) - chan = client.get_transport().open_session() - chan.get_pty() - chan.exec_command(cmd) - stdout_f = chan.makefile() - output = StringIO() - while True: - out = stdout_f.readline() - if not out: - break - self._log(out) - output.write(out) - exit_status = stdout_f.channel.recv_exit_status() - return exit_status, output.getvalue() + def running(self): + return self._ec2_instance is not None From 0e91b437aca0257a4be05268394cce3d972262c1 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 18:23:46 +0100 Subject: [PATCH 031/279] testutil: AWS utility functions write_aws_creds(): writes the AWS credentials defined in the environment to a file in the format required by the AWS SDK. deregister_ami(): deregister an image from EC2 given its ID. 
--- test/bib/test_smoke.py | 4 +++- test/bib/testutil.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index f9d3cf60..c768bc03 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -4,13 +4,15 @@ import platform import re import subprocess +import tempfile +import uuid from typing import NamedTuple import pytest # local test utils import testutil -from vm import QEMU +from vm import AWS, QEMU if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index b5da8095..26c8f43e 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -6,6 +6,9 @@ import subprocess import time +import boto3 +from botocore.exceptions import ClientError + AWS_REGION = "us-east-1" @@ -77,3 +80,28 @@ def can_start_rootful_containers(): return res.stdout.strip() == "true" case unknown: raise ValueError(f"unknown platform {unknown}") + + +def write_aws_creds(path): + key_id = os.environ.get("AWS_ACCESS_KEY_ID") + secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + if not key_id or not secret_key: + raise RuntimeError("aws credentials not available") + with open(path, mode="w", encoding="utf-8") as creds_file: + creds_file.write("[default]\n") + creds_file.write(f"aws_access_key_id = {key_id}\n") + creds_file.write(f"aws_secret_access_key = {secret_key}\n") + + +def deregister_ami(ami_id): + ec2 = boto3.resource("ec2", region_name=AWS_REGION) + try: + print(f"Deregistering image {ami_id}") + ami = ec2.Image(ami_id) + ami.deregister() + print("Image deregistered") + except ClientError as err: + err_code = err.response["Error"]["Code"] + err_msg = err.response["Error"]["Message"] + print(f"Couldn't deregister image {ami_id}.") + print(f"Error {err_code}: {err_msg}") From 2d1a62dffbcb260677b898092093520858650bbe Mon Sep 17 00:00:00 2001 
From: Achilleas Koutsou Date: Mon, 8 Jan 2024 18:47:06 +0100 Subject: [PATCH 032/279] test/image_type_fixture: add upload options when building AMI When building AMIs, include upload options and credentials: - the `--aws-...` options are added as upload_args to the build command. - credentials are read from the environment, written to a temporary file, and mounted into the container. The environment variables will be set through the CI config. --- test/bib/test_smoke.py | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index c768bc03..c25018c2 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -107,18 +107,36 @@ def image_type_fixture(tmpdir_factory, build_container, request): config_json_path.write_text(json.dumps(CFG), encoding="utf-8") cursor = testutil.journal_cursor() - # run container to deploy an image into output/qcow2/disk.qcow2 - subprocess.check_call([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", f"{output_path}:/output", - "-v", "/store", # share the cache between builds - build_container, - container_to_build_ref, - "--config", "/output/config.json", - "--type", image_type, - ]) + + upload_args = [] + creds_args = [] + with tempfile.TemporaryDirectory() as tempdir: + if image_type == "ami": + upload_args = [ + f"--aws-ami-name=bootc-image-builder-test-{str(uuid.uuid4())}", + f"--aws-region={testutil.AWS_REGION}", + "--aws-bucket=bootc-image-builder-ci", + ] + + creds_file = pathlib.Path(tempdir) / "aws.creds" + testutil.write_aws_creds(creds_file) + creds_args = ["-v", f"{creds_file}:/root/.aws/credentials:ro", + "--env", "AWS_PROFILE=default"] + + # run container to deploy an image into a bootable disk and upload to a cloud service if applicable + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", 
f"{output_path}:/output", + "-v", "/store", # share the cache between builds + *creds_args, + build_container, + container_to_build_ref, + "--config", "/output/config.json", + "--type", image_type, + *upload_args, + ]) journal_output = testutil.journal_after_cursor(cursor) journal_log_path.write_text(journal_output, encoding="utf8") From a110f32dc1ca8296118a721d57562df10fe3cf80 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 20:20:07 +0100 Subject: [PATCH 033/279] test: add image type and metadata to ImageBuildResult Add the image type name and a metadata dict to the ImageBuildResult. We will use the image type to decide what we can do with the build result (like where and how to boot it). The metadata can be used to communicate information to downstream tests. Here we use it to include the AMI ID in the result. We could have let the tests that use the ImageBuildResult parse the log for the AMI ID, but it's probably better if we only have to do it once and also, parsing it in the image_type_fixture() lets us register the finalizer that will deregister the AMI from EC2. Parsing the log for the AMI ID is a bit dirty and is certainly not the most robust way to get it. We could later print it in a more machine-readable format. 
--- test/bib/test_smoke.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index c25018c2..5add63dc 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -46,10 +46,12 @@ def build_container_fixture(): class ImageBuildResult(NamedTuple): + img_type: str img_path: str username: str password: str journal_output: str + metadata: dict = {} @pytest.fixture(name="image_type", scope="session") @@ -86,7 +88,7 @@ def image_type_fixture(tmpdir_factory, build_container, request): # if the fixture already ran and generated an image, use that if generated_img.exists(): journal_output = journal_log_path.read_text(encoding="utf8") - return ImageBuildResult(generated_img, username, password, journal_output) + return ImageBuildResult(image_type, generated_img, username, password, journal_output) # no image yet, build it CFG = { @@ -138,9 +140,17 @@ def image_type_fixture(tmpdir_factory, build_container, request): *upload_args, ]) journal_output = testutil.journal_after_cursor(cursor) + metadata = {} + if image_type == "ami": + metadata["ami_id"] = parse_ami_id_from_log(journal_output) + + def del_ami(): + testutil.deregister_ami(metadata["ami_id"]) + request.addfinalizer(del_ami) + journal_log_path.write_text(journal_output, encoding="utf8") - return ImageBuildResult(generated_img, username, password, journal_output) + return ImageBuildResult(image_type, generated_img, username, password, journal_output, metadata) def test_container_builds(build_container): @@ -171,6 +181,13 @@ def log_has_osbuild_selinux_denials(log): return re.search(OSBUID_SELINUX_DENIALS_RE, log) +def parse_ami_id_from_log(log_output): + ami_id_re = re.compile(r"AMI registered: (?Pami-[a-z0-9]+)\n") + ami_ids = ami_id_re.findall(log_output) + assert len(ami_ids) > 0 + return ami_ids[0] + + def test_osbuild_selinux_denials_re_works(): fake_log = ( 'Dec 05 07:19:39 other log msg\n' From 
f60848d1fd3ac2bd8f058351887e9d6e5ea6dfdb Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 8 Jan 2024 20:27:28 +0100 Subject: [PATCH 034/279] test: add test_ami_boots_in_aws() A variant of the test_image_boots() test that only runs for the AMI image type. The function uses the AWS VM class to boot an AMI (given the AMI ID), run a couple of commands over SSH, then tears it down. As opposed to the local boot test, this one also runs on macOS. --- test/bib/test_smoke.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 5add63dc..27a022a3 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -176,6 +176,16 @@ def test_image_boots(image_type): assert "hello" in output +@pytest.mark.parametrize("image_type", ["ami"], indirect=["image_type"]) +def test_ami_boots_in_aws(image_type): + with AWS(image_type.metadata["ami_id"]) as test_vm: + exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) + assert exit_status == 0 + exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) + assert exit_status == 0 + assert "hello" in output + + def log_has_osbuild_selinux_denials(log): OSBUID_SELINUX_DENIALS_RE = re.compile(r"(?ms)avc:\ +denied.*osbuild") return re.search(OSBUID_SELINUX_DENIALS_RE, log) From 50ca5e5abd4089ff16a503b347f8f9c64a0acbb0 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 9 Jan 2024 17:02:54 +0100 Subject: [PATCH 035/279] test: skip or fail if AWS creds not set Add a new pytest flag, --force-aws-upload, which controls how the AWS build (and boot) test should behave when the AWS credentials aren't set: - if set, the image build will raise a RuntimeError, making any tests that depend on the fixture fail. - if not set, the upload flags will not be set and the AWS boot test is skipped. 
--- test/bib/conftest.py | 12 ++++++++++++ test/bib/test_smoke.py | 34 ++++++++++++++++++++++------------ test/bib/testutil.py | 5 ++++- 3 files changed, 38 insertions(+), 13 deletions(-) create mode 100644 test/bib/conftest.py diff --git a/test/bib/conftest.py b/test/bib/conftest.py new file mode 100644 index 00000000..faa51f96 --- /dev/null +++ b/test/bib/conftest.py @@ -0,0 +1,12 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption("--force-aws-upload", action="store_true", default=False, + help=("Force AWS upload when building AMI, failing if credentials are not set. " + "If not set, the upload will be performed only when credentials are available.")) + + +@pytest.fixture(name="force_aws_upload", scope="session") +def force_aws_upload_fixture(request): + return request.config.getoption("--force-aws-upload") diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 27a022a3..51d21a62 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -55,7 +55,7 @@ class ImageBuildResult(NamedTuple): @pytest.fixture(name="image_type", scope="session") -def image_type_fixture(tmpdir_factory, build_container, request): +def image_type_fixture(tmpdir_factory, build_container, request, force_aws_upload): """ Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password @@ -112,18 +112,22 @@ def image_type_fixture(tmpdir_factory, build_container, request): upload_args = [] creds_args = [] + with tempfile.TemporaryDirectory() as tempdir: if image_type == "ami": - upload_args = [ - f"--aws-ami-name=bootc-image-builder-test-{str(uuid.uuid4())}", - f"--aws-region={testutil.AWS_REGION}", - "--aws-bucket=bootc-image-builder-ci", - ] - creds_file = pathlib.Path(tempdir) / "aws.creds" - testutil.write_aws_creds(creds_file) - creds_args = ["-v", f"{creds_file}:/root/.aws/credentials:ro", - "--env", "AWS_PROFILE=default"] + if testutil.write_aws_creds(creds_file): + creds_args = 
["-v", f"{creds_file}:/root/.aws/credentials:ro", + "--env", "AWS_PROFILE=default"] + + upload_args = [ + f"--aws-ami-name=bootc-image-builder-test-{str(uuid.uuid4())}", + f"--aws-region={testutil.AWS_REGION}", + "--aws-bucket=bootc-image-builder-ci", + ] + elif force_aws_upload: + # upload forced but credentials aren't set + raise RuntimeError("AWS credentials not available (upload forced)") # run container to deploy an image into a bootable disk and upload to a cloud service if applicable subprocess.check_call([ @@ -141,7 +145,7 @@ def image_type_fixture(tmpdir_factory, build_container, request): ]) journal_output = testutil.journal_after_cursor(cursor) metadata = {} - if image_type == "ami": + if image_type == "ami" and upload_args: metadata["ami_id"] = parse_ami_id_from_log(journal_output) def del_ami(): @@ -177,7 +181,13 @@ def test_image_boots(image_type): @pytest.mark.parametrize("image_type", ["ami"], indirect=["image_type"]) -def test_ami_boots_in_aws(image_type): +def test_ami_boots_in_aws(image_type, force_aws_upload): + if not testutil.write_aws_creds("/dev/null"): # we don't care about the file, just the variables being there + if force_aws_upload: + # upload forced but credentials aren't set + raise RuntimeError("AWS credentials not available") + pytest.skip("AWS credentials not available (upload not forced)") + with AWS(image_type.metadata["ami_id"]) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 26c8f43e..3029d93d 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -86,12 +86,15 @@ def write_aws_creds(path): key_id = os.environ.get("AWS_ACCESS_KEY_ID") secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY") if not key_id or not secret_key: - raise RuntimeError("aws credentials not available") + return False + with open(path, mode="w", encoding="utf-8") as creds_file: creds_file.write("[default]\n") 
creds_file.write(f"aws_access_key_id = {key_id}\n") creds_file.write(f"aws_secret_access_key = {secret_key}\n") + return True + def deregister_ami(ami_id): ec2 = boto3.resource("ec2", region_name=AWS_REGION) From 355a81ad91b19433e3e93b9e1647c37c8463e2b4 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 10 Jan 2024 12:59:05 +0100 Subject: [PATCH 036/279] test: enable force-aws-upload for testing farm tests Add the AWS secrets to the testing farm action and enable `force-aws-upload` on the pytest call in the testing farm plan. Run these only in testing farm which runs on pull_request_target and can access GitHub secrets. --- test/bib/vm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/vm.py b/test/bib/vm.py index 7a8a6674..d22a3902 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -163,7 +163,7 @@ def start(self): self._ec2_instance.wait_until_running() self._ec2_instance.reload() # make sure the instance info is up to date self._address = self._ec2_instance.public_ip_address - self._log("Instance is running") + self._log(f"Instance is running at {self._address}") self.wait_ssh_ready() self._log("SSH is ready") except ClientError as err: From 0172e001fb3f36b6f00dffa44a04af979f8f78cb Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 10 Jan 2024 17:18:13 +0100 Subject: [PATCH 037/279] bib: add "raw" iamge format --- test/bib/test_smoke.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_smoke.py b/test/bib/test_smoke.py index 51d21a62..e6bf47c6 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_smoke.py @@ -42,7 +42,7 @@ def build_container_fixture(): # image types to test -SUPPORTED_IMAGE_TYPES = ["qcow2", "ami"] +SUPPORTED_IMAGE_TYPES = ["qcow2", "ami", "raw"] class ImageBuildResult(NamedTuple): @@ -80,6 +80,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa artifact = { "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": 
pathlib.Path(output_path) / "image/disk.raw", + "raw": pathlib.Path(output_path) / "image/disk.raw", } assert len(artifact) == len(SUPPORTED_IMAGE_TYPES), \ "please keep artifact mapping and supported images in sync" From 1d7561689a1c02a2e4a2143493e3257e24a26b62 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 11 Jan 2024 10:41:17 +0100 Subject: [PATCH 038/279] bib: implemnt `bootc-image-builder manifest` Now that we have a `bootc-image-builder build` verb we can have a `manifest` one as well that just generates the manifest but does not do the full building. This is mostly useful for testing so it's not exposed by default in the entrypoint (for now). --- test/bib/containerbuild.py | 28 +++++++++++++++++++++++ test/bib/{test_smoke.py => test_build.py} | 25 ++------------------ test/bib/test_manifest.py | 24 +++++++++++++++++++ 3 files changed, 54 insertions(+), 23 deletions(-) create mode 100644 test/bib/containerbuild.py rename test/bib/{test_smoke.py => test_build.py} (91%) create mode 100644 test/bib/test_manifest.py diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py new file mode 100644 index 00000000..96b24fc3 --- /dev/null +++ b/test/bib/containerbuild.py @@ -0,0 +1,28 @@ +import os +import subprocess + +import pytest + + +@pytest.fixture(name="build_container", scope="session") +def build_container_fixture(): + """Build a container from the Containerfile and returns the name""" + if tag_from_env := os.getenv("BIB_TEST_BUILD_CONTAINER_TAG"): + return tag_from_env + + container_tag = "bootc-image-builder-test" + subprocess.check_call([ + "podman", "build", + "-f", "Containerfile", + "-t", container_tag, + ]) + return container_tag + + +def container_to_build_ref(): + # TODO: make this another indirect fixture input, e.g. 
by making + # making "image_type" an "image" tuple (type, container_ref_to_test) + return os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/fedora-bootc:eln", + ) diff --git a/test/bib/test_smoke.py b/test/bib/test_build.py similarity index 91% rename from test/bib/test_smoke.py rename to test/bib/test_build.py index e6bf47c6..99a799d4 100644 --- a/test/bib/test_smoke.py +++ b/test/bib/test_build.py @@ -12,6 +12,7 @@ # local test utils import testutil +from containerbuild import build_container_fixture, container_to_build_ref # noqa: F401 from vm import AWS, QEMU if not testutil.has_executable("podman"): @@ -26,21 +27,6 @@ pytest.skip("need x86_64-v3 capable CPU", allow_module_level=True) -@pytest.fixture(name="build_container", scope="session") -def build_container_fixture(): - """Build a container from the Containerfile and returns the name""" - if tag_from_env := os.getenv("BIB_TEST_BUILD_CONTAINER_TAG"): - return tag_from_env - - container_tag = "bootc-image-builder-test" - subprocess.check_call([ - "podman", "build", - "-f", "Containerfile", - "-t", container_tag, - ]) - return container_tag - - # image types to test SUPPORTED_IMAGE_TYPES = ["qcow2", "ami", "raw"] @@ -60,13 +46,6 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password """ - # TODO: make this another indirect fixture input, e.g. 
by making - # making "image_type" an "image" tuple (type, container_ref_to_test) - container_to_build_ref = os.getenv( - "BIB_TEST_BOOTC_CONTAINER_TAG", - "quay.io/centos-bootc/fedora-bootc:eln", - ) - # image_type is passed via special pytest parameter fixture image_type = request.param @@ -139,7 +118,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "-v", "/store", # share the cache between builds *creds_args, build_container, - container_to_build_ref, + container_to_build_ref(), "--config", "/output/config.json", "--type", image_type, *upload_args, diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py new file mode 100644 index 00000000..3f9640ed --- /dev/null +++ b/test/bib/test_manifest.py @@ -0,0 +1,24 @@ +import json +import subprocess + +import pytest + +import testutil + + +if not testutil.has_executable("podman"): + pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) + +from containerbuild import build_container_fixture, container_to_build_ref # noqa: F401 + + +def test_manifest_smoke(build_container): + output = subprocess.check_output([ + "podman", "run", "--rm", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_to_build_ref()}"]', + build_container, + ]) + manifest = json.loads(output) + # just some basic validation + assert manifest["version"] == "2" + assert manifest["pipelines"][0]["name"] == "build" From 86a15a3d562d060a7f2517320b87ba29adfd9e63 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Jan 2024 13:33:07 +0100 Subject: [PATCH 039/279] testutil: remove "match" for centos8/9 compatbility Support for `match` in python is only available in version 3.10. However centos/rhel 8 and 9 have lower versions so this will need to be moved to `if/elif/else` for now. 
--- test/bib/testutil.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 3029d93d..732c921b 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -65,21 +65,21 @@ def has_x86_64_v3_cpu(): def can_start_rootful_containers(): - match platform.system(): - case "Linux": - # on linux we need to run "podman" with sudo to get full - # root containers - return os.getuid() == 0 - case "Darwin": - # on darwin a container is root if the podman machine runs - # in "rootful" mode, i.e. no need to run "podman" as root - # as it's just proxying to the VM - res = subprocess.run([ - "podman", "machine", "inspect", "--format={{.Rootful}}", - ], capture_output=True, encoding="utf8", check=True) - return res.stdout.strip() == "true" - case unknown: - raise ValueError(f"unknown platform {unknown}") + system = platform.system() + if system == "Linux": + # on linux we need to run "podman" with sudo to get full + # root containers + return os.getuid() == 0 + elif system == "Darwin": + # on darwin a container is root if the podman machine runs + # in "rootful" mode, i.e. no need to run "podman" as root + # as it's just proxying to the VM + res = subprocess.run([ + "podman", "machine", "inspect", "--format={{.Rootful}}", + ], capture_output=True, encoding="utf8", check=True) + return res.stdout.strip() == "true" + else: + raise ValueError(f"unknown platform {system}") From acfbb2f17d80662ecf07255b2b2c6c7e2648974b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 23 Jan 2024 15:12:25 +0100 Subject: [PATCH 040/279] test: use last known good quay.io/centos-bootc/fedora-bootc in tests Because of the issues with the latest https://github.com/CentOS/centos-bootc/issues/184 and https://github.com/osbuild/bootc-image-builder/issues/149 with the latest quay.io/centos-bootc/fedora-bootc:eln this commit moves to the last known good container id.
--- test/bib/containerbuild.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 96b24fc3..7a7f705c 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -24,5 +24,9 @@ def container_to_build_ref(): # making "image_type" an "image" tuple (type, container_ref_to_test) return os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", - "quay.io/centos-bootc/fedora-bootc:eln", + # using this tag instead of ":eln" until + # https://github.com/CentOS/centos-bootc/issues/184 and + # https://github.com/osbuild/bootc-image-builder/issues/149 + # are fixed + "quay.io/centos-bootc/fedora-bootc:ed19452a30c50900be0b78db5f68d9826cc14a2e402f752535716cffd92b4445", ) From 633249e68f4009d77390dda293ce3ae2ea51224b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 22 Jan 2024 21:48:15 +0100 Subject: [PATCH 041/279] test: add new `pytest.ini` and use `--basetemp=/var/tmp` by default Our test images can easily be 10GB and if /tmp is mounted via a tmpfs that can be too much for low memory systems. Hence switch to /var/tmp by default. Note that we do not really need 10GB as it's a sparse file but if /tmp is too small we still get ENOSPC.
--- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index a83f9ea3..baaf0bea 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -112,4 +112,4 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | - pytest -rs -s -vv + pytest -rs -s -vv --basetemp="${TMPDIR}/tmp" From db0791023ed7b4ffdf123ddae42314dc7166f97b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 11 Jan 2024 12:57:18 +0100 Subject: [PATCH 042/279] test: add container installer test Add a new integration test that checks that the container installer is working. The container installer just does an unattended install of a disk. The test will run qemu with the installer.iso and an empty disk. Once a reboot from the ISO is detected (via QMP) qemu exits and boots the test disk and checks that it boots and the test user can login. --- .github.com/workflows/bibtests.yaml | 2 + test/bib/requirements.txt | 1 + test/bib/test_build.py | 25 ++++++++++- test/bib/vm.py | 69 +++++++++++++++++++++++++---- 4 files changed, 87 insertions(+), 10 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index baaf0bea..f27ded36 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -78,6 +78,8 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | + # make sure test deps are available for root + sudo -E pip install --user -r test/requirements.txt # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set.
# TODO: figure out what exactly podman needs diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt index 55b5dbca..3d6c5505 100644 --- a/test/bib/requirements.txt +++ b/test/bib/requirements.txt @@ -2,3 +2,4 @@ pytest==7.4.3 flake8==6.1.0 paramiko==2.12.0 boto3==1.33.13 +qmp==1.1.0 diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 99a799d4..1364fe27 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -28,7 +28,9 @@ # image types to test -SUPPORTED_IMAGE_TYPES = ["qcow2", "ami", "raw"] +DIRECT_BOOT_IMAGE_TYPES = ["qcow2", "ami", "raw"] +INSTALLER_IMAGE_TYPES = ["iso"] +SUPPORTED_IMAGE_TYPES = DIRECT_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES class ImageBuildResult(NamedTuple): @@ -60,6 +62,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": pathlib.Path(output_path) / "image/disk.raw", "raw": pathlib.Path(output_path) / "image/disk.raw", + "iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(SUPPORTED_IMAGE_TYPES), \ "please keep artifact mapping and supported images in sync" @@ -150,7 +153,7 @@ def test_image_is_generated(image_type): @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +@pytest.mark.parametrize("image_type", DIRECT_BOOT_IMAGE_TYPES, indirect=["image_type"]) def test_image_boots(image_type): with QEMU(image_type.img_path) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) @@ -216,3 +219,21 @@ def test_image_build_without_se_linux_denials(image_type): assert image_type.journal_output != "" assert not log_has_osbuild_selinux_denials(image_type.journal_output), \ f"denials in log {image_type.journal_output}" + + +@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on 
linux right now") +@pytest.mark.parametrize("image_type", INSTALLER_IMAGE_TYPES, indirect=["image_type"]) +def test_iso_installs(image_type): + installer_iso_path = image_type.img_path + test_disk_path = installer_iso_path.with_name("test-disk.img") + with open(test_disk_path, "w") as fp: + fp.truncate(10_1000_1000_1000) + # install to test disk + with QEMU(test_disk_path, cdrom=installer_iso_path) as vm: + vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True) + vm.force_stop() + # boot test disk and do extremly simple check + with QEMU(test_disk_path) as vm: + vm.start(use_ovmf=True) + exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) + assert exit_status == 0 diff --git a/test/bib/vm.py b/test/bib/vm.py index d22a3902..3a558690 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,7 +1,9 @@ import abc +import os import pathlib import subprocess import sys +import time import uuid from io import StringIO @@ -72,26 +74,42 @@ def running(self): """ def __enter__(self): - self.start() return self def __exit__(self, exc_type, exc_value, traceback): self.force_stop() -class QEMU(VM): +# needed as each distro puts the OVMF.fd in a different location +def find_ovmf(): + for p in [ + "/usr/share/ovmf/OVMF.fd", # Debian + "/usr/share/OVMF/OVMF_CODE.fd", # Fedora + ]: + if os.path.exists(p): + return p + raise ValueError("cannot find a OVMF bios") + +class QEMU(VM): MEM = "2000" # TODO: support qemu-system-aarch64 too :) QEMU = "qemu-system-x86_64" - def __init__(self, img, snapshot=True): + def __init__(self, img, snapshot=True, cdrom=None): super().__init__() self._img = pathlib.Path(img) + self._qmp_socket = self._img.with_suffix(".qemp-socket") self._qemu_p = None self._snapshot = snapshot + self._cdrom = cdrom + self._ssh_port = None - def start(self): + def __del__(self): + self.force_stop() + + # XXX: move args to init() so that __enter__ can use them? 
+ def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): if self.running(): return log_path = self._img.with_suffix(".serial-log") @@ -107,18 +125,53 @@ def start(self): "-monitor", "none", "-netdev", f"user,id=net.0,hostfwd=tcp::{self._ssh_port}-:22", "-device", "rtl8139,netdev=net.0", + "-qmp", f"unix:{self._qmp_socket},server,nowait", ] - if self._snapshot: + if use_ovmf: + qemu_cmdline.extend(["-bios", find_ovmf()]) + if self._cdrom: + qemu_cmdline.extend(["-cdrom", self._cdrom]) + if snapshot: qemu_cmdline.append("-snapshot") qemu_cmdline.append(self._img) self._log(f"vm starting, log available at {log_path}") # XXX: use systemd-run to ensure cleanup? self._qemu_p = subprocess.Popen( - qemu_cmdline, stdout=sys.stdout, stderr=sys.stderr) + qemu_cmdline, + stdout=sys.stdout, + stderr=sys.stderr, + ) # XXX: also check that qemu is working and did not crash - self.wait_ssh_ready() - self._log(f"vm ready at port {self._ssh_port}") + ev = wait_event.split(":") + if ev == ["ssh"]: + self.wait_ssh_ready() + self._log(f"vm ready at port {self._ssh_port}") + elif ev[0] == "qmp": + qmp_event = ev[1] + self.wait_qmp_event(qmp_event) + self._log(f"qmp event {qmp_event}") + else: + raise ValueError(f"unsupported wait_event {wait_event}") + + def _wait_qmp_socket(self, timeout_sec): + for _ in range(timeout_sec): + if os.path.exists(self._qmp_socket): + return True + time.sleep(1) + raise Exception(f"no {self._qmp_socket} after {timeout_sec} seconds") + + def wait_qmp_event(self, qmp_event): + # import lazy to avoid requiring it for all operations + import qmp + self._wait_qmp_socket(30) + mon = qmp.QEMUMonitorProtocol(os.fspath(self._qmp_socket)) + mon.connect() + while True: + event = mon.pull_event(wait=True) + self._log(f"DEBUG: got event {event}") + if event["event"] == qmp_event: + return def force_stop(self): if self._qemu_p: From a33dc62ab691e922a96adafda5b87d7ca0e9f3bc Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Jan 2024 10:31:30 +0100 
Subject: [PATCH 043/279] tests: add new OSBUILD_TEST_QEMU_GUI env to make debugging easier When running tests locally it can be useful to have a full qemu gui. E.g. the install test is not putting anything on the serial port AFAICT. So the new environment `OSBUILD_TEST_QEMU_GUI=1` will bring up a graphical qemu during the tests. Use as e.g. ``` $ sudo OSBUILD_TEST_QEMU_GUI=1 pytest -s -vv -rs './test/test_build.py::test_iso_installs[iso]' ``` --- test/bib/vm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/vm.py b/test/bib/vm.py index 3a558690..ad640188 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -120,13 +120,14 @@ def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): "-m", self.MEM, # get "illegal instruction" inside the VM otherwise "-cpu", "host", - "-nographic", "-serial", "stdio", "-monitor", "none", "-netdev", f"user,id=net.0,hostfwd=tcp::{self._ssh_port}-:22", "-device", "rtl8139,netdev=net.0", "-qmp", f"unix:{self._qmp_socket},server,nowait", ] + if not os.environ.get("OSBUILD_TEST_QEMU_GUI"): + qemu_cmdline.append("-nographic") if use_ovmf: qemu_cmdline.extend(["-bios", find_ovmf()]) if self._cdrom: From 0cf3651fa7a80ebd843624171f22d4b8815e31eb Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Jan 2024 12:02:54 +0100 Subject: [PATCH 044/279] test: re-enable quay.io/centos-bootc/fedora-bootc:eln This unbreaks macos tests and also fedora-bootc:eln got fixed/reverted upstream.
--- test/bib/containerbuild.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 7a7f705c..96b24fc3 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -24,9 +24,5 @@ def container_to_build_ref(): # making "image_type" an "image" tuple (type, container_ref_to_test) return os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", - # using this tag instead of ":eln" until - # https://github.com/CentOS/centos-bootc/issues/184 and - # https://github.com/osbuild/bootc-image-builder/issues/149 - # are fixed - "quay.io/centos-bootc/fedora-bootc:ed19452a30c50900be0b78db5f68d9826cc14a2e402f752535716cffd92b4445", + "quay.io/centos-bootc/fedora-bootc:eln", ) From d0d543ea3837acf265faa64444a57ee43dfc8d5f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Jan 2024 15:11:00 +0100 Subject: [PATCH 045/279] tests: increase podman machine limits on macOS --- .github.com/workflows/bibtests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index f27ded36..df7d1dd7 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -105,6 +105,7 @@ jobs: brew install podman netcat if ! 
podman machine inspect; then podman machine init --rootful + podman machine set --cpus 4 --memory 4096 fi if [ "$(podman machine inspect --format='{{.State}}')" != "running" ]; then podman machine start From de8e05053315c9d143d1edb75ce5f0d4ea284646 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 2 Feb 2024 18:12:30 +0100 Subject: [PATCH 046/279] github: move to newer golangci-lint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have a bunch of false positive test failures like: ``` Check failure on line 4 in bib/cmd/bootc-image-builder/partition_tables.go GitHub Actions / ⌨ Lint "github.com/osbuild/images/pkg/arch" imported and not used (typecheck) ``` This commit tries to fix it by moving to a newer version of golangci-lint. --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index df7d1dd7..76fa1640 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -37,7 +37,7 @@ jobs: - name: Run golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.54.2 + version: v1.55.2 args: --timeout 5m0s working-directory: bib From a9590e1d141eb580d0ed8fefdd86acda005ee50b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 2 Feb 2024 19:13:10 +0100 Subject: [PATCH 047/279] github: add li{btrfs,devmapper}-dev to lint deps --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 76fa1640..187320a4 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -32,7 +32,7 @@ jobs: # This is needed for the container upload dependencies - name: Install libgpgme devel package - run: sudo apt install -y libgpgme-dev + run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - name: Run 
golangci-lint uses: golangci/golangci-lint-action@v3 From 9cf0363035f97938ae318ab825cf33976dd335a6 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 22 Jan 2024 16:45:10 +0100 Subject: [PATCH 048/279] test: run tests with both fedora and centos images Test both `quay.io/centos-bootc/{centos,fedora}-bootc:{eln,stream9}`, now that `bib` supports both fedora and centos bootc we need to start testing both as part of the integration suite. Note that not all combinations are tested to save time, it's a tradeoff and trivial enough to tweak. --- test/bib/containerbuild.py | 9 ------- test/bib/test_build.py | 25 +++++++----------- test/bib/test_manifest.py | 10 ++++--- test/bib/testcases.py | 53 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 27 deletions(-) create mode 100644 test/bib/testcases.py diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 96b24fc3..7d605f26 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -17,12 +17,3 @@ def build_container_fixture(): "-t", container_tag, ]) return container_tag - - -def container_to_build_ref(): - # TODO: make this another indirect fixture input, e.g. 
by making - # making "image_type" an "image" tuple (type, container_ref_to_test) - return os.getenv( - "BIB_TEST_BOOTC_CONTAINER_TAG", - "quay.io/centos-bootc/fedora-bootc:eln", - ) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 1364fe27..96054ce6 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -12,7 +12,8 @@ # local test utils import testutil -from containerbuild import build_container_fixture, container_to_build_ref # noqa: F401 +from containerbuild import build_container_fixture # noqa: F401 +from testcases import gen_testcases from vm import AWS, QEMU if not testutil.has_executable("podman"): @@ -27,12 +28,6 @@ pytest.skip("need x86_64-v3 capable CPU", allow_module_level=True) -# image types to test -DIRECT_BOOT_IMAGE_TYPES = ["qcow2", "ami", "raw"] -INSTALLER_IMAGE_TYPES = ["iso"] -SUPPORTED_IMAGE_TYPES = DIRECT_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES - - class ImageBuildResult(NamedTuple): img_type: str img_path: str @@ -49,7 +44,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa ImageBuildResult with the resulting image path and user/password """ # image_type is passed via special pytest parameter fixture - image_type = request.param + container_ref, image_type = request.param.split(",") username = "test" password = "password" @@ -64,7 +59,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "raw": pathlib.Path(output_path) / "image/disk.raw", "iso": pathlib.Path(output_path) / "bootiso/install.iso", } - assert len(artifact) == len(SUPPORTED_IMAGE_TYPES), \ + assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ "please keep artifact mapping and supported images in sync" generated_img = artifact[image_type] @@ -121,7 +116,7 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "-v", "/store", # share the cache between builds *creds_args, build_container, - container_to_build_ref(), + 
container_ref, "--config", "/output/config.json", "--type", image_type, *upload_args, @@ -146,14 +141,14 @@ def test_container_builds(build_container): assert build_container in output -@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) def test_image_is_generated(image_type): assert image_type.img_path.exists(), "output file missing, dir "\ f"content: {os.listdir(os.fspath(image_type.img_path))}" @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", DIRECT_BOOT_IMAGE_TYPES, indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) def test_image_boots(image_type): with QEMU(image_type.img_path) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) @@ -163,7 +158,7 @@ def test_image_boots(image_type): assert "hello" in output -@pytest.mark.parametrize("image_type", ["ami"], indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) def test_ami_boots_in_aws(image_type, force_aws_upload): if not testutil.write_aws_creds("/dev/null"): # we don't care about the file, just the variables being there if force_aws_upload: @@ -213,7 +208,7 @@ def has_selinux(): @pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") -@pytest.mark.parametrize("image_type", SUPPORTED_IMAGE_TYPES, indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) def test_image_build_without_se_linux_denials(image_type): # the journal always contains logs from the image building assert image_type.journal_output != "" @@ -222,7 +217,7 @@ def test_image_build_without_se_linux_denials(image_type): @pytest.mark.skipif(platform.system() != "Linux", 
reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", INSTALLER_IMAGE_TYPES, indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("iso"), indirect=["image_type"]) def test_iso_installs(image_type): installer_iso_path = image_type.img_path test_disk_path = installer_iso_path.with_name("test-disk.img") diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 3f9640ed..d5d22443 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -9,13 +9,17 @@ if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) -from containerbuild import build_container_fixture, container_to_build_ref # noqa: F401 +from containerbuild import build_container_fixture # noqa: F401 +from testcases import gen_testcases -def test_manifest_smoke(build_container): +@pytest.mark.parametrize("image_type", gen_testcases("manifest")) +def test_manifest_smoke(build_container, image_type): + container_ref = image_type.split(",")[0] + output = subprocess.check_output([ "podman", "run", "--rm", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_to_build_ref()}"]', + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', build_container, ]) manifest = json.loads(output) diff --git a/test/bib/testcases.py b/test/bib/testcases.py new file mode 100644 index 00000000..9c4f5ef6 --- /dev/null +++ b/test/bib/testcases.py @@ -0,0 +1,53 @@ +import os + + +def gen_testcases(what): + # supported images that can be directly booted + DIRECT_BOOT_IMAGE_TYPES = ("qcow2", "ami", "raw") + # supported images that require an install + INSTALLER_IMAGE_TYPES = ("iso",) + + # bootc containers that are tested by default + CONTAINERS_TO_TEST = { + "fedora": "quay.io/centos-bootc/fedora-bootc:eln", + "centos": "quay.io/centos-bootc/centos-bootc:stream9", + } + # allow commandline override, this is used when 
testing + # custom images + if os.getenv("BIB_TEST_BOOTC_CONTAINER_TAG"): + # TODO: make this more elegant + CONTAINERS_TO_TEST = { + "centos": os.getenv("BIB_TEST_BOOTC_CONTAINER_TAG"), + "fedora": [], + } + + if what == "manifest": + return CONTAINERS_TO_TEST.values() + elif what == "ami-boot": + return [cnt + ",ami" for cnt in CONTAINERS_TO_TEST.values()] + elif what == "iso": + test_cases = [] + # only fedora right now, centos iso installer is broken right now: + # https://github.com/osbuild/bootc-image-builder/issues/157 + cnt = CONTAINERS_TO_TEST["fedora"] + for img_type in INSTALLER_IMAGE_TYPES: + test_cases.append(f"{cnt},{img_type}") + return test_cases + elif what == "direct-boot": + # skip some raw/ami tests (they are identical right now) to + # avoid overlong test runs but revisit this later and maybe just + # do more in parallel? + test_cases = [ + CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], + CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[1], + CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], + CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], + ] + return test_cases + elif what == "all": + test_cases = [] + for cnt in CONTAINERS_TO_TEST.values(): + for img_type in DIRECT_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES: + test_cases.append(f"{cnt},{img_type}") + return test_cases + raise ValueError(f"unknown test-case type {what}") From d2dce4ccd97cc6e47c568913bed533ae743a239e Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 6 Feb 2024 14:50:58 +0100 Subject: [PATCH 049/279] workflow: add diskspace diagnosis --- .github.com/workflows/bibtests.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 187320a4..1cbd63bd 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -73,6 +73,10 @@ jobs: run: | sudo apt update sudo apt install -y podman python3-pytest 
python3-paramiko python3-boto3 flake8 qemu-system-x86 + - name: Diskspace (before) + run: | + df -h + sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh - name: Run tests env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} @@ -84,6 +88,11 @@ jobs: # XDG_RUNTIME_DIR is set. # TODO: figure out what exactly podman needs sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv + - name: Diskspace (after) + if: ${{ failure() }} + run: | + df -h + sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh integration-macos: name: "Integration macos" From 9c97bcab3d0a76b345f414d1bb52554c70e08e92 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 6 Feb 2024 22:03:01 +0100 Subject: [PATCH 050/279] test: cleanup after each image_type_fixture() run We are running out of disk space on the GH runners so we need to be more aggressive about cleanups. --- test/bib/test_build.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 96054ce6..6afd7953 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -63,11 +63,6 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "please keep artifact mapping and supported images in sync" generated_img = artifact[image_type] - # if the fixture already ran and generated an image, use that - if generated_img.exists(): - journal_output = journal_log_path.read_text(encoding="utf8") - return ImageBuildResult(image_type, generated_img, username, password, journal_output) - # no image yet, build it CFG = { "blueprint": { @@ -132,7 +127,10 @@ def del_ami(): journal_log_path.write_text(journal_output, encoding="utf8") - return ImageBuildResult(image_type, generated_img, username, password, journal_output, metadata) + yield ImageBuildResult(image_type, generated_img, username, password, journal_output, metadata) + generated_img.unlink() + subprocess.run(["podman", "rmi", container_ref], check=False) + return def 
test_container_builds(build_container): From b7f3f7a362e58414b2d081a6aafc5bb6441ec372 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 7 Feb 2024 11:46:42 +0100 Subject: [PATCH 051/279] test: share images between tests if possible --- test/bib/test_build.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 6afd7953..76cd538e 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -3,6 +3,7 @@ import pathlib import platform import re +import shutil import subprocess import tempfile import uuid @@ -37,8 +38,14 @@ class ImageBuildResult(NamedTuple): metadata: dict = {} +@pytest.fixture(scope='session') +def shared_tmpdir(tmpdir_factory): + tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared")) + yield tmp_path + + @pytest.fixture(name="image_type", scope="session") -def image_type_fixture(tmpdir_factory, build_container, request, force_aws_upload): +def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload): """ Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password @@ -49,7 +56,8 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa username = "test" password = "password" - output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + for_image_type = request.param.translate(str.maketrans("/:,", "---")) + output_path = shared_tmpdir / f"cache-{for_image_type}" output_path.mkdir(exist_ok=True) journal_log_path = output_path / "journal.log" @@ -63,6 +71,12 @@ def image_type_fixture(tmpdir_factory, build_container, request, force_aws_uploa "please keep artifact mapping and supported images in sync" generated_img = artifact[image_type] + if generated_img.exists(): + print(f"NOTE: reusing cached image {generated_img}") + journal_output = journal_log_path.read_text(encoding="utf8") + yield ImageBuildResult(image_type, 
generated_img, username, password, journal_output) + return + # no image yet, build it CFG = { "blueprint": { @@ -128,7 +142,12 @@ def del_ami(): journal_log_path.write_text(journal_output, encoding="utf8") yield ImageBuildResult(image_type, generated_img, username, password, journal_output, metadata) - generated_img.unlink() + # Try to cache as much as possible + disk_usage = shutil.disk_usage(generated_img) + print(f"NOTE: disk usage after {generated_img}: {disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}") + if disk_usage.free < 1_000_000_000: + print(f"WARNING: running low on disk space, removing {generated_img}") + generated_img.unlink() subprocess.run(["podman", "rmi", container_ref], check=False) return From a1ce60eaf2fa4cb035db416c4a80af4069e0831f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 29 Jan 2024 17:48:26 +0100 Subject: [PATCH 052/279] test: add cross-arch build/boot test --- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_build.py | 26 ++++++++++---- test/bib/testcases.py | 9 +++++ test/bib/vm.py | 55 +++++++++++++++++++---------- 4 files changed, 67 insertions(+), 25 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 1cbd63bd..cbb933e2 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -72,7 +72,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 + sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static - name: Diskspace (before) run: | df -h diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 76cd538e..489f13f3 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -32,6 +32,7 @@ class ImageBuildResult(NamedTuple): img_type: str img_path: str + img_arch: str username: str password: str 
journal_output: str @@ -51,13 +52,22 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload ImageBuildResult with the resulting image path and user/password """ # image_type is passed via special pytest parameter fixture - container_ref, image_type = request.param.split(",") + if request.param.count(",") == 2: + container_ref, image_type, target_arch = request.param.split(",") + elif request.param.count(",") == 1: + container_ref, image_type = request.param.split(",") + target_arch = None + else: + raise ValueError(f"cannot parse {request.param.count}") username = "test" password = "password" - for_image_type = request.param.translate(str.maketrans("/:,", "---")) - output_path = shared_tmpdir / f"cache-{for_image_type}" + # image_type can be long and the qmp socket (that has a limit of 100ish + # AF_UNIX) is derrived from the path + # so hash the image_type instead of just using it + for_image_type = request.param + output_path = shared_tmpdir / format(abs(hash(for_image_type)), "x") output_path.mkdir(exist_ok=True) journal_log_path = output_path / "journal.log" @@ -74,7 +84,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload if generated_img.exists(): print(f"NOTE: reusing cached image {generated_img}") journal_output = journal_log_path.read_text(encoding="utf8") - yield ImageBuildResult(image_type, generated_img, username, password, journal_output) + yield ImageBuildResult(image_type, generated_img, target_arch, username, password, journal_output) return # no image yet, build it @@ -99,6 +109,9 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload upload_args = [] creds_args = [] + target_arch_args = [] + if target_arch: + target_arch_args = ["--target-arch", target_arch] with tempfile.TemporaryDirectory() as tempdir: if image_type == "ami": @@ -129,6 +142,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload "--config", 
"/output/config.json", "--type", image_type, *upload_args, + *target_arch_args, ]) journal_output = testutil.journal_after_cursor(cursor) metadata = {} @@ -141,7 +155,7 @@ def del_ami(): journal_log_path.write_text(journal_output, encoding="utf8") - yield ImageBuildResult(image_type, generated_img, username, password, journal_output, metadata) + yield ImageBuildResult(image_type, generated_img, target_arch, username, password, journal_output, metadata) # Try to cache as much as possible disk_usage = shutil.disk_usage(generated_img) print(f"NOTE: disk usage after {generated_img}: {disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}") @@ -167,7 +181,7 @@ def test_image_is_generated(image_type): @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) def test_image_boots(image_type): - with QEMU(image_type.img_path) as test_vm: + with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 9c4f5ef6..ed68deab 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -1,3 +1,4 @@ +import platform import os @@ -43,6 +44,14 @@ def gen_testcases(what): CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], ] + # do a cross arch test too + if platform.machine() == "x86_64": + # todo: add fedora:eln + test_cases.append( + f'{CONTAINERS_TO_TEST["centos"]},raw,arm64') + elif platform.machine() == "arm64": + # TODO: add arm64->x86_64 cross build test too + pass return test_cases elif what == "all": test_cases = [] diff --git a/test/bib/vm.py b/test/bib/vm.py index 
ad640188..43c37636 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,6 +1,7 @@ import abc import os import pathlib +import platform import subprocess import sys import time @@ -93,10 +94,8 @@ def find_ovmf(): class QEMU(VM): MEM = "2000" - # TODO: support qemu-system-aarch64 too :) - QEMU = "qemu-system-x86_64" - def __init__(self, img, snapshot=True, cdrom=None): + def __init__(self, img, arch="", snapshot=True, cdrom=None): super().__init__() self._img = pathlib.Path(img) self._qmp_socket = self._img.with_suffix(".qemp-socket") @@ -104,22 +103,37 @@ def __init__(self, img, snapshot=True, cdrom=None): self._snapshot = snapshot self._cdrom = cdrom self._ssh_port = None + if not arch: + arch = platform.machine() + self._arch = arch def __del__(self): self.force_stop() - # XXX: move args to init() so that __enter__ can use them? - def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): - if self.running(): - return - log_path = self._img.with_suffix(".serial-log") - self._ssh_port = get_free_port() - self._address = "localhost" - qemu_cmdline = [ - self.QEMU, "-enable-kvm", + def _gen_qemu_cmdline(self, snapshot, use_ovmf): + if self._arch in ("arm64", "aarch64"): + qemu_cmdline = [ + "qemu-system-aarch64", + "-machine", "virt", + "-cpu", "cortex-a57", + "-smp", "2", + "-bios", "/usr/share/AAVMF/AAVMF_CODE.fd", + ] + elif self._arch in ("amd64", "x86_64"): + qemu_cmdline = [ + "qemu-system-x86_64", + "-M", "accel=kvm", + # get "illegal instruction" inside the VM otherwise + "-cpu", "host", + ] + if use_ovmf: + qemu_cmdline.extend(["-bios", find_ovmf()]) + else: + raise ValueError(f"unsupported architecture {self._arch}") + + # common part + qemu_cmdline += [ "-m", self.MEM, - # get "illegal instruction" inside the VM otherwise - "-cpu", "host", "-serial", "stdio", "-monitor", "none", "-netdev", f"user,id=net.0,hostfwd=tcp::{self._ssh_port}-:22", @@ -128,18 +142,23 @@ def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): ] if not 
os.environ.get("OSBUILD_TEST_QEMU_GUI"): qemu_cmdline.append("-nographic") - if use_ovmf: - qemu_cmdline.extend(["-bios", find_ovmf()]) if self._cdrom: qemu_cmdline.extend(["-cdrom", self._cdrom]) if snapshot: qemu_cmdline.append("-snapshot") qemu_cmdline.append(self._img) - self._log(f"vm starting, log available at {log_path}") + return qemu_cmdline + + # XXX: move args to init() so that __enter__ can use them? + def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): + if self.running(): + return + self._ssh_port = get_free_port() + self._address = "localhost" # XXX: use systemd-run to ensure cleanup? self._qemu_p = subprocess.Popen( - qemu_cmdline, + self._gen_qemu_cmdline(snapshot, use_ovmf), stdout=sys.stdout, stderr=sys.stderr, ) From 9062c258c776a414fda195b8e1fe69a2494aed3b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 8 Feb 2024 12:29:36 +0100 Subject: [PATCH 053/279] github: setup custom basetemp in GH runners --- .github.com/workflows/bibtests.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index cbb933e2..e10750c6 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -87,9 +87,12 @@ jobs: # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set. 
# TODO: figure out what exactly podman needs - sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv + # use custom basetemp here because /var/tmp is on a smaller disk + # than /mnt + sudo mkdir -p /mnt/var/tmp/bib-tests + sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv --basetemp=/mnt/var/tmp/bib-tests - name: Diskspace (after) - if: ${{ failure() }} + if: ${{ always() }} run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh From 8403ff2b1a56e0f1955494f63a0c383fbf3ee552 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 04:22:06 +0000 Subject: [PATCH 054/279] build(deps): bump golangci/golangci-lint-action from 3 to 4 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index e10750c6..f299ffca 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -35,7 +35,7 @@ jobs: run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - name: Run golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.55.2 args: --timeout 5m0s From 9fdc88459108b92c018aa55017141919559bd686 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 31 Jan 2024 08:40:57 -0500 Subject: [PATCH 055/279] Rename `iso` -> `anaconda-iso` I would like to potentially introduce a generic "live iso" that generates *exactly the input container* just as a Live ISO, very similar to how Fedora CoreOS (and one of its parents, the original Container Linux) do it. This has several use cases, such as always running from a PXE boot. I also think this is the long term architecture for an ISO, as opposed to the current Anaconda. This "direct" Live ISO is also the same as how e.g. Fedora Workstation does it, where the installer is just an app in the system. For compatibility we continue to honor the `iso` as an alias. 
--- test/bib/test_build.py | 4 ++-- test/bib/testcases.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 489f13f3..a3fd6d17 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -75,7 +75,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": pathlib.Path(output_path) / "image/disk.raw", "raw": pathlib.Path(output_path) / "image/disk.raw", - "iso": pathlib.Path(output_path) / "bootiso/install.iso", + "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ "please keep artifact mapping and supported images in sync" @@ -248,7 +248,7 @@ def test_image_build_without_se_linux_denials(image_type): @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", gen_testcases("iso"), indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) def test_iso_installs(image_type): installer_iso_path = image_type.img_path test_disk_path = installer_iso_path.with_name("test-disk.img") diff --git a/test/bib/testcases.py b/test/bib/testcases.py index ed68deab..09e754af 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -6,7 +6,7 @@ def gen_testcases(what): # supported images that can be directly booted DIRECT_BOOT_IMAGE_TYPES = ("qcow2", "ami", "raw") # supported images that require an install - INSTALLER_IMAGE_TYPES = ("iso",) + INSTALLER_IMAGE_TYPES = ("anaconda-iso",) # bootc containers that are tested by default CONTAINERS_TO_TEST = { @@ -26,7 +26,7 @@ def gen_testcases(what): return CONTAINERS_TO_TEST.values() elif what == "ami-boot": return [cnt + ",ami" for cnt in CONTAINERS_TO_TEST.values()] - elif what == "iso": + elif what == "anaconda-iso": 
test_cases = [] # only fedora right now, centos iso installer is broken right now: # https://github.com/osbuild/bootc-image-builder/issues/157 From 6b0117cb0dcc395412e50f8e9dccc2305d4751e5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 5 Mar 2024 10:19:49 +0100 Subject: [PATCH 056/279] test: disable anaconda-iso test because of issue#233 This commit disables the anaconda-iso test that currently fails because of https://github.com/osbuild/bootc-image-builder/issues/233 --- test/bib/test_build.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index a3fd6d17..8c061f25 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -247,6 +247,7 @@ def test_image_build_without_se_linux_denials(image_type): f"denials in log {image_type.journal_output}" +@pytest.mark.skip(reason="see https://github.com/osbuild/bootc-image-builder/issues/233") @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) def test_iso_installs(image_type): From 7d85db9a6c1093a40b0217aa0d161cacce160d28 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 17 Jan 2024 18:02:57 +0100 Subject: [PATCH 057/279] bib: show progress on ami upload Add a new `--progress` option that defaults to `text` and show upload progress when uploading an AMI.
--- test/bib/test_build.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8c061f25..26b85342 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -35,6 +35,7 @@ class ImageBuildResult(NamedTuple): img_arch: str username: str password: str + bib_output: str journal_output: str metadata: dict = {} @@ -71,6 +72,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload output_path.mkdir(exist_ok=True) journal_log_path = output_path / "journal.log" + bib_output_path = output_path / "bib-output.log" artifact = { "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": pathlib.Path(output_path) / "image/disk.raw", @@ -84,7 +86,8 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload if generated_img.exists(): print(f"NOTE: reusing cached image {generated_img}") journal_output = journal_log_path.read_text(encoding="utf8") - yield ImageBuildResult(image_type, generated_img, target_arch, username, password, journal_output) + bib_output = bib_output_path.read_text(encoding="utf8") + yield ImageBuildResult(image_type, generated_img, target_arch, username, password, bib_output, journal_output) return # no image yet, build it @@ -130,7 +133,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload raise RuntimeError("AWS credentials not available (upload forced)") # run container to deploy an image into a bootable disk and upload to a cloud service if applicable - subprocess.check_call([ + p = subprocess.Popen([ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", @@ -143,7 +146,18 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload "--type", image_type, *upload_args, *target_arch_args, - ]) + ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + # not using subprocss.check_output() to ensure 
we get live output + # during the text + bib_output = "" + while True: + line = p.stdout.readline() + if not line: + break + print(line, end="") + bib_output += line + p.wait(timeout=10) + journal_output = testutil.journal_after_cursor(cursor) metadata = {} if image_type == "ami" and upload_args: @@ -154,8 +168,11 @@ def del_ami(): request.addfinalizer(del_ami) journal_log_path.write_text(journal_output, encoding="utf8") + bib_output_path.write_text(bib_output, encoding="utf8") - yield ImageBuildResult(image_type, generated_img, target_arch, username, password, journal_output, metadata) + yield ImageBuildResult( + image_type, generated_img, target_arch, username, password, + bib_output, journal_output, metadata) # Try to cache as much as possible disk_usage = shutil.disk_usage(generated_img) print(f"NOTE: disk usage after {generated_img}: {disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}") @@ -197,6 +214,13 @@ def test_ami_boots_in_aws(image_type, force_aws_upload): raise RuntimeError("AWS credentials not available") pytest.skip("AWS credentials not available (upload not forced)") + # check that upload progress is in the output log. Uploads looks like: + # + # Uploading /output/image/disk.raw to bootc-image-builder-ci:aac64b64-6e57-47df-9730-54763061d84b-disk.raw + # 0 B / 10.00 GiB 0.00% + # In the tests with no pty no progress bar is shown in the output just + # xx / yy zz% + assert " 100.00%\n" in image_type.bib_output with AWS(image_type.metadata["ami_id"]) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 From 2990ea4b6fcce8d9837960a1dd5677798cc0ab60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Thu, 7 Mar 2024 11:58:46 +0100 Subject: [PATCH 058/279] github: split test setup and the actual run When I open the log for test run in GitHub actions, I want to see just the test run, not pip install deps. 
Let's split these two things to make the log more readable. --- .github.com/workflows/bibtests.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index f299ffca..290c0f1a 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -77,10 +77,7 @@ jobs: run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh - - name: Run tests - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + - name: Install test deps run: | # make sure test deps are available for root sudo -E pip install --user -r test/requirements.txt @@ -90,6 +87,11 @@ jobs: # use custom basetemp here because /var/tmp is on a smaller disk # than /mnt sudo mkdir -p /mnt/var/tmp/bib-tests + - name: Run tests + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv --basetemp=/mnt/var/tmp/bib-tests - name: Diskspace (after) if: ${{ always() }} run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh From 47abbb6c478f7e7e13d00ba5b3956a6065ff5109 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 7 Mar 2024 15:51:37 +0100 Subject: [PATCH 059/279] test: use multi-image builds Add a new fixture, images, that reuses the code from the image_type fixture. Both the images and image_type fixture now call a new function, build_images, that handles either building new images or returning cached builds. When one or more disk images are requested, the build_images() function uses the new multi-build feature to build all disk image types and returns the artifacts that were requested. A new test, test_multi_build_request(), ensures that the returned image artifacts exist and match the expected file names. ISO tests are unaffected by these changes since they can only be built separately.
--- test/bib/test_build.py | 130 +++++++++++++++++++++++++++++--------- test/bib/test_manifest.py | 7 +- test/bib/testcases.py | 20 ++++-- 3 files changed, 117 insertions(+), 40 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 26b85342..46765087 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -7,6 +7,7 @@ import subprocess import tempfile import uuid +from contextlib import contextmanager from typing import NamedTuple import pytest @@ -14,7 +15,8 @@ # local test utils import testutil from containerbuild import build_container_fixture # noqa: F401 -from testcases import gen_testcases +from testcases import (DIRECT_BOOT_IMAGE_TYPES, INSTALLER_IMAGE_TYPES, + gen_testcases) from vm import AWS, QEMU if not testutil.has_executable("podman"): @@ -52,23 +54,47 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password """ + with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + yield build_results[0] + + +@pytest.fixture(name="images", scope="session") +def images_fixture(shared_tmpdir, build_container, request, force_aws_upload): + """ + Build one or more images inside the passed build_container and return an + ImageBuildResult array with the resulting image path and user/password + """ + with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + yield build_results + + +@contextmanager +def build_images(shared_tmpdir, build_container, request, force_aws_upload): + """ + Build all available image types if necessary and return the results for the image types that were requested. + Will return cached results of previous build requests. 
+ """ # image_type is passed via special pytest parameter fixture if request.param.count(",") == 2: - container_ref, image_type, target_arch = request.param.split(",") + container_ref, images, target_arch = request.param.split(",") elif request.param.count(",") == 1: - container_ref, image_type = request.param.split(",") + container_ref, images = request.param.split(",") target_arch = None else: raise ValueError(f"cannot parse {request.param.count}") + # images might be multiple --type args + # split and check each one + image_types = images.split("+") + username = "test" password = "password" - # image_type can be long and the qmp socket (that has a limit of 100ish - # AF_UNIX) is derrived from the path - # so hash the image_type instead of just using it - for_image_type = request.param - output_path = shared_tmpdir / format(abs(hash(for_image_type)), "x") + # params can be long and the qmp socket (that has a limit of 100ish + # AF_UNIX) is derived from the path + # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to + # different image type combinations + output_path = shared_tmpdir / format(abs(hash(container_ref+str(target_arch))), "x") output_path.mkdir(exist_ok=True) journal_log_path = output_path / "journal.log" @@ -81,17 +107,31 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload } assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ "please keep artifact mapping and supported images in sync" - generated_img = artifact[image_type] - if generated_img.exists(): - print(f"NOTE: reusing cached image {generated_img}") - journal_output = journal_log_path.read_text(encoding="utf8") - bib_output = bib_output_path.read_text(encoding="utf8") - yield ImageBuildResult(image_type, generated_img, target_arch, username, password, bib_output, journal_output) + results = [] + for image_type in image_types: + generated_img = artifact[image_type] + 
print(f"Checking for cached image {image_type} -> {generated_img}") + if generated_img.exists(): + print(f"NOTE: reusing cached image {generated_img}") + journal_output = journal_log_path.read_text(encoding="utf8") + bib_output = bib_output_path.read_text(encoding="utf8") + results.append(ImageBuildResult(image_type, generated_img, target_arch, username, password, + bib_output, journal_output)) + + # Because we always build all image types, regardless of what was requested, we should either have 0 results or all + # should be available, so if we found at least one result but not all of them, this is a problem with our setup + assert not results or len(results) == len(image_types), \ + f"unexpected number of results found: requested {len(image_types)} but got {len(results)}" + + if results: + yield results return - # no image yet, build it - CFG = { + print(f"Requested {len(image_types)} images but found {len(results)} cached images. Building...") + + # not all requested image types are available - build them + cfg = { "blueprint": { "customizations": { "user": [ @@ -106,7 +146,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload } config_json_path = output_path / "config.json" - config_json_path.write_text(json.dumps(CFG), encoding="utf-8") + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") cursor = testutil.journal_cursor() @@ -117,7 +157,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload target_arch_args = ["--target-arch", target_arch] with tempfile.TemporaryDirectory() as tempdir: - if image_type == "ami": + if "ami" in image_types: creds_file = pathlib.Path(tempdir) / "aws.creds" if testutil.write_aws_creds(creds_file): creds_args = ["-v", f"{creds_file}:/root/.aws/credentials:ro", @@ -132,8 +172,14 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload # upload forced but credentials aren't set raise RuntimeError("AWS credentials not available 
(upload forced)") + # we're either building an iso or all the disk image types + if image_types[0] in INSTALLER_IMAGE_TYPES: + types_arg = [f"--type={image_types[0]}"] + else: + types_arg = [f"--type={it}" for it in DIRECT_BOOT_IMAGE_TYPES] + # run container to deploy an image into a bootable disk and upload to a cloud service if applicable - p = subprocess.Popen([ + cmd = [ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", @@ -143,10 +189,13 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload build_container, container_ref, "--config", "/output/config.json", - "--type", image_type, + *types_arg, *upload_args, *target_arch_args, - ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + ] + # print the build command for easier tracing + print(" ".join(cmd)) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) # not using subprocss.check_output() to ensure we get live output # during the text bib_output = "" @@ -160,7 +209,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload journal_output = testutil.journal_after_cursor(cursor) metadata = {} - if image_type == "ami" and upload_args: + if "ami" in image_types and upload_args: metadata["ami_id"] = parse_ami_id_from_log(journal_output) def del_ami(): @@ -170,15 +219,25 @@ def del_ami(): journal_log_path.write_text(journal_output, encoding="utf8") bib_output_path.write_text(bib_output, encoding="utf8") - yield ImageBuildResult( - image_type, generated_img, target_arch, username, password, - bib_output, journal_output, metadata) + results = [] + for image_type in image_types: + results.append(ImageBuildResult(image_type, artifact[image_type], target_arch, + username, password, bib_output, journal_output, metadata)) + yield results + # Try to cache as much as possible - disk_usage = shutil.disk_usage(generated_img) - print(f"NOTE: disk usage after {generated_img}: 
{disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}") - if disk_usage.free < 1_000_000_000: - print(f"WARNING: running low on disk space, removing {generated_img}") - generated_img.unlink() + for image_type in image_types: + img = artifact[image_type] + print(f"Checking disk usage for {img}") + if os.path.exists(img): + # might already be removed if we're deleting 'raw' and 'ami' + disk_usage = shutil.disk_usage(img) + print(f"NOTE: disk usage after {img}: {disk_usage.free / 1_000_000} / {disk_usage.total / 1_000_000}") + if disk_usage.free < 1_000_000_000: + print(f"WARNING: running low on disk space, removing {img}") + img.unlink() + else: + print("does not exist") subprocess.run(["podman", "rmi", container_ref], check=False) return @@ -288,3 +347,14 @@ def test_iso_installs(image_type): vm.start(use_ovmf=True) exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 + + +@pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) +def test_multi_build_request(images): + artifacts = set() + expected = {"disk.qcow2", "disk.raw"} + for result in images: + filename = os.path.basename(result.img_path) + assert result.img_path.exists() + artifacts.add(filename) + assert artifacts == expected diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index d5d22443..7959f869 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -5,7 +5,6 @@ import testutil - if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) @@ -13,9 +12,9 @@ from testcases import gen_testcases -@pytest.mark.parametrize("image_type", gen_testcases("manifest")) -def test_manifest_smoke(build_container, image_type): - container_ref = image_type.split(",")[0] +@pytest.mark.parametrize("images", gen_testcases("manifest")) +def test_manifest_smoke(build_container, images): + container_ref = 
images.split(",")[0] output = subprocess.check_output([ "podman", "run", "--rm", diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 09e754af..8f5c664b 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -1,13 +1,14 @@ -import platform import os +import platform +# supported images that can be directly booted +DIRECT_BOOT_IMAGE_TYPES = ("qcow2", "ami", "raw") -def gen_testcases(what): - # supported images that can be directly booted - DIRECT_BOOT_IMAGE_TYPES = ("qcow2", "ami", "raw") - # supported images that require an install - INSTALLER_IMAGE_TYPES = ("anaconda-iso",) +# supported images that require an install +INSTALLER_IMAGE_TYPES = ("anaconda-iso",) + +def gen_testcases(what): # bootc containers that are tested by default CONTAINERS_TO_TEST = { "fedora": "quay.io/centos-bootc/fedora-bootc:eln", @@ -59,4 +60,11 @@ def gen_testcases(what): for img_type in DIRECT_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES: test_cases.append(f"{cnt},{img_type}") return test_cases + elif what == "multidisk": + # single test that specifies all image types + test_cases = [] + for cnt in CONTAINERS_TO_TEST.values(): + img_type = "+".join(DIRECT_BOOT_IMAGE_TYPES) + test_cases.append(f"{cnt},{img_type}") + return test_cases raise ValueError(f"unknown test-case type {what}") From d2935883384aab17e2cb4b44f34fff57ac1d1417 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 8 Mar 2024 12:30:59 +0100 Subject: [PATCH 060/279] test: clarify test_manifest_smoke parameters --- test/bib/test_manifest.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 7959f869..8746809a 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -12,9 +12,10 @@ from testcases import gen_testcases -@pytest.mark.parametrize("images", gen_testcases("manifest")) -def test_manifest_smoke(build_container, images): - container_ref = images.split(",")[0] +@pytest.mark.parametrize("testcase_ref", 
gen_testcases("manifest")) +def test_manifest_smoke(build_container, testcase_ref): + # testcases_ref has the form "container_url,img_type1+img_type2,arch" + container_ref = testcase_ref.split(",")[0] output = subprocess.check_output([ "podman", "run", "--rm", From f668e9fe120ef89e2350c58f73cfcd59e045e79d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 8 Mar 2024 12:48:37 +0100 Subject: [PATCH 061/279] test: tweak docstrings/variable names --- test/bib/test_build.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 46765087..cd3dd612 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -71,17 +71,22 @@ def images_fixture(shared_tmpdir, build_container, request, force_aws_upload): @contextmanager def build_images(shared_tmpdir, build_container, request, force_aws_upload): """ - Build all available image types if necessary and return the results for the image types that were requested. + Build all available image types if necessary and return the results for + the image types that were requested via :request:. + Will return cached results of previous build requests. 
+ + :request.parm: has the form "container_url,img_type1+img_type2,arch" """ # image_type is passed via special pytest parameter fixture - if request.param.count(",") == 2: - container_ref, images, target_arch = request.param.split(",") - elif request.param.count(",") == 1: - container_ref, images = request.param.split(",") + testcase_ref = request.param + if testcase_ref.count(",") == 2: + container_ref, images, target_arch = testcase_ref.split(",") + elif testcase_ref.count(",") == 1: + container_ref, images = testcase_ref.split(",") target_arch = None else: - raise ValueError(f"cannot parse {request.param.count}") + raise ValueError(f"cannot parse {testcase_ref.count}") # images might be multiple --type args # split and check each one @@ -94,7 +99,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # AF_UNIX) is derived from the path # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to # different image type combinations - output_path = shared_tmpdir / format(abs(hash(container_ref+str(target_arch))), "x") + output_path = shared_tmpdir / format(abs(hash(container_ref + str(target_arch))), "x") output_path.mkdir(exist_ok=True) journal_log_path = output_path / "journal.log" @@ -116,13 +121,14 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): print(f"NOTE: reusing cached image {generated_img}") journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") - results.append(ImageBuildResult(image_type, generated_img, target_arch, username, password, - bib_output, journal_output)) + results.append(ImageBuildResult( + image_type, generated_img, target_arch, username, password, + bib_output, journal_output)) # Because we always build all image types, regardless of what was requested, we should either have 0 results or all # should be available, so if we found at least one result but not all of 
them, this is a problem with our setup assert not results or len(results) == len(image_types), \ - f"unexpected number of results found: requested {len(image_types)} but got {len(results)}" + f"unexpected number of results found: requested {image_types} but got {results}" if results: yield results From 35e6cd83a3346982dc3706f66dca5b3a2a18dcfa Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 8 Mar 2024 17:21:01 +0100 Subject: [PATCH 062/279] test: remove AMI test cases from "direct-boot" The AMI tests are done via "ami-boot". Having parts in "direct-boot" is confusing and also leads to bugs, i.e. we generate the ami in the "direct-boot" test but then the ami is not uploaded to AWS but just stored locally. This means that the boottest in AWS will fail and also that the upload progress check will fail. The ami-boot tests are done via tmt so there is no test coverage regression here. --- test/bib/testcases.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 8f5c664b..86869805 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -36,12 +36,13 @@ def gen_testcases(what): test_cases.append(f"{cnt},{img_type}") return test_cases elif what == "direct-boot": - # skip some raw/ami tests (they are identical right now) to - # avoid overlong test runs but revisit this later and maybe just - # do more in parallel? + # Do not do any AMI tests here, just test the raw image (which is + # identical to ami right now). 
The AMI needs to also get uploaded + # so they really need their own dedicated AMI test via the "ami-boot" + # test-cases test_cases = [ CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], - CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[1], + CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], ] From a8ee2e3a938fc13fa8e0b031b5725b583cae43ff Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Sat, 9 Mar 2024 09:40:32 +0100 Subject: [PATCH 063/279] test: stop caching ami artifacts for now The "ami" image from the cache is reused regardless of if it is a local boot test or a boot test in AWS. This is a problem because when the local image is generated first (without upload credentials ) it will never have been uploaded to AWS but the cache still tries to reuse it. Worse because the filename of raw and ami are identical it will reuse a raw image. For now to unblock us skip AMI caching. This should be fixed in a followup and this commit contains a todo with various options. --- test/bib/test_build.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index cd3dd612..f01ce9fd 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -113,8 +113,20 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ "please keep artifact mapping and supported images in sync" + # this helper checks the cache results = [] for image_type in image_types: + # TODO: properly cache amis here. The issue right now is that + # ami and raw are the same image on disk which means that if a test + # like "boots_in_aws" requests an ami it will get the raw file on + # disk. 
However that is not sufficient because part of the ami test + # is the upload to AWS and the generated metadata. The fix could be + # to make the boot-in-aws a new image type like "ami-aws" where we + # cache the metadata instead of the disk image. Alternatively we + # could stop testing ami locally at all and just skip any ami tests + # if there are no AWS credentials. + if image_type == "ami": + continue generated_img = artifact[image_type] print(f"Checking for cached image {image_type} -> {generated_img}") if generated_img.exists(): From 5ecb6429d2a2ca42fb98aa16e1cf74060f102a4b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Sat, 9 Mar 2024 10:31:30 +0100 Subject: [PATCH 064/279] test: change build_images() to not assert because order is random The order of the test runs is random but the build_images helper makes assumptions that do not match this. I.e. it has an assert that assumes that because all images are build the cache will contain either all images requested or none. However if the sequence of events is that - build single qcow2 in test_image_boots - build multi-output test at this point the cache will have a qcow2 image already and the assertion is violated and the test dies even though that is a okay scenario. This commit remove the assertion. The helper grew a bit too big, we should rework it and also add tests for it (recursion ftw!). Alternatively we could use something like pytest-order to ensure that the if the multi test runs it runs first. 
--- test/bib/test_build.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index f01ce9fd..6ee56fba 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -137,12 +137,14 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): image_type, generated_img, target_arch, username, password, bib_output, journal_output)) - # Because we always build all image types, regardless of what was requested, we should either have 0 results or all - # should be available, so if we found at least one result but not all of them, this is a problem with our setup - assert not results or len(results) == len(image_types), \ - f"unexpected number of results found: requested {image_types} but got {results}" - - if results: + # return cached results only if we have exactly the amount of images + # requested. the reason is that for multi-images we cannot just return + # partial results. And because tests run in random order it maybe that + # a multi-image test runs after a single image test that already generated + # one of the types the multi images requested. + # TODO: rework the whole helper and extract the cache check and the build + # into their (tested) functions + if len(results) == len(image_types): yield results return From 9163c2f1ea34de80767511a1dc14a009e494c213 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Mar 2024 10:09:24 +0100 Subject: [PATCH 065/279] workflow: cancel in-progress tests on PR updates (HMS-3697) Similar to the osbuild PR#1636 this comit cancels CI runs for PRs that got updated. This changes the behavior in a way that whenever a PR gets updated all still-in-progress runs get canceled and new runs get spawned. 
--- .github.com/workflows/bibtests.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 290c0f1a..1ece8a61 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -11,6 +11,10 @@ on: # for merge queue merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: lint: name: "⌨ Lint" From 3f620ca58cf7836288f6fc24ab2406871580a59f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 6 Mar 2024 18:03:13 +0100 Subject: [PATCH 066/279] workflow: make `pytest` less verbose This is a bit of an experiment - we currently run `pytest` with `--capture=no` which means we get realtime updates as the tests run in GH actions. This is nice but it also leads to extremly verbose test output which is hard to read because on failure all test output is visible not just the failed one(s). So this switches to the default mode of pytest to catpure the output of successful tests and only show the output of tests that failed. We lose the ability to view tests in realtime but that is (nowdays) rarely needed (plus it's trivial to run the tests locally). 
--- .github.com/workflows/bibtests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 1ece8a61..270c6947 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -96,7 +96,7 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | - sudo -E XDG_RUNTIME_DIR= pytest-3 -s -vv --basetemp=/mnt/var/tmp/bib-tests + sudo -E XDG_RUNTIME_DIR= pytest-3 --basetemp=/mnt/var/tmp/bib-tests - name: Diskspace (after) if: ${{ always() }} run: | @@ -133,4 +133,4 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | - pytest -rs -s -vv --basetemp="${TMPDIR}/tmp" + pytest -rs --basetemp="${TMPDIR}/tmp" From c80fb3cf03f08bb547013cc110b28aeb704d03c7 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 7 Mar 2024 12:05:47 +0100 Subject: [PATCH 067/279] test: add integration tests for new --chown feature This also adds some infrastructure to fake the actual osbuild run which is potentially useful in the future as well. 
--- test/bib/containerbuild.py | 33 +++++++++++++++++++++++++++++++++ test/bib/test_opts.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 test/bib/test_opts.py diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 7d605f26..550f3bc0 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -1,5 +1,6 @@ import os import subprocess +import textwrap import pytest @@ -17,3 +18,35 @@ def build_container_fixture(): "-t", container_tag, ]) return container_tag + + +@pytest.fixture(name="build_fake_container", scope="session") +def build_fake_container_fixture(tmpdir_factory, build_container): + """Build a container with a fake osbuild and returns the name""" + tmp_path = tmpdir_factory.mktemp("build-fake-container") + + fake_osbuild_path = tmp_path / "fake-osbuild" + fake_osbuild_path.write_text(textwrap.dedent("""\ + #!/bin/sh -e + + mkdir -p /output/qcow2 + echo "fake-disk.qcow2" > /output/qcow2/disk.qcow2 + + echo "Done" + """), encoding="utf8") + + cntf_path = tmp_path / "Containerfile" + + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {build_container} + COPY fake-osbuild /usr/bin/osbuild + RUN chmod 755 /usr/bin/osbuild + """), encoding="utf8") + + container_tag = "bootc-image-builder-test-faked-osbuild" + subprocess.check_call([ + "podman", "build", + "-t", container_tag, + tmp_path, + ]) + return container_tag diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py new file mode 100644 index 00000000..5e3159a3 --- /dev/null +++ b/test/bib/test_opts.py @@ -0,0 +1,29 @@ +import subprocess + +import pytest + +from containerbuild import build_container_fixture, build_fake_container_fixture # noqa: F401 + + +@pytest.mark.parametrize("chown_opt,expected_uid_gid", [ + ([], (0, 0)), + (["--chown", "1000:1000"], (1000, 1000)), + (["--chown", "1000"], (1000, 0)), +]) +def test_bib_chown_opts(tmp_path, build_fake_container, chown_opt, expected_uid_gid): + output_path = tmp_path / 
"output" + output_path.mkdir(exist_ok=True) + + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{output_path}:/output", + build_fake_container, + "quay.io/centos-bootc/centos-bootc:stream9", + ] + chown_opt) + expected_output_disk = output_path / "qcow2/disk.qcow2" + for p in output_path, expected_output_disk: + assert p.exists() + assert p.stat().st_uid == expected_uid_gid[0] + assert p.stat().st_gid == expected_uid_gid[1] From 2ca953edb2718ffcd87a888d17caba94bd7c9e25 Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Mon, 11 Mar 2024 15:08:21 +0000 Subject: [PATCH 068/279] test/test_build: extract request params Create a new method to get the `container_ref`, `images` and `target_arch` that we can re-use. --- test/bib/test_build.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 6ee56fba..8c6cd32b 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -42,6 +42,19 @@ class ImageBuildResult(NamedTuple): metadata: dict = {} +def parse_request_params(request): + # image_type is passed via special pytest parameter fixture + testcase_ref = request.param + if testcase_ref.count(",") == 2: + container_ref, images, target_arch, local = testcase_ref.split(",") + elif testcase_ref.count(",") == 1: + container_ref, images = testcase_ref.split(",") + target_arch = None + else: + raise ValueError(f"cannot parse {testcase_ref.count}") + return container_ref, images, target_arch + + @pytest.fixture(scope='session') def shared_tmpdir(tmpdir_factory): tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared")) @@ -78,15 +91,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): :request.parm: has the form "container_url,img_type1+img_type2,arch" """ - # image_type is passed via special pytest parameter fixture - testcase_ref = request.param - if 
testcase_ref.count(",") == 2: - container_ref, images, target_arch = testcase_ref.split(",") - elif testcase_ref.count(",") == 1: - container_ref, images = testcase_ref.split(",") - target_arch = None - else: - raise ValueError(f"cannot parse {testcase_ref.count}") + container_ref, images, target_arch = parse_request_params(request) # images might be multiple --type args # split and check each one From 62c190f5c58f58c3271166384f2f401882c591a1 Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Mon, 11 Mar 2024 15:40:01 +0000 Subject: [PATCH 069/279] test/test_build: add local flag Prepare the test's build command to accept a `local` flag which enable ability to build local images in a follow up commit. --- test/bib/test_build.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8c6cd32b..72879b0f 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -210,6 +210,13 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", "-v", "/store", # share the cache between builds + ] + + # we need to mount the host's container store + if local: + cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"]) + + cmd.extend([ *creds_args, build_container, container_ref, @@ -217,7 +224,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): *types_arg, *upload_args, *target_arch_args, - ] + "--local" if local else "--local=false", + ]) + # print the build command for easier tracing print(" ".join(cmd)) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) From e0ca48f35e389ae9eee6686d829c494c0cf821cf Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Mon, 11 Mar 2024 15:53:05 +0000 Subject: [PATCH 070/279] test/test_build: test local container builds Add an integration tests for the local storage 
implementation. The test creates a local container and then mounts the local container store to podman, passing the `--local` flag to the build command. --- test/bib/test_build.py | 39 +++++++++++++++++++++++++++++++++------ test/bib/testcases.py | 2 ++ 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 72879b0f..8d77784c 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -2,9 +2,11 @@ import os import pathlib import platform +import random import re import shutil import subprocess +import string import tempfile import uuid from contextlib import contextmanager @@ -45,14 +47,19 @@ class ImageBuildResult(NamedTuple): def parse_request_params(request): # image_type is passed via special pytest parameter fixture testcase_ref = request.param - if testcase_ref.count(",") == 2: + if testcase_ref.count(",") == 3: container_ref, images, target_arch, local = testcase_ref.split(",") + local = local is not None + elif testcase_ref.count(",") == 2: + container_ref, images, target_arch = testcase_ref.split(",") + local = False elif testcase_ref.count(",") == 1: container_ref, images = testcase_ref.split(",") target_arch = None + local = False else: raise ValueError(f"cannot parse {testcase_ref.count}") - return container_ref, images, target_arch + return container_ref, images, target_arch, local @pytest.fixture(scope='session') @@ -66,9 +73,29 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload """ Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password + In the case an image is being built from a local container, the + function will build the required local container for the test. 
""" - with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: - yield build_results[0] + container_ref, images, target_arch, local = parse_request_params(request) + + if not local: + with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + yield build_results[0] + else: + cont_tag = "localhost/cont-base-" + "".join(random.choices(string.digits, k=12)) + + # we are not cross-building local images (for now) + request.param = ",".join([cont_tag, images, "", "true"]) + + # copy the container into containers-storage + subprocess.check_call([ + "skopeo", "copy", + f"docker://{container_ref}", + f"containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]{cont_tag}" + ]) + + with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + yield build_results[0] @pytest.fixture(name="images", scope="session") @@ -89,9 +116,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): Will return cached results of previous build requests. 
- :request.parm: has the form "container_url,img_type1+img_type2,arch" + :request.param: has the form "container_url,img_type1+img_type2,arch,local" """ - container_ref, images, target_arch = parse_request_params(request) + container_ref, images, target_arch, local = parse_request_params(request) # images might be multiple --type args # split and check each one diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 86869805..923c716d 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -45,6 +45,8 @@ def gen_testcases(what): CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], + CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[0] + ",,true", + CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[2] + ",,true", ] # do a cross arch test too if platform.machine() == "x86_64": From 0bf08a3cb0f817161face60da6193b97c7b3b000 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Wed, 13 Mar 2024 12:29:57 +0100 Subject: [PATCH 071/279] github: don't run any workflows on push We already run everything in the merge queue, so I don't think there's any additional value in also running them when pushing to main. This also finally removes pushing any artifacts to ghcr. We now use RHTAP and quay.io for everything, and the ghcr images aren't even documented anywhere, so let's just drop this. 
--- .github.com/workflows/bibtests.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 270c6947..365a5125 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -5,9 +5,6 @@ on: pull_request: branches: - "*" - push: - branches: - - main # for merge queue merge_group: From 2ab4ed1352c73f26c2c22b518816698d95be825e Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 8 Mar 2024 17:31:48 +0100 Subject: [PATCH 072/279] test: add test that ensures "vmdk" images are build The vmdk images cannot be booted easily in qemu so we will just check that the image is there but do no functional testing. --- test/bib/test_build.py | 11 ++++++----- test/bib/testcases.py | 33 +++++++++++++++++---------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8d77784c..7dc81806 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -17,7 +17,7 @@ # local test utils import testutil from containerbuild import build_container_fixture # noqa: F401 -from testcases import (DIRECT_BOOT_IMAGE_TYPES, INSTALLER_IMAGE_TYPES, +from testcases import (QEMU_BOOT_IMAGE_TYPES, INSTALLER_IMAGE_TYPES, gen_testcases) from vm import AWS, QEMU @@ -140,6 +140,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": pathlib.Path(output_path) / "image/disk.raw", "raw": pathlib.Path(output_path) / "image/disk.raw", + "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk", "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ @@ -228,7 +229,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): if image_types[0] in INSTALLER_IMAGE_TYPES: types_arg = [f"--type={image_types[0]}"] else: - types_arg = 
[f"--type={it}" for it in DIRECT_BOOT_IMAGE_TYPES] + types_arg = [f"--type={it}" for it in QEMU_BOOT_IMAGE_TYPES] # run container to deploy an image into a bootable disk and upload to a cloud service if applicable cmd = [ @@ -309,14 +310,14 @@ def test_container_builds(build_container): assert build_container in output -@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("all"), indirect=["image_type"]) def test_image_is_generated(image_type): assert image_type.img_path.exists(), "output file missing, dir "\ f"content: {os.listdir(os.fspath(image_type.img_path))}" @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_boots(image_type): with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) @@ -383,7 +384,7 @@ def has_selinux(): @pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") -@pytest.mark.parametrize("image_type", gen_testcases("direct-boot"), indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_build_without_se_linux_denials(image_type): # the journal always contains logs from the image building assert image_type.journal_output != "" diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 923c716d..87811fbf 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -2,7 +2,13 @@ import platform # supported images that can be directly booted -DIRECT_BOOT_IMAGE_TYPES = ("qcow2", "ami", "raw") +QEMU_BOOT_IMAGE_TYPES = ("qcow2", "raw") + +# images that can *not* be booted directly from qemu 
+NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) + +# images that can *not* be booted directly from qemu +NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) # supported images that require an install INSTALLER_IMAGE_TYPES = ("anaconda-iso",) @@ -35,19 +41,11 @@ def gen_testcases(what): for img_type in INSTALLER_IMAGE_TYPES: test_cases.append(f"{cnt},{img_type}") return test_cases - elif what == "direct-boot": - # Do not do any AMI tests here, just test the raw image (which is - # identical to ami right now). The AMI needs to also get uploaded - # so they really need their own dedicated AMI test via the "ami-boot" - # test-cases - test_cases = [ - CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], - CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], - CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[2], - CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[0], - CONTAINERS_TO_TEST["centos"] + "," + DIRECT_BOOT_IMAGE_TYPES[0] + ",,true", - CONTAINERS_TO_TEST["fedora"] + "," + DIRECT_BOOT_IMAGE_TYPES[2] + ",,true", - ] + elif what == "qemu-boot": + test_cases = [] + for cnt in CONTAINERS_TO_TEST.values(): + for img_type in QEMU_BOOT_IMAGE_TYPES: + test_cases.append(f"{cnt},{img_type}") # do a cross arch test too if platform.machine() == "x86_64": # todo: add fedora:eln @@ -60,14 +58,17 @@ def gen_testcases(what): elif what == "all": test_cases = [] for cnt in CONTAINERS_TO_TEST.values(): - for img_type in DIRECT_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES: + for img_type in QEMU_BOOT_IMAGE_TYPES + NON_QEMU_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES: test_cases.append(f"{cnt},{img_type}") return test_cases + # TODO: make images generate a superdisk manifest with pipelines for + # qcow2+vmdk+raw so that we can just generate them all via a + # single build elif what == "multidisk": # single test that specifies all image types test_cases = [] for cnt in CONTAINERS_TO_TEST.values(): - img_type = "+".join(DIRECT_BOOT_IMAGE_TYPES) + img_type = 
"+".join(QEMU_BOOT_IMAGE_TYPES) test_cases.append(f"{cnt},{img_type}") return test_cases raise ValueError(f"unknown test-case type {what}") From d8d20b5e7ef9b56d92f5246ba9a4f2eddfa4ba96 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Mar 2024 12:24:22 +0100 Subject: [PATCH 073/279] Revert "test: change build_images() to not assert because order is random" This reverts commit 3c0d1348df1c7f315459c3347fa41af2f49fc35e. --- test/bib/test_build.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 7dc81806..f3f428bb 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -170,14 +170,12 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): image_type, generated_img, target_arch, username, password, bib_output, journal_output)) - # return cached results only if we have exactly the amount of images - # requested. the reason is that for multi-images we cannot just return - # partial results. And because tests run in random order it maybe that - # a multi-image test runs after a single image test that already generated - # one of the types the multi images requested. 
- # TODO: rework the whole helper and extract the cache check and the build - # into their (tested) functions - if len(results) == len(image_types): + # Because we always build all image types, regardless of what was requested, we should either have 0 results or all + # should be available, so if we found at least one result but not all of them, this is a problem with our setup + assert not results or len(results) == len(image_types), \ + f"unexpected number of results found: requested {image_types} but got {results}" + + if results: yield results return From b9ab78b9047594818bcd2d1b78ac9916d1cdf8ab Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Mar 2024 12:58:26 +0100 Subject: [PATCH 074/279] test: add CLOUD_BOOT_IMAGE_TYPES for images that boot in a cloud This new image types represents images that we test via cloud boot like AMIs. It also means that some of the cache logic can be reworked. --- test/bib/test_build.py | 15 ++++++++------- test/bib/testcases.py | 9 ++++++--- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index f3f428bb..638437d7 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -17,8 +17,7 @@ # local test utils import testutil from containerbuild import build_container_fixture # noqa: F401 -from testcases import (QEMU_BOOT_IMAGE_TYPES, INSTALLER_IMAGE_TYPES, - gen_testcases) +from testcases import CLOUD_BOOT_IMAGE_TYPES, QEMU_BOOT_IMAGE_TYPES, gen_testcases from vm import AWS, QEMU if not testutil.has_executable("podman"): @@ -158,7 +157,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # cache the metadata instead of the disk image. Alternatively we # could stop testing ami locally at all and just skip any ami tests # if there are no AWS credentials. 
- if image_type == "ami": + if image_type in CLOUD_BOOT_IMAGE_TYPES: continue generated_img = artifact[image_type] print(f"Checking for cached image {image_type} -> {generated_img}") @@ -223,11 +222,13 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # upload forced but credentials aren't set raise RuntimeError("AWS credentials not available (upload forced)") - # we're either building an iso or all the disk image types - if image_types[0] in INSTALLER_IMAGE_TYPES: - types_arg = [f"--type={image_types[0]}"] - else: + # qemu boot images can be build with a single invocation + # TODO: ensure that *all* images can be built with the same call, to + # do this we need the "superimage" support in images first + if image_types[0] in QEMU_BOOT_IMAGE_TYPES: types_arg = [f"--type={it}" for it in QEMU_BOOT_IMAGE_TYPES] + else: + types_arg = [f"--type={image_types[0]}"] # run container to deploy an image into a bootable disk and upload to a cloud service if applicable cmd = [ diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 87811fbf..0e285289 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -4,8 +4,8 @@ # supported images that can be directly booted QEMU_BOOT_IMAGE_TYPES = ("qcow2", "raw") -# images that can *not* be booted directly from qemu -NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) +# supported images that can be booted in a cloud +CLOUD_BOOT_IMAGE_TYPES = ("ami",) # images that can *not* be booted directly from qemu NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) @@ -58,7 +58,10 @@ def gen_testcases(what): elif what == "all": test_cases = [] for cnt in CONTAINERS_TO_TEST.values(): - for img_type in QEMU_BOOT_IMAGE_TYPES + NON_QEMU_BOOT_IMAGE_TYPES + INSTALLER_IMAGE_TYPES: + for img_type in QEMU_BOOT_IMAGE_TYPES + \ + CLOUD_BOOT_IMAGE_TYPES + \ + NON_QEMU_BOOT_IMAGE_TYPES + \ + INSTALLER_IMAGE_TYPES: test_cases.append(f"{cnt},{img_type}") return test_cases # TODO: make images generate a superdisk manifest with pipelines for 
From 0adaeb424190861f8082ca3fe4fd6e885c1c57e2 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 14 Mar 2024 15:10:36 +0100 Subject: [PATCH 075/279] test: build all disk image types in a single build The vmdk images cannot be booted easily in qemu so we will just check that the image is there but do no functional testing. --- test/bib/test_build.py | 14 ++++++-------- test/bib/testcases.py | 14 +++++++------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 638437d7..f69fe04d 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -17,7 +17,7 @@ # local test utils import testutil from containerbuild import build_container_fixture # noqa: F401 -from testcases import CLOUD_BOOT_IMAGE_TYPES, QEMU_BOOT_IMAGE_TYPES, gen_testcases +from testcases import CLOUD_BOOT_IMAGE_TYPES, DISK_IMAGE_TYPES, gen_testcases from vm import AWS, QEMU if not testutil.has_executable("podman"): @@ -222,11 +222,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # upload forced but credentials aren't set raise RuntimeError("AWS credentials not available (upload forced)") - # qemu boot images can be build with a single invocation - # TODO: ensure that *all* images can be built with the same call, to - # do this we need the "superimage" support in images first - if image_types[0] in QEMU_BOOT_IMAGE_TYPES: - types_arg = [f"--type={it}" for it in QEMU_BOOT_IMAGE_TYPES] + # all disk-image types can be generated via a single build + if image_types[0] in DISK_IMAGE_TYPES: + types_arg = [f"--type={it}" for it in DISK_IMAGE_TYPES] else: types_arg = [f"--type={image_types[0]}"] @@ -309,7 +307,7 @@ def test_container_builds(build_container): assert build_container in output -@pytest.mark.parametrize("image_type", gen_testcases("all"), indirect=["image_type"]) +@pytest.mark.parametrize("image_type", gen_testcases("multidisk"), indirect=["image_type"]) def test_image_is_generated(image_type): 
assert image_type.img_path.exists(), "output file missing, dir "\ f"content: {os.listdir(os.fspath(image_type.img_path))}" @@ -413,7 +411,7 @@ def test_iso_installs(image_type): @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() - expected = {"disk.qcow2", "disk.raw"} + expected = {"disk.qcow2", "disk.raw", "disk.vmdk"} for result in images: filename = os.path.basename(result.img_path) assert result.img_path.exists() diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 0e285289..cda0c5e3 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -4,12 +4,15 @@ # supported images that can be directly booted QEMU_BOOT_IMAGE_TYPES = ("qcow2", "raw") -# supported images that can be booted in a cloud -CLOUD_BOOT_IMAGE_TYPES = ("ami",) - # images that can *not* be booted directly from qemu NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) +# disk image types can be build from a single manifest +DISK_IMAGE_TYPES = QEMU_BOOT_IMAGE_TYPES + NON_QEMU_BOOT_IMAGE_TYPES + +# supported images that can be booted in a cloud +CLOUD_BOOT_IMAGE_TYPES = ("ami",) + # supported images that require an install INSTALLER_IMAGE_TYPES = ("anaconda-iso",) @@ -64,14 +67,11 @@ def gen_testcases(what): INSTALLER_IMAGE_TYPES: test_cases.append(f"{cnt},{img_type}") return test_cases - # TODO: make images generate a superdisk manifest with pipelines for - # qcow2+vmdk+raw so that we can just generate them all via a - # single build elif what == "multidisk": # single test that specifies all image types test_cases = [] for cnt in CONTAINERS_TO_TEST.values(): - img_type = "+".join(QEMU_BOOT_IMAGE_TYPES) + img_type = "+".join(DISK_IMAGE_TYPES) test_cases.append(f"{cnt},{img_type}") return test_cases raise ValueError(f"unknown test-case type {what}") From 1846725ef8ea4e156fac08124d435d4777b81771 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Thu, 14 Mar 2024 15:41:32 +0100 Subject: [PATCH 
076/279] main: switch to using containers-storage even without --local I think that pulling always is not the greatest idea since we switched to a container storage, but it keeps the backward compatibility. Note that the manifest test now has to be rootful because we run podman pull. --- test/bib/test_manifest.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 8746809a..7f0cdc5a 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -7,6 +7,8 @@ if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) +if not testutil.can_start_rootful_containers(): + pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) from containerbuild import build_container_fixture # noqa: F401 from testcases import gen_testcases @@ -19,6 +21,8 @@ def test_manifest_smoke(build_container, testcase_ref): output = subprocess.check_output([ "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', build_container, ]) From 1935329a2e2b4fa38c34e3243beecd61b066039a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Thu, 14 Mar 2024 16:53:07 +0100 Subject: [PATCH 077/279] test: check the bib's return code in test_build --- test/bib/test_build.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index f69fe04d..e91e3780 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -264,7 +264,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): break print(line, end="") bib_output += line - p.wait(timeout=10) + rc = p.wait(timeout=10) + assert rc == 0, f"bootc-image-builder failed with return code {rc}" journal_output = testutil.journal_after_cursor(cursor) 
metadata = {} From bb4fb791a7bd90e98cdc0ae08507130713cba4c4 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 20 Mar 2024 14:12:21 +0100 Subject: [PATCH 078/279] workflow: move to podman 4 in the tests During the test of PR#291 we got a failure that should get fixed by moving to a newer podman. --- .github.com/workflows/bibtests.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 365a5125..e2ceffc3 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -78,7 +78,17 @@ jobs: run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh - - name: Install test deps + - name: Update podman + run: | + # from https://askubuntu.com/questions/1414446/whats-the-recommended-way-of-installing-podman-4-in-ubuntu-22-04 + ubuntu_version='22.04' + key_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" + sources_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}" + echo "deb $sources_url/ /" | sudo tee /etc/apt/sources.list.d/devel-kubic-libcontainers-unstable.list + curl -fsSL $key_url | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null + sudo apt update + sudo apt install -y podman + - name: Install python test deps run: | # make sure test deps are available for root sudo -E pip install --user -r test/requirements.txt From 3ca790f66c519aa5c01c085d0974d27cd3edab3d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 7 Mar 2024 13:36:55 +0100 Subject: [PATCH 079/279] bib: move to cheggaaa/pb/v3 There is a new version of cheggaaa/pb that we should use. Minor API changes and the output changes slightly. 
--- test/bib/test_build.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index e91e3780..174e69fc 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -334,12 +334,8 @@ def test_ami_boots_in_aws(image_type, force_aws_upload): pytest.skip("AWS credentials not available (upload not forced)") # check that upload progress is in the output log. Uploads looks like: - # - # Uploading /output/image/disk.raw to bootc-image-builder-ci:aac64b64-6e57-47df-9730-54763061d84b-disk.raw - # 0 B / 10.00 GiB 0.00% - # In the tests with no pty no progress bar is shown in the output just - # xx / yy zz% - assert " 100.00%\n" in image_type.bib_output + # 4.30 GiB / 10.00 GiB [------------>____________] 43.02% 58.04 MiB p/s + assert "] 100.00%" in image_type.bib_output with AWS(image_type.metadata["ami_id"]) as test_vm: exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 From a77c63586376d432dd140c45af824fab79e824e1 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 20 Mar 2024 09:25:49 +0100 Subject: [PATCH 080/279] test: add new make_container() helper --- test/bib/containerbuild.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 550f3bc0..0bfed2ee 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -1,10 +1,25 @@ import os +import random +import string import subprocess import textwrap +from contextlib import contextmanager import pytest +@contextmanager +def make_container(container_path): + # BIB only supports container tags, not hashes + container_tag = "bib-test-" + "".join(random.choices(string.digits, k=12)) + subprocess.check_call([ + "podman", "build", + "-t", container_tag, + container_path], encoding="utf8") + yield container_tag + subprocess.check_call(["podman", "rmi", container_tag]) + + 
@pytest.fixture(name="build_container", scope="session") def build_container_fixture(): """Build a container from the Containerfile and returns the name""" From c508349c3ae345e26fab51c11dc76e09401cc9b4 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 20 Mar 2024 09:03:13 +0100 Subject: [PATCH 081/279] test: add test that ensure disk space is doubled Ensure that the disk size of a container is taken into account when the image is generated. The current heuristic is that we just double the container size. The test will not build an image just generate a manifest and check that the image file is generated with the expected size. --- test/bib/test_manifest.py | 47 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 7f0cdc5a..d1f197cb 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,5 +1,6 @@ import json import subprocess +import textwrap import pytest @@ -10,10 +11,20 @@ if not testutil.can_start_rootful_containers(): pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) -from containerbuild import build_container_fixture # noqa: F401 +from containerbuild import build_container_fixture, make_container # noqa: F401 from testcases import gen_testcases +def find_image_size_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "image": + for st in pipl["stages"]: + if st["type"] == "org.osbuild.truncate": + return st["options"]["size"] + raise ValueError(f"cannot find disk size in manifest:\n{manifest_str}") + + @pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) def test_manifest_smoke(build_container, testcase_ref): # testcases_ref has the form "container_url,img_type1+img_type2,arch" @@ -30,3 +41,37 @@ def test_manifest_smoke(build_container, testcase_ref): # just some basic validation assert 
manifest["version"] == "2" assert manifest["pipelines"][0]["name"] == "build" + # default disk size is 10G + disk_size = find_image_size_from(output) + # default image size is 10G + assert int(disk_size) == 10 * 1024 * 1024 * 1024 + + +@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) +def test_manifest_disksize(tmp_path, build_container, testcase_ref): + # create derrived container with 6G silly file to ensure that + # bib doubles the size to 12G+ + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {testcase_ref} + RUN truncate -s 2G /big-file1 + RUN truncate -s 2G /big-file2 + RUN truncate -s 2G /big-file3 + """), encoding="utf8") + + print(f"building big size container from {testcase_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + # ensure local storage is here + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + # need different entry point + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "localhost/{container_tag}"]', + build_container, + ], encoding="utf8") + # ensure disk size is bigger than the default 10G + disk_size = find_image_size_from(manifest_str) + assert int(disk_size) > 11_000_000_000 From 6a58d07098e8c4459b8d2db00833fa37c5bd05f0 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 20 Mar 2024 13:07:29 +0100 Subject: [PATCH 082/279] bib: check /var/lib/containers/storage when using "--local" Ensure to error early when the user is not passing the required ``` -v /var/lib/containers/storage:/var/lib/containers/storage ``` when doing a local build. 
--- test/bib/test_manifest.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index d1f197cb..41218484 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -75,3 +75,37 @@ def test_manifest_disksize(tmp_path, build_container, testcase_ref): # ensure disk size is bigger than the default 10G disk_size = find_image_size_from(manifest_str) assert int(disk_size) > 11_000_000_000 + + +def test_manifest_local_checks_containers_storage_errors(build_container): + # note that the + # "-v /var/lib/containers/storage:/var/lib/containers/storage" + # is missing here + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + '--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "arg-not-used"]', + build_container, + ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8") + assert res.returncode == 1 + err = 'Error: local storage not working, did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' 
+ assert err in res.stderr + + +@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) +def test_manifest_local_checks_containers_storage_works(tmp_path, build_container, testcase_ref): + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {testcase_ref} + """), encoding="utf8") + + with make_container(tmp_path) as container_tag: + subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "localhost/{container_tag}"]', + build_container, + ], check=True, encoding="utf8") From 29a07e04c218f80b5983fbe88df4b05f5a16e5c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Fri, 22 Mar 2024 09:54:10 +0100 Subject: [PATCH 083/279] github: disable tests on macos The tests are broken for weeks, probably due to broken podman store on our macos runner. This already happened in the past and it always requires a manual intervention to fix it. Since we don't think that these tests provide much value over what we are already testing on Linux in GitHub Actions and in Testing Farm, let's disable them. 
--- .github.com/workflows/bibtests.yaml | 32 ----------------------------- 1 file changed, 32 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index e2ceffc3..a804a05d 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -109,35 +109,3 @@ jobs: run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh - - integration-macos: - name: "Integration macos" - # disabled GH runner as it takes ~50min to run this test, self-hosted - # is much faster (~15min) - #runs-on: macos-13 # needed to get latest cpu - runs-on: self-hosted - steps: - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Setup up python - uses: actions/setup-python@v5 - with: - cache: 'pip' - - run: python3 -m pip install -r test/requirements.txt - - name: Setup up podman - run: | - brew install podman netcat - if ! podman machine inspect; then - podman machine init --rootful - podman machine set --cpus 4 --memory 4096 - fi - if [ "$(podman machine inspect --format='{{.State}}')" != "running" ]; then - podman machine start - fi - - name: Run tests - env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - run: | - pytest -rs --basetemp="${TMPDIR}/tmp" From c6d5d5cfbaa200858025baeecdbccca1c1c4b271 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 12 Apr 2024 17:53:30 -0400 Subject: [PATCH 084/279] document that `customizations.kernel.append` exists I noticed this was wired up in the code, but undocumented and untested. There are valid use cases for machine-specific kernel arguments, so let's support that here. (What's the best place to verify that the kargs show up in the test VMs? 
) --- test/bib/test_build.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 174e69fc..7f1409dc 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -191,6 +191,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "groups": ["wheel"], }, ], + "kernel": { + "append": "user.sometestkarg=sometestvalue" + } }, }, } From 006a8b88b5008e32585bd76c01f35bc9147cee3a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 21 Mar 2024 11:45:02 +0100 Subject: [PATCH 085/279] test: map /var/lib/containers/storage when running bib Now that we always pull the containers this may speed up things when layers are found in the container storage. --- test/bib/test_build.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 7f1409dc..01b3acb7 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -236,6 +236,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "-v", f"{output_path}:/output", "-v", "/store", # share the cache between builds ] From 15605b6914c5542f01b16618715bda5c9713d355 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 15 Apr 2024 11:41:00 +0200 Subject: [PATCH 086/279] test: test that kernel commandline args are passed correctly --- test/bib/test_build.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 01b3acb7..51c8604a 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -38,6 +38,7 @@ class ImageBuildResult(NamedTuple): img_arch: str username: str password: str + kargs: str bib_output: str journal_output: str metadata: dict = {} @@ -125,6 +126,7 @@ def build_images(shared_tmpdir, build_container, request, 
force_aws_upload): username = "test" password = "password" + kargs = "user.sometestkarg=sometestvalue" # params can be long and the qmp socket (that has a limit of 100ish # AF_UNIX) is derived from the path @@ -167,6 +169,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( image_type, generated_img, target_arch, username, password, + kargs, bib_output, journal_output)) # Because we always build all image types, regardless of what was requested, we should either have 0 results or all @@ -192,7 +195,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): }, ], "kernel": { - "append": "user.sometestkarg=sometestvalue" + "append": kargs, } }, }, @@ -286,7 +289,7 @@ def del_ami(): results = [] for image_type in image_types: results.append(ImageBuildResult(image_type, artifact[image_type], target_arch, - username, password, bib_output, journal_output, metadata)) + username, password, kargs, bib_output, journal_output, metadata)) yield results # Try to cache as much as possible @@ -318,6 +321,14 @@ def test_image_is_generated(image_type): f"content: {os.listdir(os.fspath(image_type.img_path))}" +def assert_kernel_args(test_vm, image_type): + exit_status, kcmdline = test_vm.run("cat /proc/cmdline", user=image_type.username, password=image_type.password) + assert exit_status == 0 + # the kernel arg string must have a space as the prefix and either a space + # as suffix or be the last element of the kernel commandline + assert re.search(f" {re.escape(image_type.kargs)}( |$)", kcmdline) + + @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_boots(image_type): @@ -327,6 +338,7 @@ def test_image_boots(image_type): exit_status, output = test_vm.run("echo hello", 
user=image_type.username, password=image_type.password) assert exit_status == 0 assert "hello" in output + assert_kernel_args(test_vm, image_type) @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) @@ -407,6 +419,7 @@ def test_iso_installs(image_type): vm.start(use_ovmf=True) exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 + assert_kernel_args(vm, image_type) @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) From 7b6164edf0b2f95348d77b3b6cb19bf2dcbe5e41 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 12 Apr 2024 10:20:18 -0400 Subject: [PATCH 087/279] Automatically pick up `/config.json` if it exists When passing a config, it's an ergonomic hit to need to provide it on *both* the `podman run` invocation *and* the arguments to the binary. Change things so that we automatically pick up `/config.json` if it exists in the container. This way the user only needs to give the podman argument. 
--- test/bib/test_build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 51c8604a..afaf3204 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -240,6 +240,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "--privileged", "--security-opt", "label=type:unconfined_t", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "-v", f"{config_json_path}:/config.json:ro", "-v", f"{output_path}:/output", "-v", "/store", # share the cache between builds ] @@ -252,7 +253,6 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): *creds_args, build_container, container_ref, - "--config", "/output/config.json", *types_arg, *upload_args, *target_arch_args, From b98e215d91b0fe950be72be8b6677adeecea1413 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 16 Apr 2024 19:39:42 +0200 Subject: [PATCH 088/279] test: check that `bootc status` contains the right container_ref We are currently not testing that when the build image has a valid `bootc status` that container the original container reference. 
--- test/bib/test_build.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index afaf3204..b96025c8 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -36,6 +36,7 @@ class ImageBuildResult(NamedTuple): img_type: str img_path: str img_arch: str + container_ref: str username: str password: str kargs: str @@ -168,9 +169,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( - image_type, generated_img, target_arch, username, password, - kargs, - bib_output, journal_output)) + image_type, generated_img, target_arch, container_ref, + username, password, + kargs, bib_output, journal_output)) # Because we always build all image types, regardless of what was requested, we should either have 0 results or all # should be available, so if we found at least one result but not all of them, this is a problem with our setup @@ -288,8 +289,10 @@ def del_ami(): results = [] for image_type in image_types: - results.append(ImageBuildResult(image_type, artifact[image_type], target_arch, - username, password, kargs, bib_output, journal_output, metadata)) + results.append(ImageBuildResult( + image_type, artifact[image_type], target_arch, container_ref, + username, password, + kargs, bib_output, journal_output, metadata)) yield results # Try to cache as much as possible @@ -339,6 +342,14 @@ def test_image_boots(image_type): assert exit_status == 0 assert "hello" in output assert_kernel_args(test_vm, image_type) + # ensure bootc points to the right image + # TODO: replace this ssh root instead login, see PR#357 + _, output = test_vm.run( + f"echo {image_type.password} | sudo -S bootc status", + user=image_type.username, password=image_type.password, + ) + # XXX: read the fully yaml instead? 
+ assert f"image: {image_type.container_ref}" in output @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) From 68b863128d2f5612c52217c507c8c4518e801c6d Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 17 Apr 2024 20:05:42 +0200 Subject: [PATCH 089/279] github: use go 1.20 for the lint job The Go version in go.mod was updated to 1.20 in cb78e831cef1f48695b1491654d41617b4f4f1c4 but the linter was still on 1.19, which causes _weird_ issues like "unused imports" false positives. --- .github.com/workflows/bibtests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index a804a05d..3af3ee9b 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -17,10 +17,10 @@ jobs: name: "⌨ Lint" runs-on: ubuntu-latest steps: - - name: Set up Go 1.19 + - name: Set up Go 1.20 uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: "1.20" id: go - name: Check out code into the Go module directory From 3ff296d0df986c5d3409d049b107997b9b43cb7a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Apr 2024 10:17:25 +0200 Subject: [PATCH 090/279] github: run unittests as part of the CI For some reason we missed that :/ --- .github.com/workflows/bibtests.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 3af3ee9b..a40334b8 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -14,7 +14,7 @@ concurrency: jobs: lint: - name: "⌨ Lint" + name: "⌨ Lint & unittests" runs-on: ubuntu-latest steps: - name: Set up Go 1.20 @@ -42,6 +42,9 @@ jobs: args: --timeout 5m0s working-directory: bib + - name: Run unit tests + run: (cd bib && go test -race ./...) 
+ shellcheck: name: "🐚 Shellcheck" runs-on: ubuntu-20.04 From f8f5812db6627e154fa3988c393a1b16eacb6078 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 17 Apr 2024 18:06:05 +0200 Subject: [PATCH 091/279] main: decorate more errors with context and avoid panics We recently had a bugreport where a panic had very little error context: ``` Generating manifest manifest-iso.json panic: exec: no command ``` this prompted this commit so that: a) we do add more context to our errors b) avoid panicing when we can also return errors --- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 41218484..0b182fbb 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -89,7 +89,7 @@ def test_manifest_local_checks_containers_storage_errors(build_container): build_container, ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8") assert res.returncode == 1 - err = 'Error: local storage not working, did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' + err = 'local storage not working, did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' assert err in res.stderr From 759a63969288c0bc4b2677731490bf9a4b664945 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Apr 2024 08:37:02 +0200 Subject: [PATCH 092/279] bib: ignore --target-arch if it is the same as the running arch If the user passes `--target-arch` and it is the same as the current arch just ignore the option as this is the default behavior anyway. 
This partly closes https://github.com/osbuild/bootc-image-builder/issues/316 --- test/bib/containerbuild.py | 4 ++++ test/bib/test_opts.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 0bfed2ee..e53c734e 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -44,6 +44,10 @@ def build_fake_container_fixture(tmpdir_factory, build_container): fake_osbuild_path.write_text(textwrap.dedent("""\ #!/bin/sh -e + # injest generated manifest from the images library, if we do not + # do this images may fail with "broken" pipe errors + cat - + mkdir -p /output/qcow2 echo "fake-disk.qcow2" > /output/qcow2/disk.qcow2 diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 5e3159a3..e125cf5f 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -1,3 +1,4 @@ +import platform import subprocess import pytest @@ -27,3 +28,31 @@ def test_bib_chown_opts(tmp_path, build_fake_container, chown_opt, expected_uid_ assert p.exists() assert p.stat().st_uid == expected_uid_gid[0] assert p.stat().st_gid == expected_uid_gid[1] + + +@pytest.mark.parametrize("target_arch_opt, expected_err", [ + ([], ""), + (["--target-arch=amd64"], ""), + (["--target-arch=x86_64"], ""), + (["--target-arch=arm64"], "cannot build iso for different target arches yet"), +]) +@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross build test only runs on x86") +def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_arch_opt, expected_err): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "--type=iso", + "quay.io/centos-bootc/centos-bootc:stream9", + ] + target_arch_opt, 
check=False, capture_output=True, text=True) + if expected_err == "": + assert res.returncode == 0 + else: + assert res.returncode != 0 + assert expected_err in res.stderr From 578eee29758e26eb23a124653319cc36fe980439 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 22 Apr 2024 11:38:03 +0200 Subject: [PATCH 093/279] github: put `/var/lib/containers` on `/mnt` via bind mount On GH runners /mnt has 70G free space and because we ran out of diskspace in PR#338 in what appears the container storage move the container storage via a bind mount to `/mnt/var/lib/containers`. --- .github.com/workflows/bibtests.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index a40334b8..289e92fd 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -95,17 +95,23 @@ jobs: run: | # make sure test deps are available for root sudo -E pip install --user -r test/requirements.txt - # podman needs (parts of) the environment but will break when - # XDG_RUNTIME_DIR is set. - # TODO: figure out what exactly podman needs + - name: Workarounds for GH runner diskspace + run: | # use custom basetemp here because /var/tmp is on a smaller disk # than /mnt sudo mkdir -p /mnt/var/tmp/bib-tests + # on GH runners /mnt has 70G free space, use that for our container + # storage + sudo mkdir -p /mnt/var/lib/containers + sudo mount -o bind /mnt/var/lib/containers /var/lib/containers - name: Run tests env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | + # podman needs (parts of) the environment but will break when + # XDG_RUNTIME_DIR is set. 
+ # TODO: figure out what exactly podman needs sudo -E XDG_RUNTIME_DIR= pytest-3 --basetemp=/mnt/var/tmp/bib-tests - name: Diskspace (after) if: ${{ always() }} From a82314c1c9fb70aa9aea2a09d05da4ae00020c3d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 22 Apr 2024 16:08:06 +0200 Subject: [PATCH 094/279] workflow,tests: use `/var/tmp/osbuild-test-store` and cache that This hopefully speeds up the tests, especially the iso ones. --- .github.com/workflows/bibtests.yaml | 9 ++++++++- test/bib/test_build.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 289e92fd..32e4f7f6 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -103,7 +103,14 @@ jobs: # on GH runners /mnt has 70G free space, use that for our container # storage sudo mkdir -p /mnt/var/lib/containers - sudo mount -o bind /mnt/var/lib/containers /var/lib/containers + sudo mount -o bind /mnt/var/lib/containers /var/lib/containers + - run: | + mkdir -p /var/tmp/osbuild-test-store + - name: Cache osbuild env + uses: actions/cache@v4 + with: + path: /var/tmp/osbuild-test-store + key: no-key-needed-here - name: Run tests env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/test/bib/test_build.py b/test/bib/test_build.py index b96025c8..6b742444 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -243,7 +243,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "-v", f"{config_json_path}:/config.json:ro", "-v", f"{output_path}:/output", - "-v", "/store", # share the cache between builds + "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds ] # we need to mount the host's container store From cf07dda09e3174f67389add2e3f7c79ea7e0a17c Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Apr 2024 14:55:52 +0200 
Subject: [PATCH 095/279] test: enable iso installer tests for both centos/fedora Now that we can inspect the image enable the iso installer tests again. --- test/bib/test_build.py | 1 - test/bib/testcases.py | 8 +++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 6b742444..e8813d32 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -413,7 +413,6 @@ def test_image_build_without_se_linux_denials(image_type): f"denials in log {image_type.journal_output}" -@pytest.mark.skip(reason="see https://github.com/osbuild/bootc-image-builder/issues/233") @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) def test_iso_installs(image_type): diff --git a/test/bib/testcases.py b/test/bib/testcases.py index cda0c5e3..e5b1f6aa 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -38,11 +38,9 @@ def gen_testcases(what): return [cnt + ",ami" for cnt in CONTAINERS_TO_TEST.values()] elif what == "anaconda-iso": test_cases = [] - # only fedora right now, centos iso installer is broken right now: - # https://github.com/osbuild/bootc-image-builder/issues/157 - cnt = CONTAINERS_TO_TEST["fedora"] - for img_type in INSTALLER_IMAGE_TYPES: - test_cases.append(f"{cnt},{img_type}") + for cnt in CONTAINERS_TO_TEST.values(): + for img_type in INSTALLER_IMAGE_TYPES: + test_cases.append(f"{cnt},{img_type}") return test_cases elif what == "qemu-boot": test_cases = [] From 7834602524ea0d8c0cd1abfd4b56869d86a990b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 23 Apr 2024 10:44:09 +0200 Subject: [PATCH 096/279] test: Make sure that the osbuild store exists This breaks the tests running locally and in Testing Farm. 
--- test/bib/test_build.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index e8813d32..17802b7e 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -136,6 +136,10 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): output_path = shared_tmpdir / format(abs(hash(container_ref + str(target_arch))), "x") output_path.mkdir(exist_ok=True) + # make sure that the test store exists, because podman refuses to start if the source directory for a volume + # doesn't exist + pathlib.Path("/var/tmp/osbuild-test-store").mkdir(exist_ok=True, parents=True) + journal_log_path = output_path / "journal.log" bib_output_path = output_path / "bib-output.log" artifact = { From 079a7f3545c25923030f4cda96eaef370bfa29cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 23 Apr 2024 10:22:08 +0200 Subject: [PATCH 097/279] Fix passing the --tls-verify flag to podman pull When switching to always leveraging the local storage, I forgot to pass the --tls-verify flag. This commit fixes it. 
Co-Authored-By: Michael Vogt --- test/bib/containerbuild.py | 17 +++++++++++++++++ test/bib/test_opts.py | 32 +++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index e53c734e..762c6eec 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -40,6 +40,20 @@ def build_fake_container_fixture(tmpdir_factory, build_container): """Build a container with a fake osbuild and returns the name""" tmp_path = tmpdir_factory.mktemp("build-fake-container") + # see https://github.com/osbuild/osbuild/blob/main/osbuild/testutil/__init__.py#L91 + tracing_podman_path = tmp_path / "tracing-podman" + tracing_podman_path.write_text(textwrap.dedent("""\ + #!/bin/sh -e + + TRACE_PATH=/output/"$(basename $0)".log + for arg in "$@"; do + echo "$arg" >> "$TRACE_PATH" + done + # extra separator to differenciate between calls + echo >> "$TRACE_PATH" + exec "$0".real "$@" + """), encoding="utf8") + fake_osbuild_path = tmp_path / "fake-osbuild" fake_osbuild_path.write_text(textwrap.dedent("""\ #!/bin/sh -e @@ -60,6 +74,9 @@ def build_fake_container_fixture(tmpdir_factory, build_container): FROM {build_container} COPY fake-osbuild /usr/bin/osbuild RUN chmod 755 /usr/bin/osbuild + COPY --from={build_container} /usr/bin/podman /usr/bin/podman.real + COPY tracing-podman /usr/bin/podman + RUN chmod 755 /usr/bin/podman """), encoding="utf8") container_tag = "bootc-image-builder-test-faked-osbuild" diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index e125cf5f..b5dd98cf 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -6,12 +6,17 @@ from containerbuild import build_container_fixture, build_fake_container_fixture # noqa: F401 +@pytest.fixture(name="container_storage", scope="session") +def container_storage_fixture(tmp_path_factory): + return tmp_path_factory.mktemp("storage") + + @pytest.mark.parametrize("chown_opt,expected_uid_gid", [ ([], (0, 0)), 
(["--chown", "1000:1000"], (1000, 1000)), (["--chown", "1000"], (1000, 0)), ]) -def test_bib_chown_opts(tmp_path, build_fake_container, chown_opt, expected_uid_gid): +def test_bib_chown_opts(tmp_path, container_storage, build_fake_container, chown_opt, expected_uid_gid): output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) @@ -19,6 +24,7 @@ def test_bib_chown_opts(tmp_path, build_fake_container, chown_opt, expected_uid_ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", build_fake_container, "quay.io/centos-bootc/centos-bootc:stream9", @@ -56,3 +62,27 @@ def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_a else: assert res.returncode != 0 assert expected_err in res.stderr + + +@pytest.mark.parametrize("tls_opt,expected_cmdline", [ + ([], "--tls-verify=true"), + (["--tls-verify"], "--tls-verify=true"), + (["--tls-verify=true"], "--tls-verify=true"), + (["--tls-verify=false"], "--tls-verify=false"), + (["--tls-verify=0"], "--tls-verify=false"), +]) +def test_bib_tls_opts(tmp_path, container_storage, build_fake_container, tls_opt, expected_cmdline): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + subprocess.check_call([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "quay.io/centos-bootc/centos-bootc:stream9" + ] + tls_opt) + podman_log = output_path / "podman.log" + assert expected_cmdline in podman_log.read_text() From 52ea42ea9f9a73c299a73eb501a70994d3abaf01 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Apr 2024 12:30:40 +0200 Subject: [PATCH 098/279] test: add `arch` argument to `make_container()` test helper This is useful for cross arch container buildtesting. 
Note that this always pass `--arch` when doing `podman build` because without that the default behavior is to pull whatever arch was pulled for this image ref last but we want "native" if nothing else is specified. Note: This just passes `platform.uname().machine` which uses the kernel architecture names. podman seems to translate those kernel arch names to go arches automatically. --- test/bib/containerbuild.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 762c6eec..d751ccfb 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -1,4 +1,5 @@ import os +import platform import random import string import subprocess @@ -9,12 +10,23 @@ @contextmanager -def make_container(container_path): +def make_container(container_path, arch=None): # BIB only supports container tags, not hashes container_tag = "bib-test-" + "".join(random.choices(string.digits, k=12)) + + if not arch: + # Always provide an architecture here because without that the default + # behavior is to pull whatever arch was pulled for this image ref + # last but we want "native" if nothing else is specified. + # + # Note: podman seems to translate kernel arch to go arches + # automatically it seems. + arch = platform.uname().machine + subprocess.check_call([ "podman", "build", "-t", container_tag, + "--arch", arch, container_path], encoding="utf8") yield container_tag subprocess.check_call(["podman", "rmi", container_tag]) From 784e7d81d18a611ed46322453420c756cc354157 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 12 Apr 2024 09:42:25 +0200 Subject: [PATCH 099/279] bib: check that architecture is expected arch When trying to build an image with an incompatible target arch bib will currently not error because the container resolver is not very strict about the architecture request. 
This commit fixes this by double checking that the resolved container is actually of the expected architecture. This requires https://github.com/osbuild/images/pull/585 --- test/bib/test_manifest.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 0b182fbb..c048d088 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,4 +1,5 @@ import json +import platform import subprocess import textwrap @@ -109,3 +110,26 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "localhost/{container_tag}"]', build_container, ], check=True, encoding="utf8") + + +@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross build test only runs on x86") +def test_manifest_cross_arch_check(tmp_path, build_container): + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent("""\n + # build for x86_64 only + FROM scratch + """), encoding="utf8") + + with make_container(tmp_path, arch="x86_64") as container_tag: + with pytest.raises(subprocess.CalledProcessError) as exc: + subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest",\ + "--target-arch=aarch64", "--local", \ + "localhost/{container_tag}"]', + build_container, + ], check=True, capture_output=True, encoding="utf8") + assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr From 7e8f91b6c7bdba62437f715d8b8bf64ce7865892 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 23 Apr 2024 11:06:18 +0200 Subject: [PATCH 100/279] test: add test (somewhat) for rootfs configuration from bootc Basic test that checks that the rootfs is honored from the container. 
Not ideal because we only get xfs for both fedora and centos. --- test/bib/test_manifest.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index c048d088..88e7acbd 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -133,3 +133,37 @@ def test_manifest_cross_arch_check(tmp_path, build_container): build_container, ], check=True, capture_output=True, encoding="utf8") assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr + + +def find_rootfs_type_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "image": + for st in pipl["stages"]: + if st["type"].startswith("org.osbuild.mkfs."): + if st.get("options", {}).get("label") == "root": + return st["type"].rpartition(".")[2] + raise ValueError(f"cannot find rootfs type in manifest:\n{manifest_str}") + + +@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) +def test_manifest_rootfs_respected(build_container, testcase_ref): + # testcases_ref has the form "container_url,img_type1+img_type2,arch" + container_ref = testcase_ref.split(",")[0] + + # TODO: derive container and fake "bootc install print-configuration"? 
+ output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + build_container, + ]) + rootfs_type = find_rootfs_type_from(output) + match container_ref: + case "quay.io/centos-bootc/centos-bootc:stream9": + assert rootfs_type == "xfs" + case "quay.io/centos-bootc/fedora-bootc:eln": + assert rootfs_type == "xfs" + case _: + pytest.fail(f"unknown container_ref {container_ref} please update test") From 09b291d3cd26e0451056c5be540763ba7a550b10 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 26 Apr 2024 07:57:20 +0200 Subject: [PATCH 101/279] test: add integration test that checks that config.toml is loaded Tiny integration test that ensures config.toml is honored during manifest generation. --- test/bib/test_manifest.py | 41 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 88e7acbd..77697d01 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -167,3 +167,44 @@ def test_manifest_rootfs_respected(build_container, testcase_ref): assert rootfs_type == "xfs" case _: pytest.fail(f"unknown container_ref {container_ref} please update test") + + +def find_user_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "image": + for st in pipl["stages"]: + if st["type"] == "org.osbuild.users": + return st + raise ValueError(f"cannot find users stage in manifest:\n{manifest_str}") + + +def test_manifest_user_customizations_toml(tmp_path, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [[blueprint.customizations.user]] + name = "alice" + 
password = "$5$xx$aabbccddeeffgghhiijj" # notsecret + key = "ssh-rsa AAA ... user@email.com" + groups = ["wheel"] + """)) + output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "-v", f"{config_toml_path}:/config.toml", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + build_container, + ]) + user_stage = find_user_stage_from(output) + assert user_stage["options"]["users"].get("alice") == { + # use very fake password here, if it looks too real the + # infosec "leak detect" get very nervous + "password": "$5$xx$aabbccddeeffgghhiijj", # notsecret + "key": "ssh-rsa AAA ... user@email.com", + "groups": ["wheel"], + } From ea66c37cfc69ef751e1a17083b5ae294f9fb6f7f Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Fri, 26 Apr 2024 20:29:23 +0100 Subject: [PATCH 102/279] test: tweak itest for cross-arch manifest handline We now need a real bootable container because internal/containers/containers tries to mount the container and fails if it's just a FROM scratch container. 
--- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 77697d01..4b3da1d8 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -117,7 +117,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): cntf_path = tmp_path / "Containerfile" cntf_path.write_text(textwrap.dedent("""\n # build for x86_64 only - FROM scratch + FROM quay.io/centos-bootc/centos-bootc:stream9 """), encoding="utf8") with make_container(tmp_path, arch="x86_64") as container_tag: From 4dc0c008e14ee15cd9cc5d5e4b6a0b4693d5a260 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 04:38:01 +0000 Subject: [PATCH 103/279] build(deps): bump golangci/golangci-lint-action from 4 to 5 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4 to 5. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v4...v5) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 32e4f7f6..3473c8d3 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -36,7 +36,7 @@ jobs: run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - name: Run golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v5 with: version: v1.55.2 args: --timeout 5m0s From 47d7b76454953a2bb1668d60ee13fb3e66095410 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Apr 2024 11:38:47 +0200 Subject: [PATCH 104/279] test: add smoke test that checks that `--log-level` is honored Trivial test that ensures that the `--log-level` is honored from the commandline. Uses the fake_container with an empty osbuild so it is reasonable fast (about 20s on my system). --- test/bib/test_opts.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index b5dd98cf..aebb9a19 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -86,3 +86,22 @@ def test_bib_tls_opts(tmp_path, container_storage, build_fake_container, tls_opt ] + tls_opt) podman_log = output_path / "podman.log" assert expected_cmdline in podman_log.read_text() + + +@pytest.mark.parametrize("with_debug", [False, True]) +def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, with_debug): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + log_debug = ["--log-level", "debug"] if with_debug else [] + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + *log_debug, + "quay.io/centos-bootc/centos-bootc:stream9" + ], check=True, 
capture_output=True, text=True) + assert ('level=debug' in res.stderr) == with_debug From 831d8076a7dd4c02fac890c126ef2b2fcfda6353 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Apr 2024 11:40:55 +0200 Subject: [PATCH 105/279] test: use system container storage when running test_opts as root By using the sharde systemwide container storage the tests will run faster in GH action that runs everything as root by default. The reason is that the images used are already pulled for the build/boot tests. --- test/bib/test_opts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index aebb9a19..c4919493 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -1,3 +1,4 @@ +import os import platform import subprocess @@ -8,6 +9,10 @@ @pytest.fixture(name="container_storage", scope="session") def container_storage_fixture(tmp_path_factory): + # share systemwide storage when running as root, this makes the GH + # tests faster because they already have the test images used here + if os.getuid() == 0: + return "/var/lib/containers/storage" return tmp_path_factory.mktemp("storage") From 612e691f611805a619791cf1da06e043074a6440 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 26 Apr 2024 13:45:27 +0200 Subject: [PATCH 106/279] buildconfig: make buildconfig blueprint compatible and support legacy Make the buildconfig a blueprint but keep support for the legacy format for now. 
--- test/bib/test_build.py | 24 +++++++++++------------- test/bib/test_manifest.py | 2 +- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 17802b7e..8d3c951c 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -190,19 +190,17 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # not all requested image types are available - build them cfg = { - "blueprint": { - "customizations": { - "user": [ - { - "name": username, - "password": password, - "groups": ["wheel"], - }, - ], - "kernel": { - "append": kargs, - } - }, + "customizations": { + "user": [ + { + "name": username, + "password": password, + "groups": ["wheel"], + }, + ], + "kernel": { + "append": kargs, + } }, } diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 4b3da1d8..df191293 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -185,7 +185,7 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): config_toml_path = tmp_path / "config.toml" config_toml_path.write_text(textwrap.dedent("""\ - [[blueprint.customizations.user]] + [[customizations.user]] name = "alice" password = "$5$xx$aabbccddeeffgghhiijj" # notsecret key = "ssh-rsa AAA ... user@email.com" From 53f01b765a7ea49743b58f34f4e0f72e51490022 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 30 Apr 2024 15:41:52 +0200 Subject: [PATCH 107/279] bib: add --roofs to be able to select the root fs type Some container images don't have a default filesystem type. In this case, a disk image build will fail. I'm a strong believer that we need a one-line command to build a disk image. Thus, this commit adds --rootfs which sets the root filesystem type if the container image doesn't have one (or overrides it if it has one). By using --rootfs, users will be able to run just one command to build an image once again. 
In the future, we will support custom partitioning via blueprints (or different format). This means that we will need to figure out what takes precedence. However, I suggest to cross that bridge when we get there. --- test/bib/test_manifest.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index df191293..1dda99cf 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -169,6 +169,22 @@ def test_manifest_rootfs_respected(build_container, testcase_ref): pytest.fail(f"unknown container_ref {container_ref} please update test") +def test_manifest_rootfs_override(build_container): + # no need to parameterize this test, --rootfs behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest",\ + "--rootfs", "ext4", "{container_ref}"]', + build_container, + ]) + rootfs_type = find_rootfs_type_from(output) + assert rootfs_type == "ext4" + + def find_user_stage_from(manifest_str): manifest = json.loads(manifest_str) for pipl in manifest["pipelines"]: From c29cd72779ca6b7f1caab915ed6c2fc7b1a97fbe Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Fri, 22 Mar 2024 11:41:18 +0000 Subject: [PATCH 108/279] test: minor fix to image_type fixture Small fix to the `image_type` fixture by deduplicating the call to `build_images` --- test/bib/test_build.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8d3c951c..0e7a1cd8 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -79,10 +79,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload """ container_ref, images, target_arch, local = parse_request_params(request) - if not local: - with 
build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: - yield build_results[0] - else: + if local: cont_tag = "localhost/cont-base-" + "".join(random.choices(string.digits, k=12)) # we are not cross-building local images (for now) @@ -95,8 +92,8 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload f"containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]{cont_tag}" ]) - with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: - yield build_results[0] + with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + yield build_results[0] @pytest.fixture(name="images", scope="session") From 8ea1d7c111af3b90271db82ba1f267cdb0deb5b3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 18 Apr 2024 07:23:04 +0200 Subject: [PATCH 109/279] test: switch kargs to systemd.journald.forward_to_console=1 This allows us to test our customization with something actually really useful as it will give us easier debug when test runs fail. --- test/bib/test_build.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 0e7a1cd8..4e49c1e0 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -124,7 +124,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): username = "test" password = "password" - kargs = "user.sometestkarg=sometestvalue" + kargs = "systemd.journald.forward_to_console=1" # params can be long and the qmp socket (that has a limit of 100ish # AF_UNIX) is derived from the path From 2e5f3823587c99bf374b9bdd42358a1180b507af Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 22 Mar 2024 12:37:56 +0100 Subject: [PATCH 110/279] test: update tests to allow ssh keys With ssh a root login is only possible via a sshkey. So let's support this so that we can run `bootc status` which requires root privs. 
With the switch to bootc we need to adjust the testing. We inject a root ssh key now and just use that for login. --- test/bib/test_build.py | 41 +++++++++++++++++++++++++++++++---------- test/bib/vm.py | 10 ++++++++-- 2 files changed, 39 insertions(+), 12 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 4e49c1e0..12fa844c 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -39,6 +39,7 @@ class ImageBuildResult(NamedTuple): container_ref: str username: str password: str + ssh_keyfile_private_path: str kargs: str bib_output: str journal_output: str @@ -139,6 +140,10 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): journal_log_path = output_path / "journal.log" bib_output_path = output_path / "bib-output.log" + + ssh_keyfile_private_path = output_path / "ssh-keyfile" + ssh_keyfile_public_path = ssh_keyfile_private_path.with_suffix(".pub") + artifact = { "qcow2": pathlib.Path(output_path) / "qcow2/disk.qcow2", "ami": pathlib.Path(output_path) / "image/disk.raw", @@ -171,9 +176,21 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( image_type, generated_img, target_arch, container_ref, - username, password, + username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) + # generate new keyfile + if not ssh_keyfile_private_path.exists(): + subprocess.run([ + "ssh-keygen", + "-N", "", + # be very conservative with keys for paramiko + "-b", "2048", + "-t", "rsa", + "-f", os.fspath(ssh_keyfile_private_path), + ], check=True) + ssh_pubkey = ssh_keyfile_public_path.read_text(encoding="utf8").strip() + # Because we always build all image types, regardless of what was requested, we should either have 0 results or all # should be available, so if we found at least one result but not all of them, this is a problem with our setup assert not results or len(results) == 
len(image_types), \ @@ -190,6 +207,11 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "customizations": { "user": [ { + "name": "root", + "key": ssh_pubkey, + # cannot use default /root as is on a read-only place + "home": "/var/roothome", + }, { "name": username, "password": password, "groups": ["wheel"], @@ -197,7 +219,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): ], "kernel": { "append": kargs, - } + }, }, } @@ -290,7 +312,7 @@ def del_ami(): for image_type in image_types: results.append(ImageBuildResult( image_type, artifact[image_type], target_arch, container_ref, - username, password, + username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -335,18 +357,17 @@ def assert_kernel_args(test_vm, image_type): @pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_boots(image_type): with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm: + # user/password login works exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) + # root/ssh login also works + exit_status, output = test_vm.run("id", user="root", keyfile=image_type.ssh_keyfile_private_path) assert exit_status == 0 - assert "hello" in output + assert "uid=0" in output + # check generic image options assert_kernel_args(test_vm, image_type) # ensure bootc points to the right image - # TODO: replace this ssh root instead login, see PR#357 - _, output = test_vm.run( - f"echo {image_type.password} | sudo -S bootc status", - user=image_type.username, password=image_type.password, - ) + _, output = test_vm.run("bootc status", user="root", keyfile=image_type.ssh_keyfile_private_path) # XXX: read the fully yaml instead? 
assert f"image: {image_type.container_ref}" in output diff --git a/test/bib/vm.py b/test/bib/vm.py index 43c37636..1bb21f5b 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,5 +1,6 @@ import abc import os +import paramiko import pathlib import platform import subprocess @@ -43,7 +44,7 @@ def force_stop(self): Stop the VM and clean up any resources that were created when setting up and starting the machine. """ - def run(self, cmd, user, password): + def run(self, cmd, user, password="", keyfile=None): """ Run a command on the VM via SSH using the provided credentials. """ @@ -51,8 +52,13 @@ def run(self, cmd, user, password): self.start() client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy) + # workaround, see https://github.com/paramiko/paramiko/issues/2048 + pkey = None + if keyfile: + pkey = paramiko.RSAKey.from_private_key_file(keyfile) client.connect( - self._address, self._ssh_port, user, password, + self._address, self._ssh_port, + user, password, pkey=pkey, allow_agent=False, look_for_keys=False) chan = client.get_transport().open_session() chan.get_pty() From 7d25a1446aeb73f6a35de7632dfdac59de2f3fbc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 04:40:53 +0000 Subject: [PATCH 111/279] build(deps): bump golangci/golangci-lint-action from 5 to 6 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 3473c8d3..1589b6f4 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -36,7 +36,7 @@ jobs: run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - name: Run golangci-lint - uses: golangci/golangci-lint-action@v5 + uses: golangci/golangci-lint-action@v6 with: version: v1.55.2 args: --timeout 5m0s From 341db87494744e835c51d61fb28c274fafac6f86 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 8 May 2024 12:15:43 +0200 Subject: [PATCH 112/279] workflow: run tests on "push" to "main" to populate cache When a merge queue runs it will update the cache in the context of `merge_group`, not `main` [0] - this means that the cache cannot be restored by future PRs and because of the restrictions for caches [1] we currently have no caching. This is an attempt to "fix" this. It sucks a bit because the tests that are run as part of the merge queue will be run again when "main" is updated just so that the cache gets populated. So maybe it's not worth it but right now, we need to measure. Either it's this or we can remove the cache action as it is not working right now. 
[0] https://github.com/orgs/community/discussions/66430 [1] https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache --- .github.com/workflows/bibtests.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 1589b6f4..71f31b42 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -5,6 +5,9 @@ on: pull_request: branches: - "*" + push: + branches: + - "main" # for merge queue merge_group: From fd43c777bd3b80264ce5c749efef7cce8bfb0de3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 27 May 2024 09:30:22 +0200 Subject: [PATCH 113/279] README,bib: hide `--config` from users, remove from docs With the default to pick up the config from `/config.{toml,json}` nowadays the `--config` flag is not very useful anymore. It is actually confusing users as they may think they need to pass two config arguments into the container. So this commit removes `--config` from the README and hides it in bib. It is not removed entirely for two reasons: 1. backward compatibility with existing scripts 2. it is useful for developers to run ``` $ sudo ./bootc-image-builder manifest --config local-config.json ``` to get a manifest from outside a container for debugging purposes (but it's not a strong reason). 
--- test/bib/test_opts.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index c4919493..9d8db5cc 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -110,3 +110,22 @@ def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, "quay.io/centos-bootc/centos-bootc:stream9" ], check=True, capture_output=True, text=True) assert ('level=debug' in res.stderr) == with_debug + + +def test_bib_help_hides_config(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "manifest", "--help", + ], check=True, capture_output=True, text=True) + # --config should not be user visible + assert '--config' not in res.stdout + # but other options should be + assert '--log-level' in res.stdout From b28ea74dbde14bef8a253380bfbf59cd0195c348 Mon Sep 17 00:00:00 2001 From: "Brian C. Lane" Date: Mon, 15 Jan 2024 17:51:25 -0800 Subject: [PATCH 114/279] test: Check the qcow2 image filesystem changes Make sure / is larger than the default of 10GiB and make sure there is a new /var/log mountpoint. 
--- test/bib/test_build.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 12fa844c..7e183c85 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -217,6 +217,16 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "groups": ["wheel"], }, ], + "filesystem": [ + { + "mountpoint": "/", + "minsize": "12GiB" + }, + { + "mountpoint": "/var/log", + "minsize": "1GiB" + }, + ], "kernel": { "append": kargs, }, @@ -371,6 +381,15 @@ def test_image_boots(image_type): # XXX: read the fully yaml instead? assert f"image: {image_type.container_ref}" in output + # Figure out how big / is and make sure it is > 10GiB + # Note that df output is in 1k blocks, not bytes + for line in output.splitlines(): + fields = line.split() + if fields[0] == "/sysroot": + size = int(fields[1]) + assert size > 10 * 1024 * 1024 + break + @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) def test_ami_boots_in_aws(image_type, force_aws_upload): From 2a0509b2dd9cd516e0e0288e590aaefc397faf66 Mon Sep 17 00:00:00 2001 From: "Brian C. Lane" Date: Tue, 23 Jan 2024 16:42:53 -0800 Subject: [PATCH 115/279] test: Add a test for mountpoint policy violations Users cannot create a mountpoint on /ostree, make sure that an error is returned when this happens. --- test/bib/test_manifest.py | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 1dda99cf..f5c7687b 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,5 +1,6 @@ import json import platform +import pathlib import subprocess import textwrap @@ -224,3 +225,47 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): "key": "ssh-rsa AAA ... 
user@email.com", "groups": ["wheel"], } + + +@pytest.mark.parametrize("image_type", gen_testcases("manifest")) +def test_mount_ostree_error(tmpdir_factory, build_container, image_type): + container_ref = image_type.split(",")[0] + CFG = { + "blueprint": { + "customizations": { + "filesystem": [ + { + "mountpoint": "/", + "minsize": "12GiB" + }, + { + "mountpoint": "/var/log", + "minsize": "1GiB" + }, + { + "mountpoint": "/ostree", + "minsize": "10GiB" + } + ] + }, + }, + } + + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(CFG), encoding="utf-8") + + try: + subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{output_path}:/output", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + build_container, + "--config", "/output/config.json", + ], stderr=subprocess.PIPE) + assert False, "Did not raise a CalledProcessError when mounting /ostree" + except subprocess.CalledProcessError as err: + assert 'The following custom mountpoints are not supported ["/ostree"]' in err.stderr.decode("utf-8") From 86f2fbd5928b5ffaa7f5f222c368ba1c195c32eb Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 28 May 2024 08:56:01 +0200 Subject: [PATCH 116/279] test: small pytest related tweaks This commit tweaks the test setup slightly to use pytest.raises for exception checking and also run test_mount_ostree_error() only for the centos image (as the error checking/policies are exactly the same for both images). 
--- test/bib/test_manifest.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index f5c7687b..bbe5715d 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -227,9 +227,10 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): } -@pytest.mark.parametrize("image_type", gen_testcases("manifest")) -def test_mount_ostree_error(tmpdir_factory, build_container, image_type): - container_ref = image_type.split(",")[0] +def test_mount_ostree_error(tmpdir_factory, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + CFG = { "blueprint": { "customizations": { @@ -245,7 +246,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container, image_type): { "mountpoint": "/ostree", "minsize": "10GiB" - } + }, ] }, }, @@ -256,16 +257,15 @@ def test_mount_ostree_error(tmpdir_factory, build_container, image_type): config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(CFG), encoding="utf-8") - try: + with pytest.raises(subprocess.CalledProcessError) as exc: subprocess.check_output([ "podman", "run", "--rm", "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', build_container, "--config", "/output/config.json", - ], stderr=subprocess.PIPE) - assert False, "Did not raise a CalledProcessError when mounting /ostree" - except subprocess.CalledProcessError as err: - assert 'The following custom mountpoints are not supported ["/ostree"]' in err.stderr.decode("utf-8") + ], stderr=subprocess.PIPE, encoding="utf8") + assert 'The following custom mountpoints are not supported ["/ostree"]' in exc.value.stderr From 
99b7b012676231c3b008dcebc1186f6836bb3e1a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 28 May 2024 09:06:56 +0200 Subject: [PATCH 117/279] image: only allow tweaks to /, /boot for now The "images" library does not support custom mount points for bootc based images just yet. The reason is that images will generate an osbuild manifest that contains all the "mounts" for the generated disk. This means that with an extra partition like `/var/log` this is visible for the "bootc install-to-filesystem" stage. And that will trip up bootc because it validates the content of the target directory. Example error with `/var/log` as a custom mount point: ``` ... Installing image: docker://quay.io/centos-bootc/centos-bootc:stream9 ERROR Installing to filesystem: Verifying empty rootfs: Non-empty root filesystem; found "var" Traceback (most recent call last): File "/run/osbuild/bin/org.osbuild.bootc.install-to-filesystem", line 53, in r = main(args["options"], args["inputs"], args["paths"]) File "/run/osbuild/bin/org.osbuild.bootc.install-to-filesystem", line 48, in main subprocess.run(pargs, env=env, check=True) File "/usr/lib64/python3.9/subprocess.py", line 528, in run raise CalledProcessError(retcode, process.args, subprocess.CalledProcessError: Command '['bootc', 'install', 'to-filesystem', '--source-imgref', 'containers-storage:[overlay@/run/osbuild/containers/storage+/run/containers/storage]3b612dd1fae2437c00ae3187d0e63daa7a94711560fb1712389edd4121668c96', '--skip-fetch-check', '--generic-image', '--karg', 'rw', '--karg', 'console=tty0', '--karg', 'console=ttyS0', '--karg', 'systemd.journald.forward_to_console=1', '--target-imgref', 'quay.io/centos-bootc/centos-bootc:stream9', '/run/osbuild/mounts']' returned non-zero exit status 1. ``` So AFAICT "images" need sto be changed so that: 1. The "install-to-filesystem" stage only takes the "essential" mounts (/, /boot/, /boot/efi) 2. 
After "install-to-filesystem" ran we need a "org.osbuild.mkdir" stage for the extra mount points that also only mounts the "essential" mounts As a first step on the journey this commit limits customizations to "/" and "/boot" which is already very useful as many people have asked for precisely those. --- test/bib/test_build.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 7e183c85..da27fbc6 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -222,10 +222,6 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "mountpoint": "/", "minsize": "12GiB" }, - { - "mountpoint": "/var/log", - "minsize": "1GiB" - }, ], "kernel": { "append": kargs, @@ -381,13 +377,13 @@ def test_image_boots(image_type): # XXX: read the fully yaml instead? assert f"image: {image_type.container_ref}" in output - # Figure out how big / is and make sure it is > 10GiB + # Figure out how big / is and make sure it is > 11bGiB # Note that df output is in 1k blocks, not bytes for line in output.splitlines(): fields = line.split() if fields[0] == "/sysroot": size = int(fields[1]) - assert size > 10 * 1024 * 1024 + assert size > 11 * 1024 * 1024 break From 2caacdd895cdcdecb7d8bb7632edecb87dff67ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Fri, 21 Jun 2024 14:59:51 +0200 Subject: [PATCH 118/279] test: switch from Fedora ELN to Fedora 40 Fedora ELN now targets RHEL 11, which is 3+ years away. Let's switch our testing efforts to Fedora 40. In order to achieve this, I had to add --rootfs ext4 to many places because Fedora 40 bootc images don't specify a default filesystem. 
--- test/bib/test_build.py | 1 + test/bib/test_manifest.py | 18 +++++++++--------- test/bib/testcases.py | 5 ++++- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index da27fbc6..337b51f9 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -285,6 +285,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): *upload_args, *target_arch_args, "--local" if local else "--local=false", + "--rootfs", "ext4", ]) # print the build command for easier tracing diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index bbe5715d..cb16f5d4 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -36,7 +36,7 @@ def test_manifest_smoke(build_container, testcase_ref): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', build_container, ]) manifest = json.loads(output) @@ -71,8 +71,9 @@ def test_manifest_disksize(tmp_path, build_container, testcase_ref): # ensure local storage is here "-v", "/var/lib/containers/storage:/var/lib/containers/storage", # need different entry point - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "localhost/{container_tag}"]', + "--entrypoint", "/usr/bin/bootc-image-builder", build_container, + "manifest", "--local", "--rootfs", "ext4", f"localhost/{container_tag}", ], encoding="utf8") # ensure disk size is bigger than the default 10G disk_size = find_image_size_from(manifest_str) @@ -108,8 +109,9 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", 
"localhost/{container_tag}"]', + "--entrypoint", "/usr/bin/bootc-image-builder", build_container, + "manifest", "--local", "--rootfs", "ext4", f"localhost/{container_tag}", ], check=True, encoding="utf8") @@ -129,7 +131,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest",\ - "--target-arch=aarch64", "--local", \ + "--target-arch=aarch64", "--rootfs", "ext4", "--local", \ "localhost/{container_tag}"]', build_container, ], check=True, capture_output=True, encoding="utf8") @@ -147,7 +149,7 @@ def find_rootfs_type_from(manifest_str): raise ValueError(f"cannot find rootfs type in manifest:\n{manifest_str}") -@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) +@pytest.mark.parametrize("testcase_ref", gen_testcases("default-rootfs")) def test_manifest_rootfs_respected(build_container, testcase_ref): # testcases_ref has the form "container_url,img_type1+img_type2,arch" container_ref = testcase_ref.split(",")[0] @@ -164,8 +166,6 @@ def test_manifest_rootfs_respected(build_container, testcase_ref): match container_ref: case "quay.io/centos-bootc/centos-bootc:stream9": assert rootfs_type == "xfs" - case "quay.io/centos-bootc/fedora-bootc:eln": - assert rootfs_type == "xfs" case _: pytest.fail(f"unknown container_ref {container_ref} please update test") @@ -214,7 +214,7 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "-v", f"{config_toml_path}:/config.toml", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', build_container, ]) user_stage = find_user_stage_from(output) @@ -264,7 +264,7 @@ def 
test_mount_ostree_error(tmpdir_factory, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', build_container, "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") diff --git a/test/bib/testcases.py b/test/bib/testcases.py index e5b1f6aa..0ba01f59 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -20,7 +20,7 @@ def gen_testcases(what): # bootc containers that are tested by default CONTAINERS_TO_TEST = { - "fedora": "quay.io/centos-bootc/fedora-bootc:eln", + "fedora": "quay.io/fedora/fedora-bootc:40", "centos": "quay.io/centos-bootc/centos-bootc:stream9", } # allow commandline override, this is used when testing @@ -34,6 +34,9 @@ def gen_testcases(what): if what == "manifest": return CONTAINERS_TO_TEST.values() + elif what == "default-rootfs": + # Fedora doesn't have a default rootfs + return [CONTAINERS_TO_TEST["centos"]] elif what == "ami-boot": return [cnt + ",ami" for cnt in CONTAINERS_TO_TEST.values()] elif what == "anaconda-iso": From f31819ac0e7af67efd6cd3618b10ccc31a9ded0f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 24 Jun 2024 10:29:02 +0200 Subject: [PATCH 119/279] main: set `SilenceErrors` to avoid double error printing We handle the errors ourselves in bootc-image-builder but there was a setting missing which led to double prints: ``` $ go build && sudo ./bootc-image-builder manifest quay.io/fedora/fedora-bootc:40 Error: cannot generate manifest: cannot get rootfs type for container: container does not include a default root filesystem type 2024/06/24 10:26:42 error: cannot generate manifest: cannot get rootfs type for container: container does not include a default root filesystem type ``` This commit fixes this 
and adds a test: ``` $ go build && sudo ./bootc-image-builder manifest quay.io/fedora/fedora-bootc:40 2024/06/24 10:43:38 error: cannot generate manifest: cannot get rootfs type for container: container does not include a default root filesystem type ``` --- test/bib/test_opts.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 9d8db5cc..8f0cc697 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -129,3 +129,20 @@ def test_bib_help_hides_config(tmp_path, container_storage, build_fake_container assert '--config' not in res.stdout # but other options should be assert '--log-level' in res.stdout + + +def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "localhost/no-such-image", + ], check=False, capture_output=True, text=True) + needle = "cannot build manifest: failed to pull container image:" + assert res.stderr.count(needle) == 1 From 590f17bab4393edd4069e95e843c6fb3cd6fd403 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 1 Jul 2024 16:48:14 +0200 Subject: [PATCH 120/279] bib: add (hidden) `bootc-image-builder version` command This commit adds a (hidden) `version` command. It currently only contains the git revision of build - that is useful for easier debugging, i.e. we can now easily ask what revision the user was using when a bug is reported. Once we actually do real bib releases we could also include the real version number. 
--- test/bib/test_opts.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 8f0cc697..50204ee6 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -146,3 +146,21 @@ def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container) ], check=False, capture_output=True, text=True) needle = "cannot build manifest: failed to pull container image:" assert res.stderr.count(needle) == 1 + + +def test_bib_version(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + "--entrypoint=/usr/bin/bootc-image-builder", + build_fake_container, + "version", + ], check=True, capture_output=True, text=True) + needle = "revision: " + assert needle in res.stdout From d54a4c5a72cd5bbb567ee6a9bf0fc7ffb95196ff Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 25 Jun 2024 12:04:13 +0200 Subject: [PATCH 121/279] test: add manifest smoke test for ISO The existing manifest smoke test only generates the disk image manifest. Adding a separate manifest smoke test for ISOs. We use a separate test that doesn't check for disk size but instead verifies the expected pipeline names. 
--- test/bib/test_manifest.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index cb16f5d4..6dac0274 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,6 +1,6 @@ import json -import platform import pathlib +import platform import subprocess import textwrap @@ -13,7 +13,8 @@ if not testutil.can_start_rootful_containers(): pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) -from containerbuild import build_container_fixture, make_container # noqa: F401 +from containerbuild import build_container_fixture # noqa: F401 +from containerbuild import make_container from testcases import gen_testcases @@ -49,6 +50,26 @@ def test_manifest_smoke(build_container, testcase_ref): assert int(disk_size) == 10 * 1024 * 1024 * 1024 +@pytest.mark.parametrize("testcase_ref", gen_testcases("anaconda-iso")) +def test_iso_manifest_smoke(build_container, testcase_ref): + # testcases_ref has the form "container_url,img_type1+img_type2,arch" + container_ref = testcase_ref.split(",")[0] + + output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + ('--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", ' + f'"--type=anaconda-iso", "{container_ref}"]'), + build_container, + ]) + manifest = json.loads(output) + # just some basic validation + expected_pipeline_names = ["build", "anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"] + assert manifest["version"] == "2" + assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names + + @pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) def test_manifest_disksize(tmp_path, build_container, testcase_ref): # create derrived container with 6G silly file to ensure that From 95cb3372cd9bd3a30db174dca11b78a426c150cd Mon 
Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 25 Jun 2024 13:48:03 +0200 Subject: [PATCH 122/279] test: add user kickstart content customization test Add a custom kickstart file to an ISO config and check the manifest for the expected input and stage. Co-authored-by: Michael Vogt --- test/bib/test_manifest.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 6dac0274..870790b3 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,3 +1,5 @@ +import base64 +import hashlib import json import pathlib import platform @@ -248,6 +250,41 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): } +def test_manifest_installer_customizations(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [customizations.installer.kickstart] + contents = \"\"\" + autopart --type=lvm + \"\"\" + """)) + output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "-v", f"{config_toml_path}:/config.toml", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--type=anaconda-iso", "{container_ref}"]', + build_container, + ]) + manifest = json.loads(output) + + # expected values for the following inline file contents + ks_content = textwrap.dedent("""\ + %include /run/install/repo/osbuild-base.ks + autopart --type=lvm + """).encode("utf8") + expected_data = base64.b64encode(ks_content).decode() + expected_content_hash = hashlib.sha256(ks_content).hexdigest() + expected_content_id = f"sha256:{expected_content_hash}" # hash with algo prefix + + # check the inline source for the custom kickstart contents + assert expected_content_id in 
manifest["sources"]["org.osbuild.inline"]["items"] + assert manifest["sources"]["org.osbuild.inline"]["items"][expected_content_id]["data"] == expected_data + + def test_mount_ostree_error(tmpdir_factory, build_container): # no need to parameterize this test, toml is the same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" From 97284f61043099d86be7735beae16ce69cc0e200 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 4 Jul 2024 12:29:51 +0200 Subject: [PATCH 123/279] github: update Go version used in workflows --- .github.com/workflows/bibtests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 71f31b42..5d48e73d 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -20,10 +20,10 @@ jobs: name: "⌨ Lint & unittests" runs-on: ubuntu-latest steps: - - name: Set up Go 1.20 + - name: Set up Go 1.21 uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.21" id: go - name: Check out code into the Go module directory From c775a4f40e9efcb88b00aa4a4e59c0a192ab7486 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 1 Jul 2024 16:55:57 +0200 Subject: [PATCH 124/279] test: unify how `--entrypoint=` is used The usage of `--entrypoint` was a bit all over the place. This commit unifies it so that if the entrypoint is not "build" we always pass `--entrypoint=/usr/bin/bootc-image-builder` and the arguments to bib are passed after the container. This makes tests (hopefully) more readable. 
--- test/bib/test_manifest.py | 35 +++++++++++++++++++++-------------- test/bib/test_opts.py | 1 + 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 870790b3..51b35a4d 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -39,8 +39,9 @@ def test_manifest_smoke(build_container, testcase_ref): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--rootfs", "ext4", f"{container_ref}", ]) manifest = json.loads(output) # just some basic validation @@ -61,9 +62,10 @@ def test_iso_manifest_smoke(build_container, testcase_ref): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - ('--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", ' - f'"--type=anaconda-iso", "{container_ref}"]'), + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--rootfs", "ext4", + "--type=anaconda-iso", f"{container_ref}", ]) manifest = json.loads(output) # just some basic validation @@ -111,8 +113,9 @@ def test_manifest_local_checks_containers_storage_errors(build_container): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - '--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--local", "arg-not-used"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--local", "arg-not-used", ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8") assert res.returncode == 1 err = 'local storage not working, did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' 
@@ -132,7 +135,7 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", - "--entrypoint", "/usr/bin/bootc-image-builder", + "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--local", "--rootfs", "ext4", f"localhost/{container_tag}", ], check=True, encoding="utf8") @@ -153,10 +156,10 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest",\ - "--target-arch=aarch64", "--rootfs", "ext4", "--local", \ - "localhost/{container_tag}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--target-arch=aarch64", + "--rootfs", "ext4", "--local", f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr @@ -182,8 +185,9 @@ def test_manifest_rootfs_respected(build_container, testcase_ref): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", f"{container_ref}", ]) rootfs_type = find_rootfs_type_from(output) match container_ref: @@ -201,9 +205,9 @@ def test_manifest_rootfs_override(build_container): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest",\ - "--rootfs", "ext4", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--rootfs", "ext4", f"{container_ref}", ]) rootfs_type = find_rootfs_type_from(output) assert rootfs_type == "ext4" @@ 
-237,8 +241,9 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "-v", f"{config_toml_path}:/config.toml", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--rootfs", "ext4", f"{container_ref}", ]) user_stage = find_user_stage_from(output) assert user_stage["options"]["users"].get("alice") == { @@ -266,8 +271,9 @@ def test_manifest_installer_customizations(tmp_path, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "-v", f"{config_toml_path}:/config.toml", "--security-opt", "label=type:unconfined_t", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--type=anaconda-iso", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--type=anaconda-iso", f"{container_ref}", ]) manifest = json.loads(output) @@ -322,8 +328,9 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", "-v", f"{output_path}:/output", - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "--rootfs", "ext4", "{container_ref}"]', + "--entrypoint=/usr/bin/bootc-image-builder", build_container, + "manifest", "--rootfs", "ext4", f"{container_ref}", "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") assert 'The following custom mountpoints are not supported ["/ostree"]' in exc.value.stderr diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 50204ee6..32f57e44 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -122,6 +122,7 @@ def test_bib_help_hides_config(tmp_path, container_storage, build_fake_container "--security-opt", "label=type:unconfined_t", "-v", 
f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", + "--entrypoint=/usr/bin/bootc-image-builder", build_fake_container, "manifest", "--help", ], check=True, capture_output=True, text=True) From 2873c445059a79b8cf1f09ed02a873e6724e1c1a Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 19 Jun 2024 12:30:37 +0200 Subject: [PATCH 125/279] test/test_manifest: fix import order --- test/bib/test_manifest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 51b35a4d..6e3edcc6 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -9,15 +9,15 @@ import pytest import testutil +from containerbuild import build_container_fixture # noqa: F401 +from containerbuild import make_container +from testcases import gen_testcases if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) if not testutil.can_start_rootful_containers(): pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) -from containerbuild import build_container_fixture # noqa: F401 -from containerbuild import make_container -from testcases import gen_testcases 


def find_image_size_from(manifest_str): From 1556c44a07c7af17d2fb604f0d30267f3caac9e5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 18 Jun 2024 10:26:22 +0200 Subject: [PATCH 126/279] test: add new integration test for ValidateHasContainerTags() Add new `test_manifest_checks_build_container_is_bootc` test that validates that `bootc-image-builder manifest` fails with the expected error on a non-bootc image url. Note that this will implicitly check `build` too as the same check is performed in bib for manifest and build (i.e. bib needs to generate a manifest first before it can build). 
Co-authored-by: bstrausser --- test/bib/test_manifest.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 6e3edcc6..de3caf19 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -19,7 +19,6 @@ pytest.skip("tests require to be able to run rootful containers (try: sudo)", allow_module_level=True) - def find_image_size_from(manifest_str): manifest = json.loads(manifest_str) for pipl in manifest["pipelines"]: @@ -334,3 +333,28 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") assert 'The following custom mountpoints are not supported ["/ostree"]' in exc.value.stderr + + +@pytest.mark.parametrize( + "container_ref,should_error,expected_error", + [ + ("quay.io/centos/centos:stream9", True, "image quay.io/centos/centos:stream9 is not a bootc image"), + ("quay.io/centos-bootc/centos-bootc:stream9", False, None), + ], +) +def test_manifest_checks_build_container_is_bootc(build_container, container_ref, should_error, expected_error): + def check_image_ref(): + subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", + f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', + build_container, + ], stderr=subprocess.PIPE, encoding="utf8") + if should_error: + with pytest.raises(subprocess.CalledProcessError) as exc: + check_image_ref() + assert expected_error in exc.value.stderr + else: + check_image_ref() From d06af09bf0031a2d8eef05e650c27f888df6e2d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Fri, 10 May 2024 11:50:50 +0200 Subject: [PATCH 127/279] bib: add btrfs support Now that images has support for creating btrfs, let's make it available in bootc-image-builder too. 
Use with `--rootfs btrfs` on e.g. the fedora images. --- test/bib/test_manifest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index de3caf19..c25b0b20 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -206,10 +206,10 @@ def test_manifest_rootfs_override(build_container): "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--rootfs", "ext4", f"{container_ref}", + "manifest", "--rootfs", "btrfs", f"{container_ref}", ]) rootfs_type = find_rootfs_type_from(output) - assert rootfs_type == "ext4" + assert rootfs_type == "btrfs" def find_user_stage_from(manifest_str): From bb8332b6a81faa93224e52e011065c4235f7fdb7 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jul 2024 22:07:01 +0200 Subject: [PATCH 128/279] test: test btrfs as part of the fedora boot test --- test/bib/test_build.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 337b51f9..9f84a258 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -277,6 +277,13 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): if local: cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"]) + # fedora has no default roofs, pick "brfs" for testing here + # TODO: make part of testcase instead of hacking it in here + if "fedora" in container_ref: + rootfs_args = ["--rootfs", "btrfs"] + else: + rootfs_args = [] + cmd.extend([ *creds_args, build_container, @@ -285,7 +292,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): *upload_args, *target_arch_args, "--local" if local else "--local=false", - "--rootfs", "ext4", + *rootfs_args, ]) # print the build command for easier tracing From 979f774570e5e2d393688a8f0fe10e47dda7ad01 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jul 
2024 22:42:45 +0200 Subject: [PATCH 129/279] test: rework how test cases are represented/generated The old way of generating test cases was via strings that would use a `,` to differentiate between various parameters like container_ref or image_type. With the adding of the rootfs this becomes unwieldy (arguably it was unwieldy already and now it's untenable). This commit introduces a proper "TestCase" class that makes generating and reading the test cases a lot easier (IMHO). Note that this was prompted because of PR#439 where we want to make the rootfs tests on fedora "btrfs". --- test/bib/conftest.py | 10 ++++++ test/bib/test_build.py | 51 +++++++++++-------------------- test/bib/test_manifest.py | 64 +++++++++++++++++++-------------------- test/bib/testcases.py | 61 ++++++++++++++++++++++++++++--------- 4 files changed, 105 insertions(+), 81 deletions(-) diff --git a/test/bib/conftest.py b/test/bib/conftest.py index faa51f96..ef0e7bf9 100644 --- a/test/bib/conftest.py +++ b/test/bib/conftest.py @@ -1,5 +1,7 @@ import pytest +from testcases import TestCase + def pytest_addoption(parser): parser.addoption("--force-aws-upload", action="store_true", default=False, @@ -10,3 +12,11 @@ def pytest_addoption(parser): @pytest.fixture(name="force_aws_upload", scope="session") def force_aws_upload_fixture(request): return request.config.getoption("--force-aws-upload") + + +# see https://hackebrot.github.io/pytest-tricks/param_id_func/ and +# https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.hookspec.pytest_make_parametrize_id +def pytest_make_parametrize_id(config, val): + if isinstance(val, TestCase): + return f"{val}" + return None diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 9f84a258..0fddfb4c 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -46,24 +46,6 @@ class ImageBuildResult(NamedTuple): metadata: dict = {} -def parse_request_params(request): - # image_type is passed via special pytest parameter fixture - 
testcase_ref = request.param - if testcase_ref.count(",") == 3: - container_ref, images, target_arch, local = testcase_ref.split(",") - local = local is not None - elif testcase_ref.count(",") == 2: - container_ref, images, target_arch = testcase_ref.split(",") - local = False - elif testcase_ref.count(",") == 1: - container_ref, images = testcase_ref.split(",") - target_arch = None - local = False - else: - raise ValueError(f"cannot parse {testcase_ref.count}") - return container_ref, images, target_arch, local - - @pytest.fixture(scope='session') def shared_tmpdir(tmpdir_factory): tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared")) @@ -78,13 +60,13 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload In the case an image is being built from a local container, the function will build the required local container for the test. """ - container_ref, images, target_arch, local = parse_request_params(request) + container_ref = request.param.container_ref - if local: + if request.param.local: cont_tag = "localhost/cont-base-" + "".join(random.choices(string.digits, k=12)) # we are not cross-building local images (for now) - request.param = ",".join([cont_tag, images, "", "true"]) + request.param.target_arch = "" # copy the container into containers-storage subprocess.check_call([ @@ -117,11 +99,12 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): :request.param: has the form "container_url,img_type1+img_type2,arch,local" """ - container_ref, images, target_arch, local = parse_request_params(request) + # the testcases.TestCase comes from the request.parameter + tc = request.param # images might be multiple --type args # split and check each one - image_types = images.split("+") + image_types = request.param.image.split("+") username = "test" password = "password" @@ -131,7 +114,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # AF_UNIX) is derived from the path # hash the 
container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to # different image type combinations - output_path = shared_tmpdir / format(abs(hash(container_ref + str(target_arch))), "x") + output_path = shared_tmpdir / format(abs(hash(tc.container_ref + str(tc.target_arch))), "x") output_path.mkdir(exist_ok=True) # make sure that the test store exists, because podman refuses to start if the source directory for a volume @@ -151,7 +134,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk", "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } - assert len(artifact) == len(set(t.split(",")[1] for t in gen_testcases("all"))), \ + assert len(artifact) == len(set(tc.image for tc in gen_testcases("all"))), \ "please keep artifact mapping and supported images in sync" # this helper checks the cache @@ -175,7 +158,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( - image_type, generated_img, target_arch, container_ref, + image_type, generated_img, tc.target_arch, tc.container_ref, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) @@ -237,8 +220,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): upload_args = [] creds_args = [] target_arch_args = [] - if target_arch: - target_arch_args = ["--target-arch", target_arch] + if tc.target_arch: + target_arch_args = ["--target-arch", tc.target_arch] with tempfile.TemporaryDirectory() as tempdir: if "ami" in image_types: @@ -274,7 +257,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): ] # we need to mount the host's container store - if local: + if tc.local: cmd.extend(["-v", 
"/var/lib/containers/storage:/var/lib/containers/storage"]) # fedora has no default roofs, pick "brfs" for testing here @@ -287,12 +270,12 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): cmd.extend([ *creds_args, build_container, - container_ref, + tc.container_ref, *types_arg, *upload_args, *target_arch_args, - "--local" if local else "--local=false", - *rootfs_args, + *tc.rootfs_args(), + "--local" if tc.local else "--local=false", ]) # print the build command for easier tracing @@ -325,7 +308,7 @@ def del_ami(): results = [] for image_type in image_types: results.append(ImageBuildResult( - image_type, artifact[image_type], target_arch, container_ref, + image_type, artifact[image_type], tc.target_arch, tc.container_ref, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -343,7 +326,7 @@ def del_ami(): img.unlink() else: print("does not exist") - subprocess.run(["podman", "rmi", container_ref], check=False) + subprocess.run(["podman", "rmi", tc.container_ref], check=False) return diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index c25b0b20..ac12bf3c 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -29,18 +29,17 @@ def find_image_size_from(manifest_str): raise ValueError(f"cannot find disk size in manifest:\n{manifest_str}") -@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) -def test_manifest_smoke(build_container, testcase_ref): - # testcases_ref has the form "container_url,img_type1+img_type2,arch" - container_ref = testcase_ref.split(",")[0] - +@pytest.mark.parametrize("tc", gen_testcases("manifest")) +def test_manifest_smoke(build_container, tc): output = subprocess.check_output([ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--rootfs", "ext4", f"{container_ref}", + "manifest", + 
*tc.rootfs_args(), + f"{tc.container_ref}", ]) manifest = json.loads(output) # just some basic validation @@ -52,19 +51,17 @@ def test_manifest_smoke(build_container, testcase_ref): assert int(disk_size) == 10 * 1024 * 1024 * 1024 -@pytest.mark.parametrize("testcase_ref", gen_testcases("anaconda-iso")) -def test_iso_manifest_smoke(build_container, testcase_ref): - # testcases_ref has the form "container_url,img_type1+img_type2,arch" - container_ref = testcase_ref.split(",")[0] - +@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) +def test_iso_manifest_smoke(build_container, tc): output = subprocess.check_output([ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--rootfs", "ext4", - "--type=anaconda-iso", f"{container_ref}", + "manifest", + *tc.rootfs_args(), + "--type=anaconda-iso", f"{tc.container_ref}", ]) manifest = json.loads(output) # just some basic validation @@ -73,19 +70,19 @@ def test_iso_manifest_smoke(build_container, testcase_ref): assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names -@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) -def test_manifest_disksize(tmp_path, build_container, testcase_ref): +@pytest.mark.parametrize("tc", gen_testcases("manifest")) +def test_manifest_disksize(tmp_path, build_container, tc): # create derrived container with 6G silly file to ensure that # bib doubles the size to 12G+ cntf_path = tmp_path / "Containerfile" cntf_path.write_text(textwrap.dedent(f"""\n - FROM {testcase_ref} + FROM {tc.container_ref} RUN truncate -s 2G /big-file1 RUN truncate -s 2G /big-file2 RUN truncate -s 2G /big-file3 """), encoding="utf8") - print(f"building big size container from {testcase_ref}") + print(f"building big size container from {tc.container_ref}") with make_container(tmp_path) as container_tag: print(f"using {container_tag}") manifest_str = 
subprocess.check_output([ @@ -97,7 +94,9 @@ def test_manifest_disksize(tmp_path, build_container, testcase_ref): # need different entry point "--entrypoint", "/usr/bin/bootc-image-builder", build_container, - "manifest", "--local", "--rootfs", "ext4", f"localhost/{container_tag}", + "manifest", "--local", + *tc.rootfs_args(), + f"localhost/{container_tag}", ], encoding="utf8") # ensure disk size is bigger than the default 10G disk_size = find_image_size_from(manifest_str) @@ -121,11 +120,11 @@ def test_manifest_local_checks_containers_storage_errors(build_container): assert err in res.stderr -@pytest.mark.parametrize("testcase_ref", gen_testcases("manifest")) -def test_manifest_local_checks_containers_storage_works(tmp_path, build_container, testcase_ref): +@pytest.mark.parametrize("tc", gen_testcases("manifest")) +def test_manifest_local_checks_containers_storage_works(tmp_path, build_container, tc): cntf_path = tmp_path / "Containerfile" cntf_path.write_text(textwrap.dedent(f"""\n - FROM {testcase_ref} + FROM {tc.container_ref} """), encoding="utf8") with make_container(tmp_path) as container_tag: @@ -136,7 +135,9 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--local", "--rootfs", "ext4", f"localhost/{container_tag}", + "manifest", "--local", + *tc.rootfs_args(), + f"localhost/{container_tag}", ], check=True, encoding="utf8") @@ -158,7 +159,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--target-arch=aarch64", - "--rootfs", "ext4", "--local", f"localhost/{container_tag}" + "--local", f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr @@ -174,11 +175,8 @@ def find_rootfs_type_from(manifest_str): raise 
ValueError(f"cannot find rootfs type in manifest:\n{manifest_str}") -@pytest.mark.parametrize("testcase_ref", gen_testcases("default-rootfs")) -def test_manifest_rootfs_respected(build_container, testcase_ref): - # testcases_ref has the form "container_url,img_type1+img_type2,arch" - container_ref = testcase_ref.split(",")[0] - +@pytest.mark.parametrize("tc", gen_testcases("default-rootfs")) +def test_manifest_rootfs_respected(build_container, tc): # TODO: derive container and fake "bootc install print-configuration"? output = subprocess.check_output([ "podman", "run", "--rm", @@ -186,14 +184,14 @@ def test_manifest_rootfs_respected(build_container, testcase_ref): "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", f"{container_ref}", + "manifest", f"{tc.container_ref}", ]) rootfs_type = find_rootfs_type_from(output) - match container_ref: + match tc.container_ref: case "quay.io/centos-bootc/centos-bootc:stream9": assert rootfs_type == "xfs" case _: - pytest.fail(f"unknown container_ref {container_ref} please update test") + pytest.fail(f"unknown container_ref {tc.container_ref} please update test") def test_manifest_rootfs_override(build_container): @@ -242,7 +240,7 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--rootfs", "ext4", f"{container_ref}", + "manifest", f"{container_ref}", ]) user_stage = find_user_stage_from(output) assert user_stage["options"]["users"].get("alice") == { @@ -329,7 +327,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "-v", f"{output_path}:/output", "--entrypoint=/usr/bin/bootc-image-builder", build_container, - "manifest", "--rootfs", "ext4", f"{container_ref}", + "manifest", f"{container_ref}", "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") assert 'The following custom 
mountpoints are not supported ["/ostree"]' in exc.value.stderr diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 0ba01f59..5114c1ed 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -1,3 +1,5 @@ +import dataclasses +import inspect import os import platform @@ -17,6 +19,34 @@ INSTALLER_IMAGE_TYPES = ("anaconda-iso",) +@dataclasses.dataclass(frozen=True) +class TestCase: + # container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 + container_ref: str + # image is the image type, e.g. "ami" + image: str = "" + # target_arch is the target archicture, empty means current arch + target_arch: str = "" + # local means that the container should be pulled locally ("--local" flag) + local: bool = False + + def rootfs_args(self): + # fedora has no default rootfs so it must be specified + if "fedora-bootc" in self.container_ref: + # TODO: switch to "btrfs" once + # https://github.com/osbuild/bootc-image-builder/pull/439 + # is merged + return ["--rootfs", "ext4"] + return [] + + def __str__(self): + return ",".join([ + attr + for name, attr in inspect.getmembers(self) + if not name.startswith("_") and not callable(attr) and attr + ]) + + def gen_testcases(what): # bootc containers that are tested by default CONTAINERS_TO_TEST = { @@ -33,46 +63,49 @@ def gen_testcases(what): } if what == "manifest": - return CONTAINERS_TO_TEST.values() + return [TestCase(container_ref=ref) + for ref in CONTAINERS_TO_TEST.values()] elif what == "default-rootfs": # Fedora doesn't have a default rootfs - return [CONTAINERS_TO_TEST["centos"]] + return [TestCase(container_ref=CONTAINERS_TO_TEST["centos"])] elif what == "ami-boot": - return [cnt + ",ami" for cnt in CONTAINERS_TO_TEST.values()] + test_cases = [] + for ref in CONTAINERS_TO_TEST.values(): + test_cases.append(TestCase(container_ref=ref, image="ami")) + return test_cases elif what == "anaconda-iso": test_cases = [] - for cnt in CONTAINERS_TO_TEST.values(): + for ref in 
CONTAINERS_TO_TEST.values(): for img_type in INSTALLER_IMAGE_TYPES: - test_cases.append(f"{cnt},{img_type}") + test_cases.append(TestCase(container_ref=ref, image=img_type)) return test_cases elif what == "qemu-boot": test_cases = [] - for cnt in CONTAINERS_TO_TEST.values(): + for distro, ref in CONTAINERS_TO_TEST.items(): for img_type in QEMU_BOOT_IMAGE_TYPES: - test_cases.append(f"{cnt},{img_type}") + test_cases.append( + TestCase(container_ref=ref, image=img_type)) # do a cross arch test too if platform.machine() == "x86_64": # todo: add fedora:eln - test_cases.append( - f'{CONTAINERS_TO_TEST["centos"]},raw,arm64') + test_cases.append(TestCase(container_ref=ref, image="raw", target_arch="arm64")) elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too pass return test_cases elif what == "all": test_cases = [] - for cnt in CONTAINERS_TO_TEST.values(): + for ref in CONTAINERS_TO_TEST.values(): for img_type in QEMU_BOOT_IMAGE_TYPES + \ CLOUD_BOOT_IMAGE_TYPES + \ NON_QEMU_BOOT_IMAGE_TYPES + \ INSTALLER_IMAGE_TYPES: - test_cases.append(f"{cnt},{img_type}") + test_cases.append(TestCase(container_ref=ref, image=img_type)) return test_cases elif what == "multidisk": # single test that specifies all image types test_cases = [] - for cnt in CONTAINERS_TO_TEST.values(): - img_type = "+".join(DISK_IMAGE_TYPES) - test_cases.append(f"{cnt},{img_type}") + for ref in CONTAINERS_TO_TEST.values(): + test_cases.append(TestCase(container_ref=ref, image="+".join(DISK_IMAGE_TYPES))) return test_cases raise ValueError(f"unknown test-case type {what}") From 92bb59aa5d4627b694e9d03ebab4596f5c540a73 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 8 Jul 2024 14:40:49 +0200 Subject: [PATCH 130/279] test: switch to btrfs for fedora tests --- test/bib/test_build.py | 7 ------- test/bib/testcases.py | 5 +---- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 0fddfb4c..f7e9e2b6 100644 --- 
a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -260,13 +260,6 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): if tc.local: cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"]) - # fedora has no default roofs, pick "brfs" for testing here - # TODO: make part of testcase instead of hacking it in here - if "fedora" in container_ref: - rootfs_args = ["--rootfs", "btrfs"] - else: - rootfs_args = [] - cmd.extend([ *creds_args, build_container, diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 5114c1ed..2455376c 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -33,10 +33,7 @@ class TestCase: def rootfs_args(self): # fedora has no default rootfs so it must be specified if "fedora-bootc" in self.container_ref: - # TODO: switch to "btrfs" once - # https://github.com/osbuild/bootc-image-builder/pull/439 - # is merged - return ["--rootfs", "ext4"] + return ["--rootfs", "btrfs"] return [] def __str__(self): From 21d2b1caa72919d28a2c66f370b672d12bcedf36 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jul 2024 16:39:39 +0200 Subject: [PATCH 131/279] main: allow running outside of a container It can be useful to run `bootc-image-builder` outside a container, e.g. during local development. Let's allow it but warn the user that it's not a supported configuration. 
--- test/bib/test_opts.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 32f57e44..57a8f7da 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -165,3 +165,19 @@ def test_bib_version(tmp_path, container_storage, build_fake_container): ], check=True, capture_output=True, text=True) needle = "revision: " assert needle in res.stdout + + +def test_bib_no_outside_container_warning_in_container(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + res = subprocess.run([ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "quay.io/centos-bootc/centos-bootc:stream9" + ], check=True, capture_output=True, text=True) + assert "running outside a container" not in res.stderr From 2ef3c02a5781820398c5971bb5587e2d09d370a1 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 10 Jul 2024 13:34:17 +0200 Subject: [PATCH 132/279] README: update to show that `config.{toml,yaml}` can be read-only As suggested by Charlie Drage (thank you!) this commit makes the mounting of the `config.{toml,yaml}` read-only via the `:ro` suffix. The tests are also updated to validate that this works as expected. 
Closes: https://github.com/osbuild/bootc-image-builder/issues/533 --- test/bib/test_manifest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index ac12bf3c..e3c11a0d 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -236,7 +236,7 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): "podman", "run", "--rm", "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "-v", f"{config_toml_path}:/config.toml", + "-v", f"{config_toml_path}:/config.toml:ro", "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, @@ -266,7 +266,7 @@ def test_manifest_installer_customizations(tmp_path, build_container): "podman", "run", "--rm", "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "-v", f"{config_toml_path}:/config.toml", + "-v", f"{config_toml_path}:/config.toml:ro", "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, From 7a7865113b8db44a943f84732625932c7c678ddd Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 29 Jul 2024 13:21:09 +0200 Subject: [PATCH 133/279] test: add new `test_manifest_target_arch_smoke` test The new `test_manifest_target_arch_smoke` test ensures that for all supported `--target-arch` architecture bib can create a manifest. No further functional testing (like building or booting) is done. 
--- test/bib/test_manifest.py | 22 ++++++++++++++++++++++ test/bib/testcases.py | 9 +++++++++ 2 files changed, 31 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index e3c11a0d..9a366274 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -356,3 +356,25 @@ def check_image_ref(): assert expected_error in exc.value.stderr else: check_image_ref() + + +@pytest.mark.parametrize("tc", gen_testcases("target-arch-smoke")) +def test_manifest_target_arch_smoke(build_container, tc): + # TODO: actually build an image too + output = subprocess.check_output([ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + "manifest", + *tc.rootfs_args(), + f"--target-arch={tc.target_arch}", + tc.container_ref, + ]) + manifest = json.loads(output) + # just minimal validation, we could in theory look at the partition + # table be beside this there is relatively little that is different + assert manifest["version"] == "2" + assert manifest["pipelines"][0]["name"] == "build" diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 2455376c..0beca5c7 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -105,4 +105,13 @@ def gen_testcases(what): for ref in CONTAINERS_TO_TEST.values(): test_cases.append(TestCase(container_ref=ref, image="+".join(DISK_IMAGE_TYPES))) return test_cases + # Smoke test that all supported --target-arch architecture can + # create a manifest + elif what == "target-arch-smoke": + return [ + TestCase(container_ref=CONTAINERS_TO_TEST["centos"], target_arch="arm64"), + # TODO: merge with CONTAINERS_TO_TEST once that moves to :41 too + TestCase(container_ref="quay.io/fedora/fedora-bootc:41", target_arch="ppc64le"), + TestCase(container_ref="quay.io/fedora/fedora-bootc:41", target_arch="s390x"), + ] raise ValueError(f"unknown 
test-case type {what}") From 596829c7d6ba2d73f956bf99b00a1f76b728ce52 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 5 Jul 2024 11:14:38 +0200 Subject: [PATCH 134/279] test: introduce new `TestCase{Centos,Fedora}` classes Make the testcases generation more readable introduce new dataclasss for `TestCase{Centos,Fedora}` so that the testcases just become something like: ```python if what == "ami-boot": return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")] ``` --- test/bib/test_build.py | 2 +- test/bib/test_manifest.py | 10 ++-- test/bib/testcases.py | 117 +++++++++++++++++--------------------- 3 files changed, 59 insertions(+), 70 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index f7e9e2b6..143c3868 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -267,7 +267,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): *types_arg, *upload_args, *target_arch_args, - *tc.rootfs_args(), + *tc.bib_rootfs_args(), "--local" if tc.local else "--local=false", ]) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 9a366274..bc7eddd2 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -38,7 +38,7 @@ def test_manifest_smoke(build_container, tc): "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", - *tc.rootfs_args(), + *tc.bib_rootfs_args(), f"{tc.container_ref}", ]) manifest = json.loads(output) @@ -60,7 +60,7 @@ def test_iso_manifest_smoke(build_container, tc): "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", - *tc.rootfs_args(), + *tc.bib_rootfs_args(), "--type=anaconda-iso", f"{tc.container_ref}", ]) manifest = json.loads(output) @@ -95,7 +95,7 @@ def test_manifest_disksize(tmp_path, build_container, tc): "--entrypoint", "/usr/bin/bootc-image-builder", build_container, "manifest", "--local", - *tc.rootfs_args(), + *tc.bib_rootfs_args(), f"localhost/{container_tag}", ], encoding="utf8") # ensure 
disk size is bigger than the default 10G @@ -136,7 +136,7 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--local", - *tc.rootfs_args(), + *tc.bib_rootfs_args(), f"localhost/{container_tag}", ], check=True, encoding="utf8") @@ -369,7 +369,7 @@ def test_manifest_target_arch_smoke(build_container, tc): "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", - *tc.rootfs_args(), + *tc.bib_rootfs_args(), f"--target-arch={tc.target_arch}", tc.container_ref, ]) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 0beca5c7..2a085692 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -3,37 +3,30 @@ import os import platform -# supported images that can be directly booted -QEMU_BOOT_IMAGE_TYPES = ("qcow2", "raw") - -# images that can *not* be booted directly from qemu -NON_QEMU_BOOT_IMAGE_TYPES = ("vmdk",) - # disk image types can be build from a single manifest -DISK_IMAGE_TYPES = QEMU_BOOT_IMAGE_TYPES + NON_QEMU_BOOT_IMAGE_TYPES +DISK_IMAGE_TYPES = ("qcow2", "raw", "vmdk") # supported images that can be booted in a cloud CLOUD_BOOT_IMAGE_TYPES = ("ami",) -# supported images that require an install -INSTALLER_IMAGE_TYPES = ("anaconda-iso",) - -@dataclasses.dataclass(frozen=True) +@dataclasses.dataclass class TestCase: # container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 - container_ref: str + container_ref: str = "" # image is the image type, e.g. "ami" image: str = "" # target_arch is the target archicture, empty means current arch target_arch: str = "" # local means that the container should be pulled locally ("--local" flag) local: bool = False + # rootfs to use (e.g. ext4), some containers like fedora do not + # have a default rootfs. If unset the container default is used. 
+ rootfs: str = "" - def rootfs_args(self): - # fedora has no default rootfs so it must be specified - if "fedora-bootc" in self.container_ref: - return ["--rootfs", "btrfs"] + def bib_rootfs_args(self): + if self.rootfs: + return ["--rootfs", self.rootfs] return [] def __str__(self): @@ -44,74 +37,70 @@ def __str__(self): ]) -def gen_testcases(what): - # bootc containers that are tested by default - CONTAINERS_TO_TEST = { - "fedora": "quay.io/fedora/fedora-bootc:40", - "centos": "quay.io/centos-bootc/centos-bootc:stream9", - } - # allow commandline override, this is used when testing - # custom images - if os.getenv("BIB_TEST_BOOTC_CONTAINER_TAG"): - # TODO: make this more elegant - CONTAINERS_TO_TEST = { - "centos": os.getenv("BIB_TEST_BOOTC_CONTAINER_TAG"), - "fedora": [], - } +@dataclasses.dataclass +class TestCaseFedora(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:40" + rootfs: str = "btrfs" + +@dataclasses.dataclass +class TestCaseFedora41(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:41" + rootfs: str = "btrfs" + + +@dataclasses.dataclass +class TestCaseCentos(TestCase): + container_ref: str = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/centos-bootc:stream9") + + +def gen_testcases(what): if what == "manifest": - return [TestCase(container_ref=ref) - for ref in CONTAINERS_TO_TEST.values()] + return [TestCaseCentos(), TestCaseFedora()] elif what == "default-rootfs": # Fedora doesn't have a default rootfs - return [TestCase(container_ref=CONTAINERS_TO_TEST["centos"])] + return [TestCaseCentos()] elif what == "ami-boot": - test_cases = [] - for ref in CONTAINERS_TO_TEST.values(): - test_cases.append(TestCase(container_ref=ref, image="ami")) - return test_cases + return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")] elif what == "anaconda-iso": - test_cases = [] - for ref in CONTAINERS_TO_TEST.values(): - for img_type in INSTALLER_IMAGE_TYPES: - test_cases.append(TestCase(container_ref=ref, 
image=img_type)) - return test_cases + return [TestCaseCentos(image="anaconda-iso"), TestCaseFedora(image="anaconda-iso")] elif what == "qemu-boot": - test_cases = [] - for distro, ref in CONTAINERS_TO_TEST.items(): - for img_type in QEMU_BOOT_IMAGE_TYPES: - test_cases.append( - TestCase(container_ref=ref, image=img_type)) + test_cases = [ + klass(image=img) + for klass in (TestCaseCentos, TestCaseFedora) + for img in ("raw", "qcow2") + ] # do a cross arch test too if platform.machine() == "x86_64": # todo: add fedora:eln - test_cases.append(TestCase(container_ref=ref, image="raw", target_arch="arm64")) + test_cases.append( + TestCaseCentos(image="raw", target_arch="arm64")) elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too pass return test_cases elif what == "all": - test_cases = [] - for ref in CONTAINERS_TO_TEST.values(): - for img_type in QEMU_BOOT_IMAGE_TYPES + \ - CLOUD_BOOT_IMAGE_TYPES + \ - NON_QEMU_BOOT_IMAGE_TYPES + \ - INSTALLER_IMAGE_TYPES: - test_cases.append(TestCase(container_ref=ref, image=img_type)) - return test_cases + return [ + klass(image=img) + for klass in (TestCaseCentos, TestCaseFedora) + for img in ("ami", "anaconda-iso", "qcow2", "raw", "vmdk") + ] elif what == "multidisk": # single test that specifies all image types - test_cases = [] - for ref in CONTAINERS_TO_TEST.values(): - test_cases.append(TestCase(container_ref=ref, image="+".join(DISK_IMAGE_TYPES))) - return test_cases + image = "+".join(DISK_IMAGE_TYPES) + return [ + TestCaseCentos(image=image), + TestCaseFedora(image=image), + ] # Smoke test that all supported --target-arch architecture can # create a manifest elif what == "target-arch-smoke": return [ - TestCase(container_ref=CONTAINERS_TO_TEST["centos"], target_arch="arm64"), - # TODO: merge with CONTAINERS_TO_TEST once that moves to :41 too - TestCase(container_ref="quay.io/fedora/fedora-bootc:41", target_arch="ppc64le"), - TestCase(container_ref="quay.io/fedora/fedora-bootc:41", 
target_arch="s390x"), + TestCaseCentos(target_arch="arm64"), + # TODO: merge with TestCaseFedora once the arches are build there + TestCaseFedora41(target_arch="ppc64le"), + TestCaseFedora41(target_arch="s390x"), ] raise ValueError(f"unknown test-case type {what}") From bc6f4d90b47a9146a853070039b5c1fe6ef9c43f Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 1 Aug 2024 19:07:07 +0200 Subject: [PATCH 135/279] test: drop rootfs arg from iso manifest smoke test --- test/bib/test_manifest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index bc7eddd2..621108e3 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -60,7 +60,6 @@ def test_iso_manifest_smoke(build_container, tc): "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", - *tc.bib_rootfs_args(), "--type=anaconda-iso", f"{tc.container_ref}", ]) manifest = json.loads(output) From e3b04deab0952e3a6e82a05615d169e31ae6c592 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 4 Jul 2024 10:17:11 +0200 Subject: [PATCH 136/279] test: add pylint on our test Given that our testsuite is relatively complex it seems nice to let pylint have an opinion about it :) This commit adds the needed test and also fixes the various issues it found. 
--- .github.com/workflows/bibtests.yaml | 2 +- test/bib/conftest.py | 2 +- test/bib/requirements.txt | 1 + test/bib/test_build.py | 19 +++++++------ test/bib/test_flake8.py | 2 +- test/bib/test_manifest.py | 8 +++--- test/bib/test_opts.py | 4 +-- test/bib/test_pylint.py | 18 ++++++++++++ test/bib/testcases.py | 16 +++++------ test/bib/testutil.py | 11 ++++---- test/bib/testutil_test.py | 43 ++++++++++++++--------------- test/bib/vm.py | 8 +++--- 12 files changed, 76 insertions(+), 58 deletions(-) create mode 100644 test/bib/test_pylint.py diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 5d48e73d..0df676cf 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,7 +79,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static + sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint - name: Diskspace (before) run: | df -h diff --git a/test/bib/conftest.py b/test/bib/conftest.py index ef0e7bf9..4db68ad6 100644 --- a/test/bib/conftest.py +++ b/test/bib/conftest.py @@ -16,7 +16,7 @@ def force_aws_upload_fixture(request): # see https://hackebrot.github.io/pytest-tricks/param_id_func/ and # https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.hookspec.pytest_make_parametrize_id -def pytest_make_parametrize_id(config, val): +def pytest_make_parametrize_id(config, val): # pylint: disable=W0613 if isinstance(val, TestCase): return f"{val}" return None diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt index 3d6c5505..9be09ce7 100644 --- a/test/bib/requirements.txt +++ b/test/bib/requirements.txt @@ -3,3 +3,4 @@ flake8==6.1.0 paramiko==2.12.0 boto3==1.33.13 qmp==1.1.0 +pylint==3.2.5 diff --git 
a/test/bib/test_build.py b/test/bib/test_build.py index 143c3868..737cd865 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -5,18 +5,17 @@ import random import re import shutil -import subprocess import string +import subprocess import tempfile import uuid from contextlib import contextmanager from typing import NamedTuple import pytest - # local test utils import testutil -from containerbuild import build_container_fixture # noqa: F401 +from containerbuild import build_container_fixture # pylint: disable=unused-import from testcases import CLOUD_BOOT_IMAGE_TYPES, DISK_IMAGE_TYPES, gen_testcases from vm import AWS, QEMU @@ -46,8 +45,8 @@ class ImageBuildResult(NamedTuple): metadata: dict = {} -@pytest.fixture(scope='session') -def shared_tmpdir(tmpdir_factory): +@pytest.fixture(name="shared_tmpdir", scope='session') +def shared_tmpdir_fixture(tmpdir_factory): tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared")) yield tmp_path @@ -89,6 +88,8 @@ def images_fixture(shared_tmpdir, build_container, request, force_aws_upload): yield build_results +# XXX: refactor +# pylint: disable=too-many-locals,too-many-branches,too-many-statements @contextmanager def build_images(shared_tmpdir, build_container, request, force_aws_upload): """ @@ -391,8 +392,8 @@ def test_ami_boots_in_aws(image_type, force_aws_upload): def log_has_osbuild_selinux_denials(log): - OSBUID_SELINUX_DENIALS_RE = re.compile(r"(?ms)avc:\ +denied.*osbuild") - return re.search(OSBUID_SELINUX_DENIALS_RE, log) + osbuid_selinux_denials_re = re.compile(r"(?ms)avc:\ +denied.*osbuild") + return re.search(osbuid_selinux_denials_re, log) def parse_ami_id_from_log(log_output): @@ -420,7 +421,7 @@ def test_osbuild_selinux_denials_re_works(): def has_selinux(): - return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled").returncode == 0 + return testutil.has_executable("selinuxenabled") and subprocess.run("selinuxenabled", check=False).returncode == 0 
@pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") @@ -437,7 +438,7 @@ def test_image_build_without_se_linux_denials(image_type): def test_iso_installs(image_type): installer_iso_path = image_type.img_path test_disk_path = installer_iso_path.with_name("test-disk.img") - with open(test_disk_path, "w") as fp: + with open(test_disk_path, "w", encoding="utf8") as fp: fp.truncate(10_1000_1000_1000) # install to test disk with QEMU(test_disk_path, cdrom=installer_iso_path) as vm: diff --git a/test/bib/test_flake8.py b/test/bib/test_flake8.py index 5cb61a7b..bfd79219 100644 --- a/test/bib/test_flake8.py +++ b/test/bib/test_flake8.py @@ -7,5 +7,5 @@ def test_flake8(): p = pathlib.Path(__file__).parent # TODO: use all static checks from osbuild instead subprocess.check_call( - ["flake8", "--ignore=E402", "--max-line-length=120", + ["flake8", "--ignore=E402,F811,F401", "--max-line-length=120", os.fspath(p)]) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 621108e3..a62e8bf0 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -7,9 +7,9 @@ import textwrap import pytest - import testutil -from containerbuild import build_container_fixture # noqa: F401 + +from containerbuild import build_container_fixture # pylint: disable=unused-import from containerbuild import make_container from testcases import gen_testcases @@ -291,7 +291,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): # no need to parameterize this test, toml is the same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" - CFG = { + cfg = { "blueprint": { "customizations": { "filesystem": [ @@ -315,7 +315,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" output_path.mkdir(exist_ok=True) config_json_path = output_path / "config.json" - config_json_path.write_text(json.dumps(CFG), encoding="utf-8") + 
config_json_path.write_text(json.dumps(cfg), encoding="utf-8") with pytest.raises(subprocess.CalledProcessError) as exc: subprocess.check_output([ diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 57a8f7da..46f6a33c 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -3,8 +3,8 @@ import subprocess import pytest - -from containerbuild import build_container_fixture, build_fake_container_fixture # noqa: F401 +# pylint: disable=unused-import +from containerbuild import build_container_fixture, build_fake_container_fixture @pytest.fixture(name="container_storage", scope="session") diff --git a/test/bib/test_pylint.py b/test/bib/test_pylint.py new file mode 100644 index 00000000..eb1c386b --- /dev/null +++ b/test/bib/test_pylint.py @@ -0,0 +1,18 @@ +import pathlib +import subprocess + + +def test_pylint(): + p = pathlib.Path(__file__).parent + subprocess.check_call( + ["pylint", + "--disable=fixme", + "--disable=missing-class-docstring", + "--disable=missing-module-docstring", + "--disable=missing-function-docstring", + "--disable=too-many-instance-attributes", + # false positive because of "if yield else yield" in + # the "build_container" fixture, see + # https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/contextmanager-generator-missing-cleanup.html + "--disable=contextmanager-generator-missing-cleanup", + "--max-line-length=120"] + list(p.glob("*.py"))) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 2a085692..5acfc00b 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -56,17 +56,17 @@ class TestCaseCentos(TestCase): "quay.io/centos-bootc/centos-bootc:stream9") -def gen_testcases(what): +def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "manifest": return [TestCaseCentos(), TestCaseFedora()] - elif what == "default-rootfs": + if what == "default-rootfs": # Fedora doesn't have a default rootfs return [TestCaseCentos()] - elif what == "ami-boot": + if what == 
"ami-boot": return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")] - elif what == "anaconda-iso": + if what == "anaconda-iso": return [TestCaseCentos(image="anaconda-iso"), TestCaseFedora(image="anaconda-iso")] - elif what == "qemu-boot": + if what == "qemu-boot": test_cases = [ klass(image=img) for klass in (TestCaseCentos, TestCaseFedora) @@ -81,13 +81,13 @@ def gen_testcases(what): # TODO: add arm64->x86_64 cross build test too pass return test_cases - elif what == "all": + if what == "all": return [ klass(image=img) for klass in (TestCaseCentos, TestCaseFedora) for img in ("ami", "anaconda-iso", "qcow2", "raw", "vmdk") ] - elif what == "multidisk": + if what == "multidisk": # single test that specifies all image types image = "+".join(DISK_IMAGE_TYPES) return [ @@ -96,7 +96,7 @@ def gen_testcases(what): ] # Smoke test that all supported --target-arch architecture can # create a manifest - elif what == "target-arch-smoke": + if what == "target-arch-smoke": return [ TestCaseCentos(target_arch="arm64"), # TODO: merge with TestCaseFedora once the arches are build there diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 732c921b..5d85e2f0 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -22,7 +22,7 @@ def run_journalctl(*args): def journal_cursor(): output = run_journalctl("-n0", "--show-cursor") - cursor = output.split("\n")[-1] + cursor = output.rsplit("\n", maxsplit=1)[-1] return cursor.split("cursor: ")[-1] @@ -43,7 +43,7 @@ def get_free_port() -> int: def wait_ssh_ready(address, port, sleep, max_wait_sec): - for i in range(int(max_wait_sec / sleep)): + for _ in range(int(max_wait_sec / sleep)): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.settimeout(sleep) try: @@ -61,7 +61,7 @@ def has_x86_64_v3_cpu(): # x86_64-v3 has multiple features, see # https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels # but "avx2" is probably a good enough proxy - return " avx2 " in 
pathlib.Path("/proc/cpuinfo").read_text() + return " avx2 " in pathlib.Path("/proc/cpuinfo").read_text("utf8") def can_start_rootful_containers(): @@ -70,7 +70,7 @@ def can_start_rootful_containers(): # on linux we need to run "podman" with sudo to get full # root containers return os.getuid() == 0 - elif system == "Darwin": + if system == "Darwin": # on darwin a container is root if the podman machine runs # in "rootful" mode, i.e. no need to run "podman" as root # as it's just proxying to the VM @@ -78,8 +78,7 @@ def can_start_rootful_containers(): "podman", "machine", "inspect", "--format={{.Rootful}}", ], capture_output=True, encoding="utf8", check=True) return res.stdout.strip() == "true" - else: - raise ValueError(f"unknown platform {system}") + raise ValueError(f"unknown platform {system}") def write_aws_creds(path): diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py index 783e0f0a..a1b2f0d2 100644 --- a/test/bib/testutil_test.py +++ b/test/bib/testutil_test.py @@ -4,13 +4,12 @@ from unittest.mock import call, patch import pytest - from testutil import get_free_port, has_executable, wait_ssh_ready def test_get_free_port(): port_nr = get_free_port() - assert port_nr > 1024 and port_nr < 65535 + assert 1024 < port_nr < 65535 @pytest.fixture(name="free_port") @@ -26,34 +25,34 @@ def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): @pytest.mark.skipif(not has_executable("nc"), reason="needs nc") -def test_wait_ssh_ready_sleeps_wrong_reply(free_port, tmp_path): +def test_wait_ssh_ready_sleeps_wrong_reply(free_port): with contextlib.ExitStack() as cm: - p = subprocess.Popen( + with subprocess.Popen( f"echo not-ssh | nc -vv -l -p {free_port}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", - ) - cm.callback(p.kill) - # wait for nc to be ready - while True: - # netcat tranditional uses "listening", others "Listening" - # so just omit the first char - if "istening " in p.stdout.readline(): - break - # 
now connect - with patch("time.sleep") as mocked_sleep: - with pytest.raises(ConnectionRefusedError): - wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55) - assert mocked_sleep.call_args_list == [ - call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] + ) as p: + cm.callback(p.kill) + # wait for nc to be ready + while True: + # netcat tranditional uses "listening", others "Listening" + # so just omit the first char + if "istening " in p.stdout.readline(): + break + # now connect + with patch("time.sleep") as mocked_sleep: + with pytest.raises(ConnectionRefusedError): + wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55) + assert mocked_sleep.call_args_list == [ + call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] @pytest.mark.skipif(platform.system() == "Darwin", reason="hangs on macOS") @pytest.mark.skipif(not has_executable("nc"), reason="needs nc") -def test_wait_ssh_ready_integration(free_port, tmp_path): +def test_wait_ssh_ready_integration(free_port): with contextlib.ExitStack() as cm: - p = subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) - cm.callback(p.kill) - wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10) + with subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) as p: + cm.callback(p.kill) + wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10) diff --git a/test/bib/vm.py b/test/bib/vm.py index 1bb21f5b..a1be56a5 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -1,6 +1,5 @@ import abc import os -import paramiko import pathlib import platform import subprocess @@ -10,9 +9,9 @@ from io import StringIO import boto3 +import paramiko from botocore.exceptions import ClientError from paramiko.client import AutoAddPolicy, SSHClient - from testutil import AWS_REGION, get_free_port, wait_ssh_ready @@ -163,6 +162,7 @@ def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): self._address = "localhost" # XXX: use systemd-run to ensure cleanup? 
+ # pylint: disable=consider-using-with self._qemu_p = subprocess.Popen( self._gen_qemu_cmdline(snapshot, use_ovmf), stdout=sys.stdout, @@ -185,11 +185,11 @@ def _wait_qmp_socket(self, timeout_sec): if os.path.exists(self._qmp_socket): return True time.sleep(1) - raise Exception(f"no {self._qmp_socket} after {timeout_sec} seconds") + raise TimeoutError(f"no {self._qmp_socket} after {timeout_sec} seconds") def wait_qmp_event(self, qmp_event): # import lazy to avoid requiring it for all operations - import qmp + import qmp # pylint: disable=import-outside-toplevel self._wait_qmp_socket(30) mon = qmp.QEMUMonitorProtocol(os.fspath(self._qmp_socket)) mon.connect() From d14cdf476511a668cde716d9bf01f447f70ea97c Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 29 Jul 2024 13:33:33 +0200 Subject: [PATCH 137/279] test: extract `testutil.podman_run_common` This consolidates the duplication from `test_{build,manifest}` when running podman. All common arguments are now in a single place which also means that the host container storage is mapped consistently which should speed up the tests. 
--- test/bib/test_build.py | 5 +--- test/bib/test_manifest.py | 59 +++++++++------------------------------ test/bib/testutil.py | 9 ++++++ 3 files changed, 23 insertions(+), 50 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 737cd865..a29b85bf 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -248,10 +248,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): # run container to deploy an image into a bootable disk and upload to a cloud service if applicable cmd = [ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + *testutil.podman_run_common, "-v", f"{config_json_path}:/config.json:ro", "-v", f"{output_path}:/output", "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index a62e8bf0..d610500b 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -32,9 +32,7 @@ def find_image_size_from(manifest_str): @pytest.mark.parametrize("tc", gen_testcases("manifest")) def test_manifest_smoke(build_container, tc): output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", @@ -54,9 +52,7 @@ def test_manifest_smoke(build_container, tc): @pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) def test_iso_manifest_smoke(build_container, tc): output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", @@ -85,12 +81,7 @@ def test_manifest_disksize(tmp_path, build_container, tc): with make_container(tmp_path) as container_tag: print(f"using 
{container_tag}") manifest_str = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", - # ensure local storage is here - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - # need different entry point + *testutil.podman_run_common, "--entrypoint", "/usr/bin/bootc-image-builder", build_container, "manifest", "--local", @@ -107,6 +98,7 @@ def test_manifest_local_checks_containers_storage_errors(build_container): # "-v /var/lib/containers/storage:/var/lib/containers/storage" # is missing here res = subprocess.run([ + # not using *testutil.podman_run_common to test bad usage "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", @@ -128,10 +120,7 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe with make_container(tmp_path) as container_tag: subprocess.run([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--local", @@ -151,10 +140,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): with make_container(tmp_path, arch="x86_64") as container_tag: with pytest.raises(subprocess.CalledProcessError) as exc: subprocess.run([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--target-arch=aarch64", @@ -178,9 +164,7 @@ def find_rootfs_type_from(manifest_str): def test_manifest_rootfs_respected(build_container, tc): # TODO: derive container and fake "bootc install print-configuration"? 
output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", f"{tc.container_ref}", @@ -198,9 +182,7 @@ def test_manifest_rootfs_override(build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--rootfs", "btrfs", f"{container_ref}", @@ -232,11 +214,8 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): groups = ["wheel"] """)) output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + *testutil.podman_run_common, "-v", f"{config_toml_path}:/config.toml:ro", - "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", f"{container_ref}", @@ -262,11 +241,8 @@ def test_manifest_installer_customizations(tmp_path, build_container): \"\"\" """)) output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + *testutil.podman_run_common, "-v", f"{config_toml_path}:/config.toml:ro", - "--security-opt", "label=type:unconfined_t", "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--type=anaconda-iso", f"{container_ref}", @@ -319,10 +295,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): with pytest.raises(subprocess.CalledProcessError) as exc: subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "-v", 
f"{output_path}:/output", "--entrypoint=/usr/bin/bootc-image-builder", build_container, @@ -342,10 +315,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): def test_manifest_checks_build_container_is_bootc(build_container, container_ref, should_error, expected_error): def check_image_ref(): subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', build_container, ], stderr=subprocess.PIPE, encoding="utf8") @@ -361,10 +331,7 @@ def check_image_ref(): def test_manifest_target_arch_smoke(build_container, tc): # TODO: actually build an image too output = subprocess.check_output([ - "podman", "run", "--rm", - "--privileged", - "-v", "/var/lib/containers/storage:/var/lib/containers/storage", - "--security-opt", "label=type:unconfined_t", + *testutil.podman_run_common, "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 5d85e2f0..47360614 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -107,3 +107,12 @@ def deregister_ami(ami_id): err_msg = err.response["Error"]["Message"] print(f"Couldn't deregister image {ami_id}.") print(f"Error {err_code}: {err_msg}") + + +# podman_run_common has the common prefix for the podman run invocations +podman_run_common = [ + "podman", "run", "--rm", + "--privileged", + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + "--security-opt", "label=type:unconfined_t", +] From d5cafb13922f728611c6a62c87eb2d650787576d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 5 Aug 2024 17:39:12 +0200 Subject: [PATCH 138/279] test: fix test for `bootc-image-builder --version` when in non-git When tmt runs it will create a build from a non-git directory. 
This was breaking the tests and code `test_bib_version`. This commit makes it more robust by ensuring that `unkown` is printed when `bib` was build from a non-git directory. --- test/bib/test_opts.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 46f6a33c..e95ec22a 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -163,7 +163,14 @@ def test_bib_version(tmp_path, container_storage, build_fake_container): build_fake_container, "version", ], check=True, capture_output=True, text=True) - needle = "revision: " + + expected_rev = "unknown" + git_res = subprocess.run( + ["git", "describe", "--always"], + capture_output=True, text=True, check=False) + if git_res.returncode == 0: + expected_rev = git_res.stdout.strip() + needle = f"revision: {expected_rev}" assert needle in res.stdout From 4aa93dabbbc68a2b4fda0c579c16dccc6b778c8f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 13 Aug 2024 09:17:59 +0200 Subject: [PATCH 139/279] test: test for customizations.modules.{enable,disable} Check that anaconda-iso customization works on the manifest level. 
--- test/bib/test_manifest.py | 52 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index d610500b..a8685122 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -344,3 +344,55 @@ def test_manifest_target_arch_smoke(build_container, tc): # table be beside this there is relatively little that is different assert manifest["version"] == "2" assert manifest["pipelines"][0]["name"] == "build" + + +def find_image_anaconda_stage(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "anaconda-tree": + for st in pipl["stages"]: + if st["type"] == "org.osbuild.anaconda": + return st + raise ValueError(f"cannot find disk size in manifest:\n{manifest_str}") + + +@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) +def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container, tc): + cfg = { + "customizations": { + "installer": { + "modules": { + "enable": [ + "org.fedoraproject.Anaconda.Modules.Localization", + # disable takes precedence + "org.fedoraproject.Anaconda.Modules.Timezone", + ], + "disable": [ + # defaults can be disabled as well + "org.fedoraproject.Anaconda.Modules.Users", + # disable takes precedence + "org.fedoraproject.Anaconda.Modules.Timezone", + ] + }, + }, + }, + } + output_path = pathlib.Path(tmpdir_factory.mktemp("data")) / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{output_path}:/output", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + "manifest", + "--config", "/output/config.json", + *tc.bib_rootfs_args(), + "--type=anaconda-iso", tc.container_ref, + ]) + st = find_image_anaconda_stage(output) + assert 
"org.fedoraproject.Anaconda.Modules.Localization" in st["options"]["activatable-modules"] + assert "org.fedoraproject.Anaconda.Modules.Users" not in st["options"]["activatable-modules"] + assert "org.fedoraproject.Anaconda.Modules.Timezone" not in st["options"]["activatable-modules"] From 2ece8acaa07b93068c0f7c0245a80b5a25e3715e Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Fri, 9 Aug 2024 18:13:56 +0200 Subject: [PATCH 140/279] test: add rootfs argument to ImageBuildResult We'll use the rootfs argument of the test case to determine the partitions that are expected when verifying the built image. In the future it might be more convenient to include the whole configuration in the ImageBuildResult to perform more functional tests on the image. --- test/bib/test_build.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index a29b85bf..43eead80 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -36,6 +36,7 @@ class ImageBuildResult(NamedTuple): img_path: str img_arch: str container_ref: str + rootfs: str username: str password: str ssh_keyfile_private_path: str @@ -159,7 +160,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( - image_type, generated_img, tc.target_arch, tc.container_ref, + image_type, generated_img, tc.target_arch, tc.container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) @@ -299,7 +300,7 @@ def del_ami(): results = [] for image_type in image_types: results.append(ImageBuildResult( - image_type, artifact[image_type], tc.target_arch, tc.container_ref, + image_type, artifact[image_type], tc.target_arch, tc.container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield 
results From c1fc40b94e93cce41189bfe5e7451260d9351eeb Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 8 Aug 2024 21:06:13 +0200 Subject: [PATCH 141/279] test: add custom mountpoints for non-btrfs tests When the rootfs type supports it, add extra mountpoints to the test build. Btrfs subvolumes are not supported. --- test/bib/test_build.py | 7 +------ test/bib/testutil.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 43eead80..0ec8cc77 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -202,12 +202,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "groups": ["wheel"], }, ], - "filesystem": [ - { - "mountpoint": "/", - "minsize": "12GiB" - }, - ], + "filesystem": testutil.create_filesystem_customizations(tc.rootfs), "kernel": { "append": kargs, }, diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 47360614..6033327b 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -109,6 +109,37 @@ def deregister_ami(ami_id): print(f"Error {err_code}: {err_msg}") +def create_filesystem_customizations(rootfs: str): + if rootfs == "btrfs": + # only minimal customizations are supported for btrfs currently + return [ + { + "mountpoint": "/", + "minsize": "12 GiB" + }, + ] + + # add some custom mountpoints + return [ + { + "mountpoint": "/", + "minsize": "12 GiB" + }, + { + "mountpoint": "/var/data", + "minsize": "3 GiB" + }, + { + "mountpoint": "/var/data/test", + "minsize": "1 GiB" + }, + { + "mountpoint": "/var/opt", + "minsize": "2 GiB" + }, + ] + + # podman_run_common has the common prefix for the podman run invocations podman_run_common = [ "podman", "run", "--rm", From 234474bc2880c564b3458c792f9073a26a1884dd Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Fri, 9 Aug 2024 20:15:17 +0200 Subject: [PATCH 142/279] test: verify filesystem sizes when boot testing During the boot test 
(test_image_boots()), run 'df' in the VM and verify that the partitions and filesystems on disk match the filesystem customizations from the build config. Previously, we only had a check for /sysroot, but that wasn't doing anything because there was no call to 'df' preceding it. The current test first checks that every mountpoint specified in the build config exists in the VM and that the minsizes are respected (with some offsets that will be investigated separately). --- test/bib/test_build.py | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 0ec8cc77..a54c6ee8 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -355,14 +355,18 @@ def test_image_boots(image_type): # XXX: read the fully yaml instead? assert f"image: {image_type.container_ref}" in output - # Figure out how big / is and make sure it is > 11bGiB - # Note that df output is in 1k blocks, not bytes - for line in output.splitlines(): + # check the minsize specified in the build configuration for each mountpoint against the sizes in the image + exit_status, output = test_vm.run("df --output=target,size", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 + # parse the output of 'df' to a mountpoint -> size dict for convenience + mountpoint_sizes = {} + for line in output.splitlines()[1:]: fields = line.split() - if fields[0] == "/sysroot": - size = int(fields[1]) - assert size > 11 * 1024 * 1024 - break + # Note that df output is in 1k blocks, not bytes + mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes + + assert_fs_customizations(image_type, mountpoint_sizes) @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) @@ -454,3 +458,26 @@ def test_multi_build_request(images): assert result.img_path.exists() artifacts.add(filename) assert artifacts == expected + + +def 
assert_fs_customizations(image_type, mountpoint_sizes): + fs_customizations = testutil.create_filesystem_customizations(image_type.rootfs) + for fs in fs_customizations: + mountpoint = fs["mountpoint"] + if mountpoint == "/": + # / is actually /sysroot + mountpoint = "/sysroot" + assert mountpoint in mountpoint_sizes + + minsize_human = fs["minsize"] + # assume all sizes are GiB + minsize_str = minsize_human.removesuffix("GiB").strip() + minsize = int(minsize_str) * 2 ** 30 + # TODO: find the exact source of all the discrepancies or compare the actual partition sizes instead of the + # filesystem sizes + if mountpoint == "/sysroot": + minsize -= 2 ** 30 # reduce expected /sysroot size by 1 GiB + + # NOTE: xfs filesystems are ~40 MiB and ext4 ~26 MiB smaller than the partition - reduce minsize by 100 MiB + minsize -= 100 * 2 ** 20 + assert minsize < mountpoint_sizes[mountpoint] From 58e4cf683cc9d6385a25606e80eda5ab9de01789 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 12 Aug 2024 12:52:03 +0200 Subject: [PATCH 143/279] bib: propagate error messages from policy.Check() The errors returned by policy.Check() can contain a reason for why a path was rejected, especially when the rejection is not based on the path policy itself. For example, a non-absolute or non-canonical path will be rejected. This information is useful for the user, so let's not discard that extra information and return all error messages we encountered. 
Co-authored-by: Michael Vogt --- test/bib/test_manifest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index a8685122..c0d2f62a 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -302,7 +302,8 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "manifest", f"{container_ref}", "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") - assert 'The following custom mountpoints are not supported ["/ostree"]' in exc.value.stderr + assert "The following errors occurred while validating custom mountpoints:\npath '/ostree ' is not allowed" \ + in exc.value.stderr @pytest.mark.parametrize( From c90c047ebe77ae7334c5ccfd36cd0ff64cc47506 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 13 Aug 2024 21:20:47 +0200 Subject: [PATCH 144/279] test: manifest test for filesystem customizations Add a test that checks that filesystem customizations appear in the manifest in the fstab stage. Checks that: - All filesystems, where appropriate, have the same type as the desired rootfs. - All filesystems listed in the customizations appear in the fstab stage. 
--- test/bib/test_manifest.py | 67 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index c0d2f62a..4284723f 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -397,3 +397,70 @@ def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container assert "org.fedoraproject.Anaconda.Modules.Localization" in st["options"]["activatable-modules"] assert "org.fedoraproject.Anaconda.Modules.Users" not in st["options"]["activatable-modules"] assert "org.fedoraproject.Anaconda.Modules.Timezone" not in st["options"]["activatable-modules"] + + +def find_fstab_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipeline in manifest["pipelines"]: + # the fstab stage in cross-arch manifests is in the "ostree-deployment" pipeline + if pipeline["name"] in ("image", "ostree-deployment"): + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.fstab": + return st + raise ValueError(f"cannot find fstab stage in manifest:\n{manifest_str}") + + +@pytest.mark.parametrize("fscustomizations,rootfs", [ + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "xfs"), + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB", "/var/data": "42 GiB"}, "ext4"), + ({"/": "2 GiB"}, "btrfs"), + ({}, "ext4"), + ({}, "xfs"), + ({}, "btrfs"), +]) +def test_manifest_fs_customizations(tmp_path, build_container, fscustomizations, rootfs): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "filesystem": [{"mountpoint": mnt, "minsize": minsize} for mnt, minsize in fscustomizations.items()], + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + 
"--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + "manifest", f"{container_ref}", + ]) + assert_fs_customizations(fscustomizations, rootfs, output) + + +def assert_fs_customizations(customizations, fstype, manifest): + # use the fstab stage to get filesystem types for each mountpoint + fstab_stage = find_fstab_stage_from(manifest) + filesystems = fstab_stage["options"]["filesystems"] + + manifest_mountpoints = set() + for fs in filesystems: + manifest_mountpoints.add(fs["path"]) + if fs["path"] == "/boot/efi": + assert fs["vfs_type"] == "vfat" + continue + + if fstype == "btrfs" and fs["path"] == "/boot": + # /boot keeps its default fstype when using btrfs + assert fs["vfs_type"] == "ext4" + continue + + assert fs["vfs_type"] == fstype, f"incorrect filesystem type for {fs['path']}" + + # check that all fs customizations appear in fstab + for custom_mountpoint in customizations: + assert custom_mountpoint in manifest_mountpoints From b7b9e3c9e9910d5c4b17aca1b55714807e9bb302 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 13 Aug 2024 21:20:47 +0200 Subject: [PATCH 145/279] test: manifest test for filesystem customizations (cross-arch) Add the same test as in the previous commit but for cross-arch manifests. Cross-arch builds only support ext4 filesystems. 
Any selected root fstype (whether from the container itself or from the --rootfs option) is ignored (with a warning) --- test/bib/test_manifest.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 4284723f..88fcee25 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -464,3 +464,39 @@ def assert_fs_customizations(customizations, fstype, manifest): # check that all fs customizations appear in fstab for custom_mountpoint in customizations: assert custom_mountpoint in manifest_mountpoints + + +@pytest.mark.skipif(platform.uname().machine != "x86_64", reason="cross arch test only runs on x86") +@pytest.mark.parametrize("fscustomizations,rootfs", [ + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "xfs"), + ({"/var/data": "2 GiB", "/var/stuff": "10 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB"}, "ext4"), + ({"/": "2 GiB", "/boot": "1 GiB", "/var/data": "42 GiB"}, "ext4"), + ({"/": "2 GiB"}, "btrfs"), + ({}, "ext4"), + ({}, "xfs"), + ({}, "btrfs"), +]) +def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomizations, rootfs): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "filesystem": [{"mountpoint": mnt, "minsize": minsize} for mnt, minsize in fscustomizations.items()], + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + "--target-arch=aarch64", + "manifest", f"{container_ref}", + ]) + + # cross-arch builds only support ext4 (for now) + assert_fs_customizations(fscustomizations, "ext4", output) From 32fe65a920732a336b232d5df876986a9f6252ae Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 
15 Aug 2024 12:03:34 +0200 Subject: [PATCH 146/279] test: drop size comparison in assert_fs_customizations The size comparison in the build test is bad. Customizations control the size of a partition but the function was comparing the usable size of a filesystem, which can be very different. Attempts were made to cover the difference, by simply reducing the expected size by a fixed amount, but were ultimately inadequate. We can do better. --- test/bib/test_build.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index a54c6ee8..ae1db04e 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -356,6 +356,7 @@ def test_image_boots(image_type): assert f"image: {image_type.container_ref}" in output # check the minsize specified in the build configuration for each mountpoint against the sizes in the image + # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint exit_status, output = test_vm.run("df --output=target,size", user="root", keyfile=image_type.ssh_keyfile_private_path) assert exit_status == 0 @@ -461,6 +462,12 @@ def test_multi_build_request(images): def assert_fs_customizations(image_type, mountpoint_sizes): + """ + Asserts that each mountpoint that appears in the build configuration also appears in mountpoint_sizes. + + TODO: assert that the size of each filesystem (or partition) also matches the expected size based on the + customization. 
+ """ fs_customizations = testutil.create_filesystem_customizations(image_type.rootfs) for fs in fs_customizations: mountpoint = fs["mountpoint"] @@ -468,16 +475,3 @@ def assert_fs_customizations(image_type, mountpoint_sizes): # / is actually /sysroot mountpoint = "/sysroot" assert mountpoint in mountpoint_sizes - - minsize_human = fs["minsize"] - # assume all sizes are GiB - minsize_str = minsize_human.removesuffix("GiB").strip() - minsize = int(minsize_str) * 2 ** 30 - # TODO: find the exact source of all the discrepancies or compare the actual partition sizes instead of the - # filesystem sizes - if mountpoint == "/sysroot": - minsize -= 2 ** 30 # reduce expected /sysroot size by 1 GiB - - # NOTE: xfs filesystems are ~40 MiB and ext4 ~26 MiB smaller than the partition - reduce minsize by 100 MiB - minsize -= 100 * 2 ** 20 - assert minsize < mountpoint_sizes[mountpoint] From 99c728ef95d429cf40a60235aeb0463040249309 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Wed, 14 Aug 2024 13:17:43 +0200 Subject: [PATCH 147/279] bib: make build the default subcommand Prior this commit, the bootc-image-builder container image had a custom entrypoint that hardcoded the use of the build subcommand. This meant that if a user wanted to use a different subcommand, they had to overwrite the entrypoint. This commit changes the cobra code in bib to fallback to build if no subcommand was given. This is slighly ugly, but it allows us to remove the custom entrypoint, streamlining the use of subcommands. 
Let's see an example of calling the version subcommand: Before: podman run --rm -it --entrypoint=/usr/bin/bootc-image-builder \ quay.io/centos-bootc/bootc-image-builder:latest version After: sudo podman run --rm -it \ quay.io/centos-bootc/bootc-image-builder:latest version Kudos to https://github.com/IKukhta for his code from https://github.com/spf13/cobra/issues/823#issuecomment-870027246 --- test/bib/test_manifest.py | 16 ++-------------- test/bib/test_opts.py | 2 -- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 88fcee25..d38e237c 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -33,7 +33,6 @@ def find_image_size_from(manifest_str): def test_manifest_smoke(build_container, tc): output = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", *tc.bib_rootfs_args(), @@ -53,7 +52,6 @@ def test_manifest_smoke(build_container, tc): def test_iso_manifest_smoke(build_container, tc): output = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--type=anaconda-iso", f"{tc.container_ref}", @@ -82,7 +80,6 @@ def test_manifest_disksize(tmp_path, build_container, tc): print(f"using {container_tag}") manifest_str = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint", "/usr/bin/bootc-image-builder", build_container, "manifest", "--local", *tc.bib_rootfs_args(), @@ -102,7 +99,6 @@ def test_manifest_local_checks_containers_storage_errors(build_container): "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--local", "arg-not-used", ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8") @@ -121,7 +117,6 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, 
build_containe with make_container(tmp_path) as container_tag: subprocess.run([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--local", *tc.bib_rootfs_args(), @@ -141,7 +136,6 @@ def test_manifest_cross_arch_check(tmp_path, build_container): with pytest.raises(subprocess.CalledProcessError) as exc: subprocess.run([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--target-arch=aarch64", "--local", f"localhost/{container_tag}" @@ -165,7 +159,6 @@ def test_manifest_rootfs_respected(build_container, tc): # TODO: derive container and fake "bootc install print-configuration"? output = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", f"{tc.container_ref}", ]) @@ -183,7 +176,6 @@ def test_manifest_rootfs_override(build_container): output = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--rootfs", "btrfs", f"{container_ref}", ]) @@ -216,7 +208,6 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{config_toml_path}:/config.toml:ro", - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", f"{container_ref}", ]) @@ -243,7 +234,6 @@ def test_manifest_installer_customizations(tmp_path, build_container): output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{config_toml_path}:/config.toml:ro", - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--type=anaconda-iso", f"{container_ref}", ]) @@ -297,7 +287,6 @@ def test_mount_ostree_error(tmpdir_factory, build_container): subprocess.check_output([ *testutil.podman_run_common, "-v", f"{output_path}:/output", - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", 
f"{container_ref}", "--config", "/output/config.json", @@ -317,8 +306,9 @@ def test_manifest_checks_build_container_is_bootc(build_container, container_ref def check_image_ref(): subprocess.check_output([ *testutil.podman_run_common, - f'--entrypoint=["/usr/bin/bootc-image-builder", "manifest", "{container_ref}"]', build_container, + "manifest", + container_ref, ], stderr=subprocess.PIPE, encoding="utf8") if should_error: with pytest.raises(subprocess.CalledProcessError) as exc: @@ -333,7 +323,6 @@ def test_manifest_target_arch_smoke(build_container, tc): # TODO: actually build an image too output = subprocess.check_output([ *testutil.podman_run_common, - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", *tc.bib_rootfs_args(), @@ -386,7 +375,6 @@ def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{output_path}:/output", - "--entrypoint=/usr/bin/bootc-image-builder", build_container, "manifest", "--config", "/output/config.json", diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index e95ec22a..188b4f1f 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -122,7 +122,6 @@ def test_bib_help_hides_config(tmp_path, container_storage, build_fake_container "--security-opt", "label=type:unconfined_t", "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", - "--entrypoint=/usr/bin/bootc-image-builder", build_fake_container, "manifest", "--help", ], check=True, capture_output=True, text=True) @@ -159,7 +158,6 @@ def test_bib_version(tmp_path, container_storage, build_fake_container): "--security-opt", "label=type:unconfined_t", "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", - "--entrypoint=/usr/bin/bootc-image-builder", build_fake_container, "version", ], check=True, capture_output=True, text=True) From 7367b9007c033e7b386d1705ea7a83a723993da3 Mon Sep 
17 00:00:00 2001 From: Michael Vogt Date: Mon, 19 Aug 2024 14:06:10 +0200 Subject: [PATCH 148/279] bib: update to images v0.78 and update for API changes Two changes: - The pathpolicy.PathTri type is no longer exported and it is `pathpolicy.Pathpolicies` instead - The error messages from images for path policy violations changes and need an update (arguable this is not the best layer to test the error details here and instead we should check how much we can move down to images but that is a followup :) --- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index d38e237c..b89a4b36 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -291,7 +291,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "manifest", f"{container_ref}", "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") - assert "The following errors occurred while validating custom mountpoints:\npath '/ostree ' is not allowed" \ + assert 'The following errors occurred while validating custom mountpoints:\npath "/ostree" is not allowed' \ in exc.value.stderr From 4d4fa79fff16d96cffc15768ff4f95bc6141b3e8 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 28 Aug 2024 07:16:00 +0200 Subject: [PATCH 149/279] test: disable arm64 test until #691 is fixed Unfortunately ostree is now using openat2 which is currently not supported by qemu-user so the cross-arch install fails. We are looking into this but for now disable the test to unblock other landing as there is nothing bib can do to fix this. 
--- test/bib/testcases.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 5acfc00b..1157111d 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -74,9 +74,12 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements ] # do a cross arch test too if platform.machine() == "x86_64": - # todo: add fedora:eln - test_cases.append( - TestCaseCentos(image="raw", target_arch="arm64")) + # TODO: re-enable once + # https://github.com/osbuild/bootc-image-builder/issues/619 + # is resolved + # test_cases.append( + # TestCaseCentos(image="raw", target_arch="arm64")) + pass elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too pass From 472d52571b421d61ecac145148c721a9f1e87b2b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 5 Sep 2024 19:05:12 +0200 Subject: [PATCH 150/279] bib: add `vhd` to supported image types This is the coresponding change to the images PR [0] that adds support to build `vhd` type images directly via bib. 
[0] https://github.com/osbuild/images/pull/909 --- test/bib/test_build.py | 3 ++- test/bib/testcases.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index ae1db04e..8895a484 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -134,6 +134,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "ami": pathlib.Path(output_path) / "image/disk.raw", "raw": pathlib.Path(output_path) / "image/disk.raw", "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk", + "vhd": pathlib.Path(output_path) / "vpc/disk.vhd", "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(set(tc.image for tc in gen_testcases("all"))), \ @@ -453,7 +454,7 @@ def test_iso_installs(image_type): @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() - expected = {"disk.qcow2", "disk.raw", "disk.vmdk"} + expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk"} for result in images: filename = os.path.basename(result.img_path) assert result.img_path.exists() diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 1157111d..277bafd7 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -4,10 +4,10 @@ import platform # disk image types can be build from a single manifest -DISK_IMAGE_TYPES = ("qcow2", "raw", "vmdk") +DISK_IMAGE_TYPES = ["qcow2", "raw", "vmdk", "vhd"] # supported images that can be booted in a cloud -CLOUD_BOOT_IMAGE_TYPES = ("ami",) +CLOUD_BOOT_IMAGE_TYPES = ["ami"] @dataclasses.dataclass @@ -88,7 +88,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements return [ klass(image=img) for klass in (TestCaseCentos, TestCaseFedora) - for img in ("ami", "anaconda-iso", "qcow2", "raw", "vmdk") + for img in CLOUD_BOOT_IMAGE_TYPES + DISK_IMAGE_TYPES + ["anaconda-iso"] ] if what == "multidisk": # single test that 
specifies all image types From 06aff9eb9858dca983bb4edfd7b4bfebedb56235 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 24 Sep 2024 15:26:36 +0200 Subject: [PATCH 151/279] test: add (failing) regression test for issue#655 This adds another layer of defense for issue#655, i.e. have a hand craftet toml that integration tests with bib as a smoke test. --- test/bib/test_manifest.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index b89a4b36..57fd17fd 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -430,6 +430,36 @@ def test_manifest_fs_customizations(tmp_path, build_container, fscustomizations, assert_fs_customizations(fscustomizations, rootfs, output) +def test_manifest_fs_customizations_smoke_toml(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + rootfs = "xfs" + + expected_fs_customizations = { + "/": 10 * 1024 * 1024 * 1024, + "/var/data": 20 * 1024 * 1024 * 1024, + } + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [[customizations.filesystem]] + mountpoint = "/" + minsize = "10 GiB" + + [[customizations.filesystem]] + mountpoint = "/var/data" + minsize = "20 GiB" + """)) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_toml_path}:/config.toml:ro", + "--entrypoint=/usr/bin/bootc-image-builder", + build_container, + f"--rootfs={rootfs}", + "manifest", f"{container_ref}", + ]) + assert_fs_customizations(expected_fs_customizations, rootfs, output) + + def assert_fs_customizations(customizations, fstype, manifest): # use the fstab stage to get filesystem types for each mountpoint fstab_stage = find_fstab_stage_from(manifest) From 1431442209e72b9d2b9efa975ad646de33279f78 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 24 Sep 2024 15:58:14 +0200 Subject: [PATCH 152/279] buildconfig: disable tests for undecoded 
keys for now This commit partially reverts PT#549 to unblock filesystem customizations in bib. This is a short term fix and we should revert and do something smarter like https://github.com/osbuild/images/pull/951 or see if we can do better in the toml unmarshaling. But to unblock toml customizations this is a (IMHO) reasonable first step. Closes: https://github.com/osbuild/bootc-image-builder/issues/655 --- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 57fd17fd..1978d614 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -435,7 +435,7 @@ def test_manifest_fs_customizations_smoke_toml(tmp_path, build_container): rootfs = "xfs" expected_fs_customizations = { - "/": 10 * 1024 * 1024 * 1024, + "/": 10 * 1024 * 1024 * 1024, "/var/data": 20 * 1024 * 1024 * 1024, } From 78129be70e019d33886026568b29780f631f7068 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 25 Sep 2024 13:04:27 +0200 Subject: [PATCH 153/279] test: move `target-arch-smoke` testcases to Fedora42 (for dnf5) This will give us testing for dnf5 as well. 
--- test/bib/testcases.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 277bafd7..564af645 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -44,8 +44,8 @@ class TestCaseFedora(TestCase): @dataclasses.dataclass -class TestCaseFedora41(TestCase): - container_ref: str = "quay.io/fedora/fedora-bootc:41" +class TestCaseFedora42(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:42" rootfs: str = "btrfs" @@ -103,7 +103,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements return [ TestCaseCentos(target_arch="arm64"), # TODO: merge with TestCaseFedora once the arches are build there - TestCaseFedora41(target_arch="ppc64le"), - TestCaseFedora41(target_arch="s390x"), + TestCaseFedora42(target_arch="ppc64le"), + TestCaseFedora42(target_arch="s390x"), ] raise ValueError(f"unknown test-case type {what}") From d611f8ca8396997d0ca59345a28caeeded30c006 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 12 Sep 2024 12:31:33 +0200 Subject: [PATCH 154/279] bib: add a new `gce` image type This commit adds a new image type `gce` that contains a tar file with the raw image inside. This can then be imported into GCE. 
--- test/bib/test_build.py | 3 ++- test/bib/testcases.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8895a484..74b316a5 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -135,6 +135,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "raw": pathlib.Path(output_path) / "image/disk.raw", "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk", "vhd": pathlib.Path(output_path) / "vpc/disk.vhd", + "gce": pathlib.Path(output_path) / "gce/image.tgz", "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(set(tc.image for tc in gen_testcases("all"))), \ @@ -454,7 +455,7 @@ def test_iso_installs(image_type): @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() - expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk"} + expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk", "image.tgz"} for result in images: filename = os.path.basename(result.img_path) assert result.img_path.exists() diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 564af645..c26b9123 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -4,7 +4,7 @@ import platform # disk image types can be build from a single manifest -DISK_IMAGE_TYPES = ["qcow2", "raw", "vmdk", "vhd"] +DISK_IMAGE_TYPES = ["qcow2", "raw", "vmdk", "vhd", "gce"] # supported images that can be booted in a cloud CLOUD_BOOT_IMAGE_TYPES = ["ami"] From fd68746602f7c7536f39583c71c8346f269691c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Mart=C3=ADn?= Date: Wed, 9 Oct 2024 08:27:59 +0200 Subject: [PATCH 155/279] test: add a test to verify libosinfo detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a test to verify the anaconda ISO label is properly detected by libosinfo. 
Signed-off-by: Miguel Martín --- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_build.py | 30 +++++++++++++++++++++++------ test/bib/testcases.py | 3 +++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 0df676cf..1c9b1308 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,7 +79,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint + sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint libosinfo-bin - name: Diskspace (before) run: | df -h diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 74b316a5..449ace58 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -35,6 +35,7 @@ class ImageBuildResult(NamedTuple): img_type: str img_path: str img_arch: str + osinfo_template: str container_ref: str rootfs: str username: str @@ -162,9 +163,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( - image_type, generated_img, tc.target_arch, tc.container_ref, tc.rootfs, - username, password, ssh_keyfile_private_path, - kargs, bib_output, journal_output)) + image_type, generated_img, tc.target_arch, tc.osinfo_template, + tc.container_ref, tc.rootfs, username, password, + ssh_keyfile_private_path, kargs, bib_output, journal_output)) # generate new keyfile if not ssh_keyfile_private_path.exists(): @@ -297,9 +298,9 @@ def del_ami(): results = [] for image_type in image_types: results.append(ImageBuildResult( - image_type, artifact[image_type], tc.target_arch, 
tc.container_ref, tc.rootfs, - username, password, ssh_keyfile_private_path, - kargs, bib_output, journal_output, metadata)) + image_type, artifact[image_type], tc.target_arch, tc.osinfo_template, + tc.container_ref, tc.rootfs, username, password, + ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results # Try to cache as much as possible @@ -452,6 +453,23 @@ def test_iso_installs(image_type): assert_kernel_args(vm, image_type) +@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) +def test_iso_os_detection(image_type): + installer_iso_path = image_type.img_path + arch = image_type.img_arch + if not arch: + arch = platform.machine() + osinfo = image_type.osinfo_template.format(arch=arch) + result = subprocess.run([ + "osinfo-detect", + installer_iso_path, + ], capture_output=True, text=True, check=True) + osinfo_output = result.stdout + expected_output = f"Media is bootable.\nMedia is an installer for OS '{osinfo}'\n" + assert osinfo_output == expected_output + + @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() diff --git a/test/bib/testcases.py b/test/bib/testcases.py index c26b9123..f6425d5e 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -41,12 +41,14 @@ def __str__(self): class TestCaseFedora(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:40" rootfs: str = "btrfs" + osinfo_template: str = "Fedora Server 40 ({arch})" @dataclasses.dataclass class TestCaseFedora42(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:42" rootfs: str = "btrfs" + osinfo_template: str = "Fedora Server 42 ({arch})" @dataclasses.dataclass @@ -54,6 +56,7 @@ class TestCaseCentos(TestCase): container_ref: str = os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", 
"quay.io/centos-bootc/centos-bootc:stream9") + osinfo_template: str = "CentOS Stream 9 ({arch})" def gen_testcases(what): # pylint: disable=too-many-return-statements From bc3ac9175926d4ba960b1b4fa8d8cc9691498abe Mon Sep 17 00:00:00 2001 From: Matthieu Bernardin Date: Thu, 17 Oct 2024 10:42:23 +0200 Subject: [PATCH 156/279] test: GCE output ends with .tar.gz --- test/bib/test_build.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 449ace58..e6517538 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -136,7 +136,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): "raw": pathlib.Path(output_path) / "image/disk.raw", "vmdk": pathlib.Path(output_path) / "vmdk/disk.vmdk", "vhd": pathlib.Path(output_path) / "vpc/disk.vhd", - "gce": pathlib.Path(output_path) / "gce/image.tgz", + "gce": pathlib.Path(output_path) / "gce/image.tar.gz", "anaconda-iso": pathlib.Path(output_path) / "bootiso/install.iso", } assert len(artifact) == len(set(tc.image for tc in gen_testcases("all"))), \ @@ -473,7 +473,7 @@ def test_iso_os_detection(image_type): @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() - expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk", "image.tgz"} + expected = {"disk.qcow2", "disk.raw", "disk.vhd", "disk.vmdk", "image.tar.gz"} for result in images: filename = os.path.basename(result.img_path) assert result.img_path.exists() From ce33c1cd6f343f4d1d4de92d6d6ce2d124d592cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Mart=C3=ADn?= Date: Tue, 22 Oct 2024 13:35:53 +0200 Subject: [PATCH 157/279] test: add anaconda-iso build tests with signed containers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add anaconda-iso iso build tests with signed containers. 
The rest of the images can be also added to the test once [1] and [2] are merged [1] https://github.com/osbuild/images/pull/990 [2] https://github.com/osbuild/osbuild/pull/1906 Signed-off-by: Miguel Martín --- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_build.py | 197 ++++++++++++++++++++++++++-- test/bib/testcases.py | 10 +- test/bib/testutil.py | 10 ++ 4 files changed, 205 insertions(+), 14 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 1c9b1308..95d34823 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -93,7 +93,7 @@ jobs: echo "deb $sources_url/ /" | sudo tee /etc/apt/sources.list.d/devel-kubic-libcontainers-unstable.list curl -fsSL $key_url | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null sudo apt update - sudo apt install -y podman + sudo apt install -y podman skopeo - name: Install python test deps run: | # make sure test deps are available for root diff --git a/test/bib/test_build.py b/test/bib/test_build.py index e6517538..771eadf1 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -11,6 +11,7 @@ import uuid from contextlib import contextmanager from typing import NamedTuple +from dataclasses import dataclass import pytest # local test utils @@ -47,14 +48,169 @@ class ImageBuildResult(NamedTuple): metadata: dict = {} +@dataclass +class GPGConf: + email: str + key_length: str + home_dir: str + pub_key_file: str + key_params: str + + +@dataclass +class RegistryConf: + local_registry: str + sigstore_dir: str + registries_d_dir: str + policy_file: str + lookaside_conf_file: str + lookaside_conf: str + + @pytest.fixture(name="shared_tmpdir", scope='session') def shared_tmpdir_fixture(tmpdir_factory): tmp_path = pathlib.Path(tmpdir_factory.mktemp("shared")) yield tmp_path +@pytest.fixture(name="gpg_conf", scope='session') +def gpg_conf_fixture(shared_tmpdir): + key_params_tmpl = 
""" + %no-protection + Key-Type: RSA + Key-Length: {key_length} + Key-Usage: sign + Name-Real: Bootc Image Builder Tests + Name-Email: {email} + Expire-Date: 0 + """ + email = "bib-tests@redhat.com" + key_length = "3072" + home_dir = f"{shared_tmpdir}/.gnupg" + pub_key_file = f"{shared_tmpdir}/GPG-KEY-bib-tests" + key_params = key_params_tmpl.format(key_length=key_length, email=email) + + os.makedirs(home_dir, mode=0o700, exist_ok=False) + subprocess.run( + ["gpg", "--gen-key", "--batch"], + check=True, env={"GNUPGHOME": home_dir}, + input=key_params, + text=True) + subprocess.run( + ["gpg", "--output", pub_key_file, + "--armor", "--export", email], + check=True, env={"GNUPGHOME": home_dir}) + + yield GPGConf(email=email, home_dir=home_dir, + key_length=key_length, pub_key_file=pub_key_file, key_params=key_params) + + +@pytest.fixture(name="registry_conf", scope='session') +def registry_conf_fixture(shared_tmpdir, request): + lookaside_conf_tmpl = """ + docker: + {local_registry}: + lookaside: file:///{sigstore_dir} + """ + registry_port = testutil.get_free_port() + # We cannot use localhost as we need to access the registry from both + # the host system and the bootc-image-builder container. 
+ default_ip = testutil.get_ip_from_default_route() + local_registry = f"{default_ip}:{registry_port}" + sigstore_dir = f"{shared_tmpdir}/sigstore" + registries_d_dir = f"{shared_tmpdir}/registries.d" + policy_file = f"{shared_tmpdir}/policy.json" + lookaside_conf_file = f"{registries_d_dir}/lookaside.yaml" + lookaside_conf = lookaside_conf_tmpl.format( + local_registry=local_registry, + sigstore_dir=sigstore_dir + ) + os.makedirs(registries_d_dir, mode=0o700, exist_ok=True) + os.makedirs(sigstore_dir, mode=0o700, exist_ok=True) + + registry_container_name = f"registry_{registry_port}" + + registry_container_running = subprocess.run([ + "podman", "ps", "-a", "--filter", f"name={registry_container_name}", "--format", "{{.Names}}" + ], check=True, capture_output=True, text=True).stdout.strip() + if registry_container_running != registry_container_name: + subprocess.run([ + "podman", "run", "-d", + "-p", f"{registry_port}:5000", + "--restart", "always", + "--name", registry_container_name, + "registry:2" + ], check=True) + + registry_container_state = subprocess.run([ + "podman", "ps", "-a", "--filter", f"name={registry_container_name}", "--format", "{{.State}}" + ], check=True, capture_output=True, text=True).stdout.strip() + + if registry_container_state in ("paused", "exited"): + subprocess.run([ + "podman", "start", registry_container_name + ], check=True) + + def remove_registry(): + subprocess.run([ + "podman", "rm", "--force", registry_container_name + ], check=True) + + request.addfinalizer(remove_registry) + yield RegistryConf( + local_registry=local_registry, + sigstore_dir=sigstore_dir, + registries_d_dir=registries_d_dir, + policy_file=policy_file, + lookaside_conf=lookaside_conf, + lookaside_conf_file=lookaside_conf_file, + ) + + +def get_signed_container_ref(local_registry: str, container_ref: str): + container_ref_path = container_ref[container_ref.index('/'):] + return f"{local_registry}{container_ref_path}" + + +def sign_container_image(gpg_conf: 
GPGConf, registry_conf: RegistryConf, container_ref): + registry_policy = { + "default": [{"type": "insecureAcceptAnything"}], + "transports": { + "docker": { + f"{registry_conf.local_registry}": [ + { + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": f"{gpg_conf.pub_key_file}" + } + ] + }, + "docker-daemon": { + "": [{"type": "insecureAcceptAnything"}] + } + } + } + with open(registry_conf.policy_file, mode="w", encoding="utf-8") as f: + f.write(json.dumps(registry_policy)) + + with open(registry_conf.lookaside_conf_file, mode="w", encoding="utf-8") as f: + f.write(registry_conf.lookaside_conf) + + signed_container_ref = get_signed_container_ref(registry_conf.local_registry, container_ref) + cmd = [ + "skopeo", "--registries.d", registry_conf.registries_d_dir, + "copy", "--dest-tls-verify=false", "--remove-signatures", + "--sign-by", gpg_conf.email, + f"docker://{container_ref}", + f"docker://{signed_container_ref}", + ] + print(cmd) + subprocess.run(cmd, check=True, env={"GNUPGHOME": gpg_conf.home_dir}) + + @pytest.fixture(name="image_type", scope="session") -def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload): +# pylint: disable=too-many-arguments +def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf): """ Build an image inside the passed build_container and return an ImageBuildResult with the resulting image path and user/password @@ -76,24 +232,27 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload f"containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]{cont_tag}" ]) - with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + with build_images(shared_tmpdir, build_container, + request, force_aws_upload, gpg_conf, registry_conf) as build_results: yield build_results[0] @pytest.fixture(name="images", scope="session") -def images_fixture(shared_tmpdir, build_container, 
request, force_aws_upload): +# pylint: disable=too-many-arguments +def images_fixture(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf): """ Build one or more images inside the passed build_container and return an ImageBuildResult array with the resulting image path and user/password """ - with build_images(shared_tmpdir, build_container, request, force_aws_upload) as build_results: + with build_images(shared_tmpdir, build_container, + request, force_aws_upload, gpg_conf, registry_conf) as build_results: yield build_results # XXX: refactor -# pylint: disable=too-many-locals,too-many-branches,too-many-statements +# pylint: disable=too-many-locals,too-many-branches,too-many-statements,too-many-arguments @contextmanager -def build_images(shared_tmpdir, build_container, request, force_aws_upload): +def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf): """ Build all available image types if necessary and return the results for the image types that were requested via :request:. 
@@ -113,11 +272,16 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): password = "password" kargs = "systemd.journald.forward_to_console=1" + container_ref = tc.container_ref + + if tc.sign: + container_ref = get_signed_container_ref(registry_conf.local_registry, tc.container_ref) + # params can be long and the qmp socket (that has a limit of 100ish # AF_UNIX) is derived from the path # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to # different image type combinations - output_path = shared_tmpdir / format(abs(hash(tc.container_ref + str(tc.target_arch))), "x") + output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.target_arch))), "x") output_path.mkdir(exist_ok=True) # make sure that the test store exists, because podman refuses to start if the source directory for a volume @@ -164,7 +328,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( image_type, generated_img, tc.target_arch, tc.osinfo_template, - tc.container_ref, tc.rootfs, username, password, + container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) # generate new keyfile @@ -257,15 +421,26 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload): if tc.local: cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"]) + if tc.sign: + sign_container_image(gpg_conf, registry_conf, tc.container_ref) + signed_image_args = [ + "-v", f"{registry_conf.policy_file}:/etc/containers/policy.json", + "-v", f"{registry_conf.lookaside_conf_file}:/etc/containers/registries.d/bib-lookaside.yaml", + "-v", f"{registry_conf.sigstore_dir}:{registry_conf.sigstore_dir}", + "-v", f"{gpg_conf.pub_key_file}:{gpg_conf.pub_key_file}", + ] + cmd.extend(signed_image_args) + cmd.extend([ *creds_args, build_container, 
- tc.container_ref, + container_ref, *types_arg, *upload_args, *target_arch_args, *tc.bib_rootfs_args(), "--local" if tc.local else "--local=false", + "--tls-verify=false" if tc.sign else "--tls-verify=true" ]) # print the build command for easier tracing @@ -299,7 +474,7 @@ def del_ami(): for image_type in image_types: results.append(ImageBuildResult( image_type, artifact[image_type], tc.target_arch, tc.osinfo_template, - tc.container_ref, tc.rootfs, username, password, + container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -316,7 +491,7 @@ def del_ami(): img.unlink() else: print("does not exist") - subprocess.run(["podman", "rmi", tc.container_ref], check=False) + subprocess.run(["podman", "rmi", container_ref], check=False) return diff --git a/test/bib/testcases.py b/test/bib/testcases.py index f6425d5e..8b0ef21c 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -23,6 +23,8 @@ class TestCase: # rootfs to use (e.g. ext4), some containers like fedora do not # have a default rootfs. If unset the container default is used. 
rootfs: str = "" + # Sign the container_ref and use the new signed image instead of the original one + sign: bool = False def bib_rootfs_args(self): if self.rootfs: @@ -31,7 +33,7 @@ def bib_rootfs_args(self): def __str__(self): return ",".join([ - attr + f"{name}={attr}" for name, attr in inspect.getmembers(self) if not name.startswith("_") and not callable(attr) and attr ]) @@ -68,7 +70,11 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "ami-boot": return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")] if what == "anaconda-iso": - return [TestCaseCentos(image="anaconda-iso"), TestCaseFedora(image="anaconda-iso")] + return [ + TestCaseFedora(image="anaconda-iso", sign=True), + TestCaseCentos(image="anaconda-iso"), + TestCaseFedora(image="anaconda-iso"), + ] if what == "qemu-boot": test_cases = [ klass(image=img) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 6033327b..b853c613 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -147,3 +147,13 @@ def create_filesystem_customizations(rootfs: str): "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", ] + + +def get_ip_from_default_route(): + default_route = subprocess.run([ + "ip", + "route", + "list", + "default" + ], check=True, capture_output=True, text=True).stdout + return default_route.split()[8] From bec59129779df5d23f241fc0a52f0626d8ce23e8 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 17 Oct 2024 14:59:35 +0200 Subject: [PATCH 158/279] test: fix the testcase strings to not include `osinfo_template` This commit fixes the testcase string representation to not include the `osinfo_template` string and adds a test case to not accidentally regress here. Currently our test case strings look like: ``` ... ``` which is an accident. A testcase should ideally only contain what Instead of putting the osinfo_template into the testcase itself, have a function that generates it. 
It has downsides (now the osinfo_template is more disconnected from the testcase) so we could move it also back into testcase or we could exclude osinfo_template from the string generation. Ideas welcome here, my current approach feels okay but not like it's perfect yet. --- test/bib/test_build.py | 17 ++++++++++++----- test/bib/testcases.py | 13 ++++++++++--- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 771eadf1..7c94616d 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -36,7 +36,6 @@ class ImageBuildResult(NamedTuple): img_type: str img_path: str img_arch: str - osinfo_template: str container_ref: str rootfs: str username: str @@ -327,7 +326,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ journal_output = journal_log_path.read_text(encoding="utf8") bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( - image_type, generated_img, tc.target_arch, tc.osinfo_template, + image_type, generated_img, tc.target_arch, container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) @@ -473,7 +472,7 @@ def del_ami(): results = [] for image_type in image_types: results.append(ImageBuildResult( - image_type, artifact[image_type], tc.target_arch, tc.osinfo_template, + image_type, artifact[image_type], tc.target_arch, container_ref, tc.rootfs, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -628,6 +627,15 @@ def test_iso_installs(image_type): assert_kernel_args(vm, image_type) +def osinfo_for(it: ImageBuildResult, arch: str) -> str: + if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"): + return f"CentOS Stream 9 ({arch})" + if "/fedora/fedora-bootc:" in it.container_ref: + ver = it.container_ref.rsplit(":", maxsplit=1)[1] + return f"Fedora Server {ver} ({arch})" + raise ValueError(f"unknown osinfo 
string for '{it.container_ref}'") + + @pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) def test_iso_os_detection(image_type): @@ -635,13 +643,12 @@ def test_iso_os_detection(image_type): arch = image_type.img_arch if not arch: arch = platform.machine() - osinfo = image_type.osinfo_template.format(arch=arch) result = subprocess.run([ "osinfo-detect", installer_iso_path, ], capture_output=True, text=True, check=True) osinfo_output = result.stdout - expected_output = f"Media is bootable.\nMedia is an installer for OS '{osinfo}'\n" + expected_output = f"Media is bootable.\nMedia is an installer for OS '{osinfo_for(image_type, arch)}'\n" assert osinfo_output == expected_output diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 8b0ef21c..80b2b9ee 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -43,14 +43,12 @@ def __str__(self): class TestCaseFedora(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:40" rootfs: str = "btrfs" - osinfo_template: str = "Fedora Server 40 ({arch})" @dataclasses.dataclass class TestCaseFedora42(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:42" rootfs: str = "btrfs" - osinfo_template: str = "Fedora Server 42 ({arch})" @dataclasses.dataclass @@ -58,7 +56,16 @@ class TestCaseCentos(TestCase): container_ref: str = os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", "quay.io/centos-bootc/centos-bootc:stream9") - osinfo_template: str = "CentOS Stream 9 ({arch})" + + +def test_testcase_nameing(): + """ + Ensure the testcase naming does not change without us knowing as those + are visible when running "pytest --collect-only" + """ + tc = TestCaseFedora() + expected = "container_ref=quay.io/fedora/fedora-bootc:40,rootfs=btrfs" + assert f"{tc}" == expected, f"{tc} != {expected}" def gen_testcases(what): # pylint: disable=too-many-return-statements From 
bcfac5fd446b9f571e662e7be48327f91debe499 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 12 Nov 2024 08:35:29 +0100 Subject: [PATCH 159/279] tests: drop (unsigned) fedora ISO testcase This commit drops the "unsigned" fedora ISO genartion testcases. We also test unsigned ISO generation via centos-9 and we test the fedora ISO generation via the "signed" fedora ISO test so running this specific test gives us little and it is also one of the most "expensive" tests we run (around 10min). --- test/bib/testcases.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 80b2b9ee..3adfa768 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -80,7 +80,6 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements return [ TestCaseFedora(image="anaconda-iso", sign=True), TestCaseCentos(image="anaconda-iso"), - TestCaseFedora(image="anaconda-iso"), ] if what == "qemu-boot": test_cases = [ From 40c731ead659afb13f6616e83b001e82e7fef76e Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 11 Nov 2024 10:54:52 +0100 Subject: [PATCH 160/279] workflow: move workflow to run on ubuntu-24.04 This commit moves the GH runner to move to 24.04. This means we no longer need a extra repository to get a current version of podman and it generally means our software stack is more modern and closer to what version people run. Note that this also drops "podman,skopeo" as those are preinstalled. Unfortuantely the default 24.04 podman configuration is buggy and a workaround is needed. 
--- .github.com/workflows/bibtests.yaml | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 95d34823..73d576f4 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -67,7 +67,7 @@ jobs: integration: # TODO: run this also via tmt/testing-farm name: "Integration" - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 with: @@ -79,21 +79,17 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y podman python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint libosinfo-bin + sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint libosinfo-bin - name: Diskspace (before) run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh - - name: Update podman - run: | - # from https://askubuntu.com/questions/1414446/whats-the-recommended-way-of-installing-podman-4-in-ubuntu-22-04 - ubuntu_version='22.04' - key_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}/Release.key" - sources_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${ubuntu_version}" - echo "deb $sources_url/ /" | sudo tee /etc/apt/sources.list.d/devel-kubic-libcontainers-unstable.list - curl -fsSL $key_url | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null - sudo apt update - sudo apt install -y podman skopeo + - name: Workaround podman issues in GH actions + run: | + # see https://github.com/osbuild/bootc-image-builder/issues/446 + sudo rm -rf /var/lib/containers/storage + sudo mkdir -p /etc/containers + echo -e "[storage]\ndriver = \"overlay\"\nrunroot = 
\"/run/containers/storage\"\ngraphroot = \"/var/lib/containers/storage\"" | sudo tee /etc/containers/storage.conf - name: Install python test deps run: | # make sure test deps are available for root From 9bfd3f60f7ee80d60673bc965a8fa375392f4a67 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 12 Nov 2024 09:03:36 +0100 Subject: [PATCH 161/279] test: add test that ensures fips customizations work This commit adds a quick manifest level test that ensures that the FIPS customizations set for the iso installer are actually set in the manifest. --- test/bib/test_manifest.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 1978d614..f28d005b 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -518,3 +518,35 @@ def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomiza # cross-arch builds only support ext4 (for now) assert_fs_customizations(fscustomizations, "ext4", output) + + +def find_grub2_iso_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + for st in pipl["stages"]: + if st["type"] == "org.osbuild.grub2.iso": + return st + raise ValueError(f"cannot find grub2.iso stage in manifest:\n{manifest_str}") + + +def test_manifest_fips_customization(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "fips": True, + }, + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + # XXX: test for qcow2 too + "--type=anaconda-iso", + "manifest", f"{container_ref}", + ], text=True) + st = find_grub2_iso_stage_from(output) + assert "fips=1" in st["options"]["kernel"]["opts"] From 070799ca8ad2e3644d0d6e0a800825f93bfcdbbc 
Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 21 Nov 2024 11:05:15 +0100 Subject: [PATCH 162/279] bib: extract new partition table helpers and add new DiskCustomization This commit makes use of the excellent work in https://github.com/osbuild/images/pull/1041 and wires up support to generate LVM and btrfs volumes via the new disk customizations. It also add tests. --- test/bib/test_manifest.py | 80 +++++++++++++++++++++++++++++++++++++++ test/bib/testcases.py | 12 +++++- 2 files changed, 90 insertions(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index f28d005b..8badfd9e 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -550,3 +550,83 @@ def test_manifest_fips_customization(tmp_path, build_container): ], text=True) st = find_grub2_iso_stage_from(output) assert "fips=1" in st["options"]["kernel"]["opts"] + + +def find_bootc_install_to_fs_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipeline in manifest["pipelines"]: + # the fstab stage in cross-arch manifests is in the "ostree-deployment" pipeline + if pipeline["name"] == "image": + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.bootc.install-to-filesystem": + return st + raise ValueError(f"cannot find bootc.install-to-filesystem stage in manifest:\n{manifest_str}") + + +def test_manifest_disk_customization_lvm(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "type": "lvm", + "logical_volumes": [ + { + "fs_type": "ext4", + "mountpoint": "/", + } + ] + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + st = 
find_bootc_install_to_fs_stage_from(output) + assert st["devices"]["rootlv"]["type"] == "org.osbuild.lvm2.lv" + + +def test_manifest_disk_customization_btrfs(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "type": "btrfs", + "subvolumes": [ + { + "name": "root", + "mountpoint": "/", + } + ] + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + st = find_bootc_install_to_fs_stage_from(output) + assert st["mounts"][0]["type"] == "org.osbuild.btrfs" + assert st["mounts"][0]["target"] == "/" diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 3adfa768..0cf51e1d 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -25,6 +25,8 @@ class TestCase: rootfs: str = "" # Sign the container_ref and use the new signed image instead of the original one sign: bool = False + # use special partition_mode like "lvm" + partition_mode: str = "" def bib_rootfs_args(self): if self.rootfs: @@ -82,11 +84,17 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseCentos(image="anaconda-iso"), ] if what == "qemu-boot": + # test partition defaults with qcow2 test_cases = [ - klass(image=img) + klass(image="qcow2") for klass in (TestCaseCentos, TestCaseFedora) - for img in ("raw", "qcow2") ] + # and custom with raw (this is arbitrary, we could do it the + # other way around too + test_cases.append( + TestCaseCentos(image="raw", partition_mode="lvm")) + test_cases.append( + TestCaseFedora(image="raw", partition_mode="btrfs")) # do a cross arch test too if platform.machine() == "x86_64": # TODO: re-enable once From 60fc846c2bd57a95c07b4d4c3c000804cdd310d3 Mon Sep 17 00:00:00 2001 
From: Michael Vogt Date: Thu, 21 Nov 2024 12:59:47 +0100 Subject: [PATCH 163/279] test: add disk-customization test This commit adds a new integration test that checks for the disk customizations. It exchanges some of the filesystem tests with disk customization tests. --- test/bib/test_build.py | 72 +++++++++++++++++++++++++++------------ test/bib/test_manifest.py | 3 ++ test/bib/testcases.py | 8 ++--- test/bib/testutil.py | 53 +++++++++++++++++++++++++--- 4 files changed, 106 insertions(+), 30 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 7c94616d..7c8ffd13 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -38,6 +38,7 @@ class ImageBuildResult(NamedTuple): img_arch: str container_ref: str rootfs: str + disk_config: str username: str password: str ssh_keyfile_private_path: str @@ -203,7 +204,6 @@ def sign_container_image(gpg_conf: GPGConf, registry_conf: RegistryConf, contain f"docker://{container_ref}", f"docker://{signed_container_ref}", ] - print(cmd) subprocess.run(cmd, check=True, env={"GNUPGHOME": gpg_conf.home_dir}) @@ -280,7 +280,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ # AF_UNIX) is derived from the path # hash the container_ref+target_arch, but exclude the image_type so that the output path is shared between calls to # different image type combinations - output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.target_arch))), "x") + output_path = shared_tmpdir / format(abs(hash(container_ref + str(tc.disk_config) + str(tc.target_arch))), "x") output_path.mkdir(exist_ok=True) # make sure that the test store exists, because podman refuses to start if the source directory for a volume @@ -327,7 +327,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( image_type, generated_img, tc.target_arch, - container_ref, tc.rootfs, username, 
password, + container_ref, tc.rootfs, tc.disk_config, + username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) # generate new keyfile @@ -368,12 +369,14 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ "groups": ["wheel"], }, ], - "filesystem": testutil.create_filesystem_customizations(tc.rootfs), "kernel": { "append": kargs, }, }, } + testutil.maybe_create_filesystem_customizations(cfg, tc) + testutil.maybe_create_disk_customizations(cfg, tc) + print(f"config for {output_path} {tc=}: {cfg=}") config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(cfg), encoding="utf-8") @@ -473,7 +476,8 @@ def del_ami(): for image_type in image_types: results.append(ImageBuildResult( image_type, artifact[image_type], tc.target_arch, - container_ref, tc.rootfs, username, password, + container_ref, tc.rootfs, tc.disk_config, + username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -532,19 +536,10 @@ def test_image_boots(image_type): # XXX: read the fully yaml instead? 
assert f"image: {image_type.container_ref}" in output - # check the minsize specified in the build configuration for each mountpoint against the sizes in the image - # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint - exit_status, output = test_vm.run("df --output=target,size", user="root", - keyfile=image_type.ssh_keyfile_private_path) - assert exit_status == 0 - # parse the output of 'df' to a mountpoint -> size dict for convenience - mountpoint_sizes = {} - for line in output.splitlines()[1:]: - fields = line.split() - # Note that df output is in 1k blocks, not bytes - mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes - - assert_fs_customizations(image_type, mountpoint_sizes) + if image_type.disk_config: + assert_disk_customizations(image_type, test_vm) + else: + assert_fs_customizations(image_type, test_vm) @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) @@ -663,17 +658,52 @@ def test_multi_build_request(images): assert artifacts == expected -def assert_fs_customizations(image_type, mountpoint_sizes): +def assert_fs_customizations(image_type, test_vm): """ Asserts that each mountpoint that appears in the build configuration also appears in mountpoint_sizes. TODO: assert that the size of each filesystem (or partition) also matches the expected size based on the customization. 
""" - fs_customizations = testutil.create_filesystem_customizations(image_type.rootfs) - for fs in fs_customizations: + # check the minsize specified in the build configuration for each mountpoint against the sizes in the image + # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint + exit_status, output = test_vm.run("df --output=target,size", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 + # parse the output of 'df' to a mountpoint -> size dict for convenience + mountpoint_sizes = {} + for line in output.splitlines()[1:]: + fields = line.split() + # Note that df output is in 1k blocks, not bytes + mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes + + cfg = { + "customizations": {}, + } + testutil.maybe_create_filesystem_customizations(cfg, image_type) + for fs in cfg["customizations"]["filesystem"]: mountpoint = fs["mountpoint"] if mountpoint == "/": # / is actually /sysroot mountpoint = "/sysroot" assert mountpoint in mountpoint_sizes + + +def assert_disk_customizations(image_type, test_vm): + exit_status, output = test_vm.run("findmnt --json", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 + findmnt = json.loads(output) + if dc := image_type.disk_config: + if dc == "lvm": + mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] + if mnt["target"] == "/sysroot"] + assert len(mnts) == 1 + assert "/dev/mapper/vg00-rootlv" == mnts[0]["source"] + elif dc == "btrfs": + mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] + if mnt["target"] == "/sysroot"] + assert len(mnts) == 1 + assert "btrfs" == mnts[0]["fstype"] + # ensure sysroot comes from the "root" subvolume + assert mnts[0]["source"].endswith("[/root]") diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 8badfd9e..c869bdb4 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -572,8 +572,10 @@ def 
test_manifest_disk_customization_lvm(tmp_path, build_container): "partitions": [ { "type": "lvm", + "minsize": "10 GiB", "logical_volumes": [ { + "minsize": "10 GiB", "fs_type": "ext4", "mountpoint": "/", } @@ -606,6 +608,7 @@ def test_manifest_disk_customization_btrfs(tmp_path, build_container): "partitions": [ { "type": "btrfs", + "minsize": "10 GiB", "subvolumes": [ { "name": "root", diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 0cf51e1d..ed0ee45e 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -25,8 +25,8 @@ class TestCase: rootfs: str = "" # Sign the container_ref and use the new signed image instead of the original one sign: bool = False - # use special partition_mode like "lvm" - partition_mode: str = "" + # use special disk_config like "lvm" + disk_config: str = "" def bib_rootfs_args(self): if self.rootfs: @@ -92,9 +92,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements # and custom with raw (this is arbitrary, we could do it the # other way around too test_cases.append( - TestCaseCentos(image="raw", partition_mode="lvm")) + TestCaseCentos(image="raw", disk_config="lvm")) test_cases.append( - TestCaseFedora(image="raw", partition_mode="btrfs")) + TestCaseFedora(image="raw", disk_config="btrfs")) # do a cross arch test too if platform.machine() == "x86_64": # TODO: re-enable once diff --git a/test/bib/testutil.py b/test/bib/testutil.py index b853c613..8ee8682d 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -109,18 +109,21 @@ def deregister_ami(ami_id): print(f"Error {err_code}: {err_msg}") -def create_filesystem_customizations(rootfs: str): - if rootfs == "btrfs": +def maybe_create_filesystem_customizations(cfg, tc): + # disk_config and filesystem_customization are mutually exclusive + if tc.disk_config: + return + if tc.rootfs == "btrfs": # only minimal customizations are supported for btrfs currently - return [ + cfg["customizations"]["filesystem"] = [ { "mountpoint": "/", 
"minsize": "12 GiB" }, ] - + return # add some custom mountpoints - return [ + cfg["customizations"]["filesystem"] = [ { "mountpoint": "/", "minsize": "12 GiB" @@ -140,6 +143,46 @@ def create_filesystem_customizations(rootfs: str): ] +def maybe_create_disk_customizations(cfg, tc): + if not tc.disk_config: + return + if tc.disk_config == "lvm": + cfg["customizations"]["disk"] = { + "partitions": [ + { + "type": "lvm", + # XXX: why is this minsize also needed? should we derrive + # it from the LVs ? + "minsize": "10 GiB", + "logical_volumes": [ + { + "minsize": "10 GiB", + "fs_type": "xfs", + "mountpoint": "/", + } + ] + } + ] + } + elif tc.disk_config == "btrfs": + cfg["customizations"]["disk"] = { + "partitions": [ + { + "type": "btrfs", + "minsize": "10 GiB", + "subvolumes": [ + { + "name": "varlog", + "mountpoint": "/var/log", + } + ] + } + ] + } + else: + raise ValueError(f"unsupported disk_config {tc.disk_config}") + + # podman_run_common has the common prefix for the podman run invocations podman_run_common = [ "podman", "run", "--rm", From 89e3d2687d6fffdd04a065859d79013f48af52e6 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 5 Dec 2024 11:51:31 +0100 Subject: [PATCH 164/279] test: add integration test for the new `swap` customization This commit adds an integration test for the new swap disk customization that got added to the `images` library in PR https://github.com/osbuild/images/pull/1072 Also extends the LVM test to include swap on lvm. 
--- test/bib/test_build.py | 7 +++ test/bib/test_manifest.py | 92 +++++++++++++++++++++++++++++++++++++++ test/bib/testcases.py | 15 +++---- test/bib/testutil.py | 13 ++++++ 4 files changed, 118 insertions(+), 9 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 7c8ffd13..02e9da0f 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -694,12 +694,17 @@ def assert_disk_customizations(image_type, test_vm): keyfile=image_type.ssh_keyfile_private_path) assert exit_status == 0 findmnt = json.loads(output) + exit_status, swapon_output = test_vm.run("swapon --show", user="root", + keyfile=image_type.ssh_keyfile_private_path) + assert exit_status == 0 if dc := image_type.disk_config: if dc == "lvm": mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] if mnt["target"] == "/sysroot"] assert len(mnts) == 1 assert "/dev/mapper/vg00-rootlv" == mnts[0]["source"] + # check swap too + assert "7G" in swapon_output elif dc == "btrfs": mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] if mnt["target"] == "/sysroot"] @@ -707,3 +712,5 @@ def assert_disk_customizations(image_type, test_vm): assert "btrfs" == mnts[0]["fstype"] # ensure sysroot comes from the "root" subvolume assert mnts[0]["source"].endswith("[/root]") + elif dc == "swap": + assert "123M" in swapon_output diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index c869bdb4..b64cf5e8 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -633,3 +633,95 @@ def test_manifest_disk_customization_btrfs(tmp_path, build_container): st = find_bootc_install_to_fs_stage_from(output) assert st["mounts"][0]["type"] == "org.osbuild.btrfs" assert st["mounts"][0]["target"] == "/" + + +def find_mkswap_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipeline in manifest["pipelines"]: + if pipeline["name"] == "image": + for st in pipeline["stages"]: + if st["type"] == "org.osbuild.mkswap": + return st + raise 
ValueError(f"cannot find mkswap stage in manifest:\n{manifest_str}") + + +def test_manifest_disk_customization_swap(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "minsize": "2 GiB", + "fs_type": "swap", + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + mkswap_stage = find_mkswap_stage_from(output) + assert mkswap_stage["options"].get("uuid") + swap_uuid = mkswap_stage["options"]["uuid"] + fstab_stage = find_fstab_stage_from(output) + filesystems = fstab_stage["options"]["filesystems"] + assert { + 'uuid': swap_uuid, + "vfs_type": "swap", + "path": "none", + "options": "defaults", + } in filesystems + + +def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + config = { + "customizations": { + "disk": { + "partitions": [ + { + "type": "lvm", + "minsize": "10 GiB", + "logical_volumes": [ + { + "minsize": "2 GiB", + "fs_type": "swap", + } + ] + } + ] + } + } + } + config_path = tmp_path / "config.json" + with config_path.open("w") as config_file: + json.dump(config, config_file) + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.json:ro", + build_container, + "manifest", f"{container_ref}", + ]) + mkswap_stage = find_mkswap_stage_from(output) + assert mkswap_stage["options"].get("uuid") + swap_uuid = mkswap_stage["options"]["uuid"] + fstab_stage = find_fstab_stage_from(output) + filesystems = fstab_stage["options"]["filesystems"] + assert { + 'uuid': swap_uuid, + "vfs_type": "swap", + "path": "none", + "options": "defaults", + } in filesystems diff --git 
a/test/bib/testcases.py b/test/bib/testcases.py index ed0ee45e..ea10622f 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -84,17 +84,14 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseCentos(image="anaconda-iso"), ] if what == "qemu-boot": - # test partition defaults with qcow2 test_cases = [ - klass(image="qcow2") - for klass in (TestCaseCentos, TestCaseFedora) + # test default partitioning + TestCaseFedora(image="qcow2"), + # test with custom disk configs + TestCaseCentos(image="qcow2", disk_config="swap"), + TestCaseFedora(image="raw", disk_config="btrfs"), + TestCaseCentos(image="raw", disk_config="lvm"), ] - # and custom with raw (this is arbitrary, we could do it the - # other way around too - test_cases.append( - TestCaseCentos(image="raw", disk_config="lvm")) - test_cases.append( - TestCaseFedora(image="raw", disk_config="btrfs")) # do a cross arch test too if platform.machine() == "x86_64": # TODO: re-enable once diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 8ee8682d..aa69c35d 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -159,6 +159,10 @@ def maybe_create_disk_customizations(cfg, tc): "minsize": "10 GiB", "fs_type": "xfs", "mountpoint": "/", + }, + { + "minsize": "7 GiB", + "fs_type": "swap", } ] } @@ -179,6 +183,15 @@ def maybe_create_disk_customizations(cfg, tc): } ] } + elif tc.disk_config == "swap": + cfg["customizations"]["disk"] = { + "partitions": [ + { + "minsize": "123 MiB", + "fs_type": "swap", + } + ] + } else: raise ValueError(f"unsupported disk_config {tc.disk_config}") From 53d93c161687cd87785d9ccbcc15a2a4ecb08faa Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 6 Dec 2024 09:34:54 +0100 Subject: [PATCH 165/279] test: add regression test for gh748 swap space creation This commit adds a regression test for the manifest creation when custom disk are used. 
When just adding a swap device the rootlv size is not set/updated correctly which leads to a manifest like: ``` ... { "type": "org.osbuild.lvm2.create", "options": { "volumes": [ { "name": "swaplv", "size": "2147483648B" }, { "name": "rootlv", "size": "0B" } ] }, ... ``` for a config like: ``` "customizations": { "disk": { "partitions": [ { "type": "lvm", "minsize": "10 GiB", "logical_volumes": [ { "minsize": "2 GiB", "fs_type": "swap", } ] } ] } ``` --- test/bib/test_manifest.py | 6 ++++++ test/bib/testutil.py | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index b64cf5e8..6c4712b0 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -725,3 +725,9 @@ def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): "path": "none", "options": "defaults", } in filesystems + # run osbuild schema validation, see gh#748 + if not testutil.has_executable("osbuild"): + pytest.skip("no osbuild executable") + osbuild_manifest_path = tmp_path / "manifest.json" + osbuild_manifest_path.write_bytes(output) + subprocess.run(["osbuild", osbuild_manifest_path.as_posix()], check=True) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index aa69c35d..f2f09489 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -156,9 +156,9 @@ def maybe_create_disk_customizations(cfg, tc): "minsize": "10 GiB", "logical_volumes": [ { - "minsize": "10 GiB", "fs_type": "xfs", - "mountpoint": "/", + "minsize": "1 GiB", + "mountpoint": "/var/log", }, { "minsize": "7 GiB", From 4438e896f988fb5f7aa8c7fbd3738f15f40c5b7a Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 28 Oct 2024 18:41:18 +0100 Subject: [PATCH 166/279] test: run tests with centos-bootc:stream10 as well Rename TestCaseCentos to TestCaseS9S and add a TestCaseC10S. 
--- test/bib/testcases.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index ea10622f..f60aa8cb 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -54,7 +54,7 @@ class TestCaseFedora42(TestCase): @dataclasses.dataclass -class TestCaseCentos(TestCase): +class TestCaseC9S(TestCase): container_ref: str = os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", "quay.io/centos-bootc/centos-bootc:stream9") @@ -70,27 +70,36 @@ def test_testcase_nameing(): assert f"{tc}" == expected, f"{tc} != {expected}" +@dataclasses.dataclass +class TestCaseC10S(TestCase): + container_ref: str = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/centos-bootc:stream10") + osinfo_template: str = "CentOS Stream 10 ({arch})" + + def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "manifest": - return [TestCaseCentos(), TestCaseFedora()] + return [TestCaseC9S(), TestCaseFedora(), TestCaseC10S()] if what == "default-rootfs": # Fedora doesn't have a default rootfs - return [TestCaseCentos()] + return [TestCaseC9S()] if what == "ami-boot": - return [TestCaseCentos(image="ami"), TestCaseFedora(image="ami")] + return [TestCaseC9S(image="ami"), TestCaseFedora(image="ami")] if what == "anaconda-iso": return [ TestCaseFedora(image="anaconda-iso", sign=True), - TestCaseCentos(image="anaconda-iso"), + TestCaseC9S(image="anaconda-iso"), + TestCaseC10S(image="anaconda-iso"), ] if what == "qemu-boot": test_cases = [ # test default partitioning TestCaseFedora(image="qcow2"), # test with custom disk configs - TestCaseCentos(image="qcow2", disk_config="swap"), + TestCaseC9S(image="qcow2", disk_config="swap"), TestCaseFedora(image="raw", disk_config="btrfs"), - TestCaseCentos(image="raw", disk_config="lvm"), + TestCaseC9S(image="raw", disk_config="lvm"), ] # do a cross arch test too if platform.machine() == "x86_64": @@ -98,7 +107,7 @@ def gen_testcases(what): 
# pylint: disable=too-many-return-statements # https://github.com/osbuild/bootc-image-builder/issues/619 # is resolved # test_cases.append( - # TestCaseCentos(image="raw", target_arch="arm64")) + # TestCaseC9S(image="raw", target_arch="arm64")) pass elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too @@ -107,21 +116,21 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "all": return [ klass(image=img) - for klass in (TestCaseCentos, TestCaseFedora) + for klass in (TestCaseC9S, TestCaseFedora) for img in CLOUD_BOOT_IMAGE_TYPES + DISK_IMAGE_TYPES + ["anaconda-iso"] ] if what == "multidisk": # single test that specifies all image types image = "+".join(DISK_IMAGE_TYPES) return [ - TestCaseCentos(image=image), + TestCaseC9S(image=image), TestCaseFedora(image=image), ] # Smoke test that all supported --target-arch architecture can # create a manifest if what == "target-arch-smoke": return [ - TestCaseCentos(target_arch="arm64"), + TestCaseC9S(target_arch="arm64"), # TODO: merge with TestCaseFedora once the arches are build there TestCaseFedora42(target_arch="ppc64le"), TestCaseFedora42(target_arch="s390x"), From dc12c74585ee1486e4b65dea818240a052d684aa Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 28 Nov 2024 18:35:45 +0100 Subject: [PATCH 167/279] test: tweak TestCaseC10S testcase and osinfo_for helper --- test/bib/test_build.py | 13 ++++++++++--- test/bib/testcases.py | 15 +++++++-------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 02e9da0f..193f466b 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -623,11 +623,18 @@ def test_iso_installs(image_type): def osinfo_for(it: ImageBuildResult, arch: str) -> str: + base = "Media is an installer for OS" if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"): - return f"CentOS Stream 9 ({arch})" + return f"{base} 'CentOS Stream 9 ({arch})'\n" + if 
it.container_ref.endswith("/centos-bootc/centos-bootc:stream10"): + # XXX: uncomment once + # https://gitlab.com/libosinfo/osinfo-db/-/commit/fc811ba5a792967e22a0108de5a245b23da3cc66 + # gets released + # return f"CentOS Stream 10 ({arch})" + return "" if "/fedora/fedora-bootc:" in it.container_ref: ver = it.container_ref.rsplit(":", maxsplit=1)[1] - return f"Fedora Server {ver} ({arch})" + return f"{base} 'Fedora Server {ver} ({arch})'\n" raise ValueError(f"unknown osinfo string for '{it.container_ref}'") @@ -643,7 +650,7 @@ def test_iso_os_detection(image_type): installer_iso_path, ], capture_output=True, text=True, check=True) osinfo_output = result.stdout - expected_output = f"Media is bootable.\nMedia is an installer for OS '{osinfo_for(image_type, arch)}'\n" + expected_output = f"Media is bootable.\n{osinfo_for(image_type, arch)}" assert osinfo_output == expected_output diff --git a/test/bib/testcases.py b/test/bib/testcases.py index f60aa8cb..78d8e926 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -60,6 +60,13 @@ class TestCaseC9S(TestCase): "quay.io/centos-bootc/centos-bootc:stream9") +@dataclasses.dataclass +class TestCaseC10S(TestCase): + container_ref: str = os.getenv( + "BIB_TEST_BOOTC_CONTAINER_TAG", + "quay.io/centos-bootc/centos-bootc:stream10") + + def test_testcase_nameing(): """ Ensure the testcase naming does not change without us knowing as those @@ -70,14 +77,6 @@ def test_testcase_nameing(): assert f"{tc}" == expected, f"{tc} != {expected}" -@dataclasses.dataclass -class TestCaseC10S(TestCase): - container_ref: str = os.getenv( - "BIB_TEST_BOOTC_CONTAINER_TAG", - "quay.io/centos-bootc/centos-bootc:stream10") - osinfo_template: str = "CentOS Stream 10 ({arch})" - - def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "manifest": return [TestCaseC9S(), TestCaseFedora(), TestCaseC10S()] From f6a818ffd0a092bbec8de6584ca93b202ea9cffa Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 5 Dec 2024 
11:03:30 +0100 Subject: [PATCH 168/279] main,osbuildprogress: add `--progress=term,plain,debug` support This adds a new `progress` flag that makes use of the osbuild jsonseq progress information to show progress and hide the low-level details from the user. --- test/bib/containerbuild.py | 5 ++--- test/bib/test_progress.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 test/bib/test_progress.py diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index d751ccfb..44154c75 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -68,16 +68,15 @@ def build_fake_container_fixture(tmpdir_factory, build_container): fake_osbuild_path = tmp_path / "fake-osbuild" fake_osbuild_path.write_text(textwrap.dedent("""\ - #!/bin/sh -e + #!/bin/bash -e # injest generated manifest from the images library, if we do not # do this images may fail with "broken" pipe errors - cat - + cat - >/dev/null mkdir -p /output/qcow2 echo "fake-disk.qcow2" > /output/qcow2/disk.qcow2 - echo "Done" """), encoding="utf8") cntf_path = tmp_path / "Containerfile" diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py new file mode 100644 index 00000000..2c76ff2a --- /dev/null +++ b/test/bib/test_progress.py @@ -0,0 +1,33 @@ +import subprocess + +# pylint: disable=unused-import +from test_opts import container_storage_fixture +from containerbuild import build_container_fixture, build_fake_container_fixture + + +def bib_cmd(container_storage, output_path, build_fake_container): + return [ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + "build", + "quay.io/centos-bootc/centos-bootc:stream9", + ] + + +def test_progress_debug(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + 
output_path.mkdir(exist_ok=True) + + cmdline = bib_cmd(container_storage, output_path, build_fake_container) + cmdline.append("--progress=debug") + res = subprocess.run(cmdline, capture_output=True, check=True, text=True) + assert res.stderr.count("Start progressbar") == 1 + assert res.stderr.count("Manifest generation step") == 1 + assert res.stderr.count("Image building step") == 1 + assert res.stderr.count("Build complete") == 1 + assert res.stderr.count("Stop progressbar") == 1 + assert res.stdout.strip() == "" From 2b397bdde232de9dfdd8d0c1160f56ad72949da6 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 11 Dec 2024 09:57:49 +0100 Subject: [PATCH 169/279] progress: implement pb.Pool without raw terminal access This commit implements enough of `pb.Pool` for our needs without the need to implement raw terminal access. It turns out that by default podman does not connect the full tty, even in `--privileged` mode. This is a sensible security default but it means `pb.Pool` does not work as it wants to set the terminal into "raw" mode and will fail with an ioctl() error when not running on a terminal. However we really just need simple ANSI sequences to render the pool so this seems unnecessary. The initial idea was to just use `--log-driver=passthrough-tty` but that is not available on podman 4.x. (which is part of Ubuntu 24.04 LTS and the GH actions). So this commit just implemnts a custom pool like renderer. 
--- test/bib/test_progress.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index 2c76ff2a..b531eb92 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -1,12 +1,15 @@ import subprocess -# pylint: disable=unused-import +# pylint: disable=unused-import,duplicate-code from test_opts import container_storage_fixture from containerbuild import build_container_fixture, build_fake_container_fixture -def bib_cmd(container_storage, output_path, build_fake_container): - return [ +def test_progress_debug(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + cmdline = [ "podman", "run", "--rm", "--privileged", "--security-opt", "label=type:unconfined_t", @@ -16,13 +19,6 @@ def bib_cmd(container_storage, output_path, build_fake_container): "build", "quay.io/centos-bootc/centos-bootc:stream9", ] - - -def test_progress_debug(tmp_path, container_storage, build_fake_container): - output_path = tmp_path / "output" - output_path.mkdir(exist_ok=True) - - cmdline = bib_cmd(container_storage, output_path, build_fake_container) cmdline.append("--progress=debug") res = subprocess.run(cmdline, capture_output=True, check=True, text=True) assert res.stderr.count("Start progressbar") == 1 @@ -31,3 +27,24 @@ def test_progress_debug(tmp_path, container_storage, build_fake_container): assert res.stderr.count("Build complete") == 1 assert res.stderr.count("Stop progressbar") == 1 assert res.stdout.strip() == "" + + +def test_progress_term(tmp_path, container_storage, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + cmdline = [ + "podman", "run", "--rm", + "--privileged", + "--security-opt", "label=type:unconfined_t", + "-v", f"{container_storage}:/var/lib/containers/storage", + "-v", f"{output_path}:/output", + build_fake_container, + 
"build", + # explicitly select term progress + "--progress=term", + "quay.io/centos-bootc/centos-bootc:stream9", + ] + res = subprocess.run(cmdline, capture_output=True, text=True, check=False) + assert res.returncode == 0 + assert "[|] Manifest generation step" in res.stderr From 43c7ae1fedef3db3a8f67682b0e5c9e38405ca33 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 12 Dec 2024 19:45:44 +0100 Subject: [PATCH 170/279] progress: auto-select progress based on if we run on a terminal This commit adds automatic progress bar selection based on checking if we run on a terminal or not. When running on a terminal we use the nice "terminalProgressBar". If that is not set we assuem we run in a script or CI/CD environment and select plainProgressBar. Thanks Colin for the hint about the bad integration test. --- test/bib/test_progress.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index b531eb92..d83cad97 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -5,21 +5,18 @@ from containerbuild import build_container_fixture, build_fake_container_fixture -def test_progress_debug(tmp_path, container_storage, build_fake_container): +def test_progress_debug(tmp_path, build_fake_container): output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) cmdline = [ "podman", "run", "--rm", "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", f"{container_storage}:/var/lib/containers/storage", - "-v", f"{output_path}:/output", build_fake_container, "build", + "--progress=debug", "quay.io/centos-bootc/centos-bootc:stream9", ] - cmdline.append("--progress=debug") res = subprocess.run(cmdline, capture_output=True, check=True, text=True) assert res.stderr.count("Start progressbar") == 1 assert res.stderr.count("Manifest generation step") == 1 @@ -29,22 +26,41 @@ def test_progress_debug(tmp_path, container_storage, 
build_fake_container): assert res.stdout.strip() == "" -def test_progress_term(tmp_path, container_storage, build_fake_container): +def test_progress_term_works_without_tty(tmp_path, build_fake_container): output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) cmdline = [ "podman", "run", "--rm", + # note that "-t" is missing here "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", f"{container_storage}:/var/lib/containers/storage", - "-v", f"{output_path}:/output", build_fake_container, "build", - # explicitly select term progress + # explicitly selecting term progress works even when there is no tty + # (i.e. we just need ansi terminal support) "--progress=term", "quay.io/centos-bootc/centos-bootc:stream9", ] res = subprocess.run(cmdline, capture_output=True, text=True, check=False) assert res.returncode == 0 assert "[|] Manifest generation step" in res.stderr + + +def test_progress_term_autoselect(tmp_path, build_fake_container): + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + + cmdline = [ + "podman", "run", "--rm", + # we have a terminal + "-t", + "--privileged", + build_fake_container, + "build", + # note that we do not select a --progress here so auto-select is used + "quay.io/centos-bootc/centos-bootc:stream9", + ] + res = subprocess.run(cmdline, capture_output=True, text=True, check=False) + assert res.returncode == 0 + # its curious that we get the output on stdout here, podman weirdness? + assert "[|] Manifest generation step" in res.stdout From b75f64b6cb009c56c97c68a68005598445b8541b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 19 Dec 2024 17:28:31 +0100 Subject: [PATCH 171/279] test: disable the fedora iso test for now This commit disables the fedora ISO test for now. The download of the RPMs tends to be very fragile, often mirrors are outdated or broken and we get many 404 or 500 errors. 
The real fix is of couse to switch to a librepo based downloading in osbuild but for now we just disable this test so that we can merge PRs again without having to retry multiple times :( --- test/bib/testcases.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 78d8e926..773210e7 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -87,7 +87,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements return [TestCaseC9S(image="ami"), TestCaseFedora(image="ami")] if what == "anaconda-iso": return [ - TestCaseFedora(image="anaconda-iso", sign=True), + # 2024-12-19: disabled for now until the mirror situation becomes + # a bit more stable + # TestCaseFedora(image="anaconda-iso", sign=True), TestCaseC9S(image="anaconda-iso"), TestCaseC10S(image="anaconda-iso"), ] From ec11df8b7124919f7208ac2395e9085b2d68d33e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Tue, 17 Dec 2024 14:48:14 +0100 Subject: [PATCH 172/279] bootc-image-builder/main: extend version command Extend version with printing also the timestamp and "tainted" if not all files are checked in. Also support calling it with `--version` or `-v`. 
--- test/bib/test_opts.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 188b4f1f..afb3d096 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -148,7 +148,8 @@ def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container) assert res.stderr.count(needle) == 1 -def test_bib_version(tmp_path, container_storage, build_fake_container): +@pytest.mark.parametrize("version_argument", ["version", "--version", "-v"]) +def test_bib_version(tmp_path, container_storage, build_fake_container, version_argument): output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) @@ -159,7 +160,7 @@ def test_bib_version(tmp_path, container_storage, build_fake_container): "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", build_fake_container, - "version", + version_argument, ], check=True, capture_output=True, text=True) expected_rev = "unknown" From d51b4b817a588409885b8b34902b58ba9cbda032 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 23 Oct 2024 12:08:13 +0200 Subject: [PATCH 173/279] bib: remove usage of `BootcLegacyDiskImage` (for qemu-9.1) This commit enables bib to work with the qemu-9.1+openat2 patches as proposed in https://src.fedoraproject.org/rpms/qemu/pull-request/70. With the qemu-user-statuc-aarch64 rpm produced by `fedpkg mockbuild` from this package the cross-arch test build works again. ``` $ sudo PYTHONPATH=. 
pytest './test/test_build.py::test_image_boots[quay.io/centos-bootc/centos-bootc:stream9,raw,CentOS Stream 9 ({arch}),arm64]' -s -vv ============================= test session starts ============================== platform linux -- Python 3.12.7, pytest-7.4.3, pluggy-1.3.0 -- /usr/bin/python3 cachedir: .pytest_cache rootdir: /home/mvogt/devel/osbuild/bootc-image-builder configfile: pytest.ini plugins: repeat-0.9.3, xdist-3.5.0 collected 1 item test/test_build.py::test_image_boots[quay.io/centos-bootc/centos-bootc:stream9,raw,CentOS Stream 9 ({arch}),arm64] [1/2] STEP 1/9: FROM registry.fedoraproject.org/fedora:40 AS builder ... PASSEDChecking disk usage for /var/tmp/bib-tests/shared0/68b20145da2cd3f2/image/disk.raw NOTE: disk usage after /var/tmp/bib-tests/shared0/68b20145da2cd3f2/image/disk.raw: 712472.71936 / 1998694.907904 Untagged: quay.io/centos-bootc/centos-bootc:stream9 Deleted: 0de1f90b11cbe7e2768101f6c55b4dd9841c13a2f87c8d9c177aea74ade88050 ============================= slowest 10 durations ============================= 1033.54s setup test/test_build.py::test_image_boots[quay.io/centos-bootc/centos-bootc:stream9,raw,CentOS Stream 9 ({arch}),arm64] 67.62s call test/test_build.py::test_image_boots[quay.io/centos-bootc/centos-bootc:stream9,raw,CentOS Stream 9 ({arch}),arm64] 2.30s teardown test/test_build.py::test_image_boots[quay.io/centos-bootc/centos-bootc:stream9,raw,CentOS Stream 9 ({arch}),arm64] ======================== 1 passed in 1103.58s (0:18:23) ======================== ``` --- test/bib/testcases.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 773210e7..4cf76751 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -104,12 +104,8 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements ] # do a cross arch test too if platform.machine() == "x86_64": - # TODO: re-enable once - # https://github.com/osbuild/bootc-image-builder/issues/619 - # is 
resolved - # test_cases.append( - # TestCaseC9S(image="raw", target_arch="arm64")) - pass + test_cases.append( + TestCaseC9S(image="raw", target_arch="arm64")) elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too pass From 64bdf73bd538e5ecb43c94cda31bc9f0c473e97b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 8 Nov 2024 16:15:40 +0100 Subject: [PATCH 174/279] workflow: use qemu-user-static with openat2 patches This commit adds the https://launchpad.net/~mvo/+archive/ubuntu/qemu/ PPA that contains a backport of the openat2 patches for qemu-9 so that we can test cross-arch building again. --- .github.com/workflows/bibtests.yaml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 73d576f4..0a5cc063 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,7 +79,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 qemu-system-x86 qemu-efi-aarch64 qemu-system-arm qemu-user-static pylint libosinfo-bin + sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 pylint libosinfo-bin - name: Diskspace (before) run: | df -h @@ -90,6 +90,18 @@ jobs: sudo rm -rf /var/lib/containers/storage sudo mkdir -p /etc/containers echo -e "[storage]\ndriver = \"overlay\"\nrunroot = \"/run/containers/storage\"\ngraphroot = \"/var/lib/containers/storage\"" | sudo tee /etc/containers/storage.conf + - name: Updating qemu-user + run: | + # get qemu-9 with openat2 patches via qemu-user-static, that + # has no dependencies so just install. + # XXX: remove once ubuntu ships qemu-9.1 + sudo apt install -y software-properties-common + sudo apt-add-repository -y ppa:mvo/qemu + sudo apt install --no-install-recommends -y qemu-user-static + # Now remove ppa again, the metadata confuses apt. 
Then install + # qemu-system-* from the regular repo again. + sudo apt-add-repository --remove -y ppa:mvo/qemu + sudo apt install -y qemu-system-arm qemu-system-x86 qemu-efi-aarch64 - name: Install python test deps run: | # make sure test deps are available for root From e07f449492cd9f50df882f256b69bbd869bc0494 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 6 Jan 2025 11:28:05 +0100 Subject: [PATCH 175/279] bib: use plain squashfs for the ISO rootfs This commit moves to the new "flat" squashfs rootfs image that is now available in the `images` library (c.f. https://github.com/osbuild/images/pull/1105). This will ensures we are no longer using the previous "ext4" intermediate image that gave problems for big rootfses. Closes: https://github.com/osbuild/bootc-image-builder/issues/733 --- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_build.py | 18 +++++++++++++++++- test/bib/test_manifest.py | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 0a5cc063..a253e66a 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -79,7 +79,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 pylint libosinfo-bin + sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 pylint libosinfo-bin squashfs-tools - name: Diskspace (before) run: | df -h diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 193f466b..c8ab9151 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -9,7 +9,7 @@ import subprocess import tempfile import uuid -from contextlib import contextmanager +from contextlib import contextmanager, ExitStack from typing import NamedTuple from dataclasses import dataclass @@ -654,6 +654,22 @@ def test_iso_os_detection(image_type): assert osinfo_output == expected_output 
+@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") +@pytest.mark.skipif(not testutil.has_executable("unsquashfs"), reason="need unsquashfs") +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) +def test_iso_install_img_is_squashfs(tmp_path, image_type): + installer_iso_path = image_type.img_path + with ExitStack() as cm: + mount_point = tmp_path / "cdrom" + mount_point.mkdir() + subprocess.check_call(["mount", installer_iso_path, os.fspath(mount_point)]) + cm.callback(subprocess.check_call, ["umount", os.fspath(mount_point)]) + # ensure install.img is the "flat" squashfs, before PR#777 the content + # was an intermediate ext4 image "squashfs-root/LiveOS/rootfs.img" + output = subprocess.check_output(["unsquashfs", "-ls", mount_point / "images/install.img"], text=True) + assert "usr/bin/bootc" in output + + @pytest.mark.parametrize("images", gen_testcases("multidisk"), indirect=["images"]) def test_multi_build_request(images): artifacts = set() diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 6c4712b0..4ad3af1a 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -58,7 +58,7 @@ def test_iso_manifest_smoke(build_container, tc): ]) manifest = json.loads(output) # just some basic validation - expected_pipeline_names = ["build", "anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"] + expected_pipeline_names = ["build", "anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"] assert manifest["version"] == "2" assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names From 4213b23dc7008f5f1a8f5584ca186013a8c82664 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 9 Jan 2025 10:38:38 +0100 Subject: [PATCH 176/279] main,test: tweak cmdVersion This comit tweak the new and very nice functionality of cmdVersion in the following way: - Rename to versionFromBuildInfo as it 
is no longer a "cmd*" (i.e. it no longer takes a cobra.Command) - Use switch/case as it's slightly more compact than if/else - Just build the string directly instead of using a list (slightly shorter) - Change "build_status: ok" to "build_tainted" with a boolean value to ensure this is easier to parse in yaml (and more descriptive as "status" is quite generic and may mean many things to people). - Extend the test_bib_version to test for the full strings prefixes in test_opts. --- test/bib/test_opts.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index afb3d096..94cb5efe 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -169,8 +169,11 @@ def test_bib_version(tmp_path, container_storage, build_fake_container, version_ capture_output=True, text=True, check=False) if git_res.returncode == 0: expected_rev = git_res.stdout.strip() - needle = f"revision: {expected_rev}" - assert needle in res.stdout + assert f"build_revision: {expected_rev}" in res.stdout + assert "build_time: " in res.stdout + assert "build_tainted: " in res.stdout + # we have a final newline + assert res.stdout[-1] == "\n" def test_bib_no_outside_container_warning_in_container(tmp_path, container_storage, build_fake_container): From 7684bebe8ceaf730ef35addbdc0d3a495478dfbc Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 9 Jan 2025 10:22:16 +0100 Subject: [PATCH 177/279] main: add --verbose,-v persistent flag that increases verbosity This commit adds `--verbose,-v` which will increase the verbosity of logrus and also switch the --progress to "verbose". This is addressing the feedback we got in https://github.com/osbuild/bootc-image-builder/pull/765 and a followup for #776 The new `-v` clashes unfortunately with cobras default for version, so there is no single dash flag for version anymore. Most unix tools (e.g. cp,rsync,mv,curl,ssh,tar) use "-v" for "--verbose" so IMHO we should follow suite. 
Unfortuantely there is no consistency in linux, e.g. git,gcc are counter examples where it means version). I would still go with -v for verbose as ssh,tar,curl are probably used more often to get verbose output. --- test/bib/test_opts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 94cb5efe..28641f13 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -148,7 +148,7 @@ def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container) assert res.stderr.count(needle) == 1 -@pytest.mark.parametrize("version_argument", ["version", "--version", "-v"]) +@pytest.mark.parametrize("version_argument", ["version", "--version"]) def test_bib_version(tmp_path, container_storage, build_fake_container, version_argument): output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) From 921176fffa88218b7bdfc70b4ecde044a2b32669 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 8 Jan 2025 09:06:04 +0100 Subject: [PATCH 178/279] test: enable librepo as part of the test cases This commit make the use of librepo part of the testcases and enables it for the problematic testcases like fedora and centos-10. Eventually we should switch entirely to librepo but there is a regression risk with secrets so for now make it optional. 
--- test/bib/test_build.py | 3 ++- test/bib/test_manifest.py | 20 ++++++++++++++++++++ test/bib/testcases.py | 5 +++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index c8ab9151..c9944a20 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -442,7 +442,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ *target_arch_args, *tc.bib_rootfs_args(), "--local" if tc.local else "--local=false", - "--tls-verify=false" if tc.sign else "--tls-verify=true" + "--tls-verify=false" if tc.sign else "--tls-verify=true", + f"--use-librepo={tc.use_librepo}", ]) # print the build command for easier tracing diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 4ad3af1a..ac02351c 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -731,3 +731,23 @@ def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): osbuild_manifest_path = tmp_path / "manifest.json" osbuild_manifest_path.write_bytes(output) subprocess.run(["osbuild", osbuild_manifest_path.as_posix()], check=True) + + +@pytest.mark.parametrize("use_librepo", [False, True]) +def test_iso_manifest_use_librepo(build_container, use_librepo): + # no need to parameterize this test, --use-librepo behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + "--type=anaconda-iso", + container_ref, + f"--use-librepo={use_librepo}", + ]) + manifest = json.loads(output) + if use_librepo: + assert "org.osbuild.librepo" in manifest["sources"] + else: + assert "org.osbuild.curl" in manifest["sources"] diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 4cf76751..ac2b9f44 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -27,6 +27,8 @@ class TestCase: sign: bool = False # use special disk_config like "lvm" 
disk_config: str = "" + # use librepo for the downloading + use_librepo: bool = False def bib_rootfs_args(self): if self.rootfs: @@ -45,12 +47,14 @@ def __str__(self): class TestCaseFedora(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:40" rootfs: str = "btrfs" + use_librepo: bool = True @dataclasses.dataclass class TestCaseFedora42(TestCase): container_ref: str = "quay.io/fedora/fedora-bootc:42" rootfs: str = "btrfs" + use_librepo: bool = True @dataclasses.dataclass @@ -65,6 +69,7 @@ class TestCaseC10S(TestCase): container_ref: str = os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", "quay.io/centos-bootc/centos-bootc:stream10") + use_librepo: bool = True def test_testcase_nameing(): From 67f8c6d45338b857102d5f2e57caa93beeb7ce5b Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Fri, 3 May 2024 12:36:52 +0100 Subject: [PATCH 179/279] bib: deprecate the `--local` flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pulling container images before building would break in the case of authenticated images on podman machine, since the auth file lives on the host and not podman machine and won't know about it. This commit deprecates the `--local` flag and warns users when it is passed to the CLI so that this won't break things for anyone who might already be using the flag. This change means that the user will have to ensure that the container is pulled to the local container store before initiating the build. 
Co-authored-by: Ondřej Budai --- test/bib/test_build.py | 1 - test/bib/test_manifest.py | 11 ++++++----- test/bib/test_opts.py | 2 +- test/bib/testcases.py | 2 -- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index c9944a20..8f732bad 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -441,7 +441,6 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ *upload_args, *target_arch_args, *tc.bib_rootfs_args(), - "--local" if tc.local else "--local=false", "--tls-verify=false" if tc.sign else "--tls-verify=true", f"--use-librepo={tc.use_librepo}", ]) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index ac02351c..b1726e9e 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -81,7 +81,7 @@ def test_manifest_disksize(tmp_path, build_container, tc): manifest_str = subprocess.check_output([ *testutil.podman_run_common, build_container, - "manifest", "--local", + "manifest", *tc.bib_rootfs_args(), f"localhost/{container_tag}", ], encoding="utf8") @@ -100,10 +100,11 @@ def test_manifest_local_checks_containers_storage_errors(build_container): "--privileged", "--security-opt", "label=type:unconfined_t", build_container, - "manifest", "--local", "arg-not-used", + "manifest", "arg-not-used", ], check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8") assert res.returncode == 1 - err = 'local storage not working, did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' + err = 'could not access container storage, ' + \ + 'did you forget -v /var/lib/containers/storage:/var/lib/containers/storage?' 
assert err in res.stderr @@ -118,7 +119,7 @@ def test_manifest_local_checks_containers_storage_works(tmp_path, build_containe subprocess.run([ *testutil.podman_run_common, build_container, - "manifest", "--local", + "manifest", *tc.bib_rootfs_args(), f"localhost/{container_tag}", ], check=True, encoding="utf8") @@ -138,7 +139,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): *testutil.podman_run_common, build_container, "manifest", "--target-arch=aarch64", - "--local", f"localhost/{container_tag}" + f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 28641f13..e4ae74be 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -144,7 +144,7 @@ def test_bib_errors_only_once(tmp_path, container_storage, build_fake_container) build_fake_container, "localhost/no-such-image", ], check=False, capture_output=True, text=True) - needle = "cannot build manifest: failed to pull container image:" + needle = "cannot build manifest: failed to inspect the image:" assert res.stderr.count(needle) == 1 diff --git a/test/bib/testcases.py b/test/bib/testcases.py index ac2b9f44..729e6b2b 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -18,8 +18,6 @@ class TestCase: image: str = "" # target_arch is the target archicture, empty means current arch target_arch: str = "" - # local means that the container should be pulled locally ("--local" flag) - local: bool = False # rootfs to use (e.g. ext4), some containers like fedora do not # have a default rootfs. If unset the container default is used. 
rootfs: str = "" From baeb2eee9768d874ba86d9c98a6b5d8f6f03aef7 Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Tue, 7 May 2024 14:23:17 +0100 Subject: [PATCH 180/279] tests: pull containers into local storage Ensure that the containers have been copied into local storage for all test cases. We need to explicitly pull the container into local containers storage with the correct arch otherwise cross-arch building fails. The helper function uses the host-arch as a fallback when no target arch is provided. --- test/bib/test_build.py | 23 +++-------------------- test/bib/test_manifest.py | 18 +++++++++++++++++- test/bib/test_opts.py | 26 +++++++++++++++++++++----- test/bib/testutil.py | 14 ++++++++++++++ 4 files changed, 55 insertions(+), 26 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 8f732bad..fbf1f60f 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -2,10 +2,8 @@ import os import pathlib import platform -import random import re import shutil -import string import subprocess import tempfile import uuid @@ -216,20 +214,7 @@ def image_type_fixture(shared_tmpdir, build_container, request, force_aws_upload In the case an image is being built from a local container, the function will build the required local container for the test. 
""" - container_ref = request.param.container_ref - - if request.param.local: - cont_tag = "localhost/cont-base-" + "".join(random.choices(string.digits, k=12)) - - # we are not cross-building local images (for now) - request.param.target_arch = "" - - # copy the container into containers-storage - subprocess.check_call([ - "skopeo", "copy", - f"docker://{container_ref}", - f"containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]{cont_tag}" - ]) + testutil.pull_container(request.param.container_ref, request.param.target_arch) with build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf) as build_results: @@ -243,6 +228,7 @@ def images_fixture(shared_tmpdir, build_container, request, force_aws_upload, gp Build one or more images inside the passed build_container and return an ImageBuildResult array with the resulting image path and user/password """ + testutil.pull_container(request.param.container_ref, request.param.target_arch) with build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_conf, registry_conf) as build_results: yield build_results @@ -417,12 +403,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ "-v", f"{config_json_path}:/config.json:ro", "-v", f"{output_path}:/output", "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", # mount the host's containers storage ] - # we need to mount the host's container store - if tc.local: - cmd.extend(["-v", "/var/lib/containers/storage:/var/lib/containers/storage"]) - if tc.sign: sign_container_image(gpg_conf, registry_conf, tc.container_ref) signed_image_args = [ diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index b1726e9e..746f9461 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -31,6 +31,8 @@ def find_image_size_from(manifest_str): 
@pytest.mark.parametrize("tc", gen_testcases("manifest")) def test_manifest_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + output = subprocess.check_output([ *testutil.podman_run_common, build_container, @@ -50,6 +52,8 @@ def test_manifest_smoke(build_container, tc): @pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) def test_iso_manifest_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + output = subprocess.check_output([ *testutil.podman_run_common, build_container, @@ -158,6 +162,7 @@ def find_rootfs_type_from(manifest_str): @pytest.mark.parametrize("tc", gen_testcases("default-rootfs")) def test_manifest_rootfs_respected(build_container, tc): # TODO: derive container and fake "bootc install print-configuration"? + testutil.pull_container(tc.container_ref) output = subprocess.check_output([ *testutil.podman_run_common, build_container, @@ -197,6 +202,7 @@ def find_user_stage_from(manifest_str): def test_manifest_user_customizations_toml(tmp_path, build_container): # no need to parameterize this test, toml is the same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) config_toml_path = tmp_path / "config.toml" config_toml_path.write_text(textwrap.dedent("""\ @@ -224,6 +230,7 @@ def test_manifest_user_customizations_toml(tmp_path, build_container): def test_manifest_installer_customizations(tmp_path, build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) config_toml_path = tmp_path / "config.toml" config_toml_path.write_text(textwrap.dedent("""\ @@ -257,6 +264,7 @@ def test_manifest_installer_customizations(tmp_path, build_container): def test_mount_ostree_error(tmpdir_factory, build_container): # no need to parameterize this test, toml is the same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + 
testutil.pull_container(container_ref) cfg = { "blueprint": { @@ -305,6 +313,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): ) def test_manifest_checks_build_container_is_bootc(build_container, container_ref, should_error, expected_error): def check_image_ref(): + testutil.pull_container(container_ref) subprocess.check_output([ *testutil.podman_run_common, build_container, @@ -321,6 +330,8 @@ def check_image_ref(): @pytest.mark.parametrize("tc", gen_testcases("target-arch-smoke")) def test_manifest_target_arch_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + # TODO: actually build an image too output = subprocess.check_output([ *testutil.podman_run_common, @@ -373,6 +384,8 @@ def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + testutil.pull_container(tc.container_ref, tc.target_arch) + output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{output_path}:/output", @@ -411,6 +424,7 @@ def find_fstab_stage_from(manifest_str): ]) def test_manifest_fs_customizations(tmp_path, build_container, fscustomizations, rootfs): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) config = { "customizations": { @@ -497,7 +511,9 @@ def assert_fs_customizations(customizations, fstype, manifest): ({}, "btrfs"), ]) def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomizations, rootfs): + target_arch = "aarch64" container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref, target_arch) config = { "customizations": { @@ -513,7 +529,7 @@ def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomiza "--entrypoint=/usr/bin/bootc-image-builder", build_container, f"--rootfs={rootfs}", - "--target-arch=aarch64", + f"--target-arch={target_arch}", 
"manifest", f"{container_ref}", ]) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index e4ae74be..0827392e 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -3,6 +3,7 @@ import subprocess import pytest +import testutil # pylint: disable=unused-import from containerbuild import build_container_fixture, build_fake_container_fixture @@ -25,6 +26,9 @@ def test_bib_chown_opts(tmp_path, container_storage, build_fake_container, chown output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + subprocess.check_call([ "podman", "run", "--rm", "--privileged", @@ -32,7 +36,7 @@ def test_bib_chown_opts(tmp_path, container_storage, build_fake_container, chown "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", build_fake_container, - "quay.io/centos-bootc/centos-bootc:stream9", + container_ref, ] + chown_opt) expected_output_disk = output_path / "qcow2/disk.qcow2" for p in output_path, expected_output_disk: @@ -52,6 +56,9 @@ def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_a output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + res = subprocess.run([ "podman", "run", "--rm", "--privileged", @@ -60,7 +67,7 @@ def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_a "-v", f"{output_path}:/output", build_fake_container, "--type=iso", - "quay.io/centos-bootc/centos-bootc:stream9", + container_ref, ] + target_arch_opt, check=False, capture_output=True, text=True) if expected_err == "": assert res.returncode == 0 @@ -80,6 +87,9 @@ def test_bib_tls_opts(tmp_path, container_storage, build_fake_container, tls_opt output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + 
testutil.pull_container(container_ref) + subprocess.check_call([ "podman", "run", "--rm", "--privileged", @@ -87,7 +97,7 @@ def test_bib_tls_opts(tmp_path, container_storage, build_fake_container, tls_opt "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", build_fake_container, - "quay.io/centos-bootc/centos-bootc:stream9" + container_ref, ] + tls_opt) podman_log = output_path / "podman.log" assert expected_cmdline in podman_log.read_text() @@ -98,6 +108,9 @@ def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + log_debug = ["--log-level", "debug"] if with_debug else [] res = subprocess.run([ "podman", "run", "--rm", @@ -107,7 +120,7 @@ def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, "-v", f"{output_path}:/output", build_fake_container, *log_debug, - "quay.io/centos-bootc/centos-bootc:stream9" + container_ref, ], check=True, capture_output=True, text=True) assert ('level=debug' in res.stderr) == with_debug @@ -180,6 +193,9 @@ def test_bib_no_outside_container_warning_in_container(tmp_path, container_stora output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + res = subprocess.run([ "podman", "run", "--rm", "--privileged", @@ -187,6 +203,6 @@ def test_bib_no_outside_container_warning_in_container(tmp_path, container_stora "-v", f"{container_storage}:/var/lib/containers/storage", "-v", f"{output_path}:/output", build_fake_container, - "quay.io/centos-bootc/centos-bootc:stream9" + container_ref, ], check=True, capture_output=True, text=True) assert "running outside a container" not in res.stderr diff --git a/test/bib/testutil.py b/test/bib/testutil.py index f2f09489..1cdbcaa7 100644 --- 
a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -213,3 +213,17 @@ def get_ip_from_default_route(): "default" ], check=True, capture_output=True, text=True).stdout return default_route.split()[8] + + +def pull_container(container_ref, target_arch=""): + if target_arch == "": + target_arch = platform.machine() + + if target_arch not in ["x86_64", "amd64", "aarch64", "arm64", "s390x", "ppc64le"]: + raise RuntimeError(f"unknown host arch: {target_arch}") + + subprocess.run([ + "podman", "pull", + "--arch", target_arch, + container_ref, + ], check=True) From b3bc1ad8dfa5bb042637c92b56c68ee10d391e0c Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Tue, 3 Sep 2024 12:45:43 +0100 Subject: [PATCH 181/279] bib: disable `--tls-verify` flag Since all containers are coming from local storage and require the user to pull in the container before-hand, we can disable the `--tls-verify` flag. The containers will not be resolved from a remote registry but rather from the local container store. 
--- test/bib/test_opts.py | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/test/bib/test_opts.py b/test/bib/test_opts.py index 0827392e..c12a8a76 100644 --- a/test/bib/test_opts.py +++ b/test/bib/test_opts.py @@ -76,33 +76,6 @@ def test_opts_arch_is_same_arch_is_fine(tmp_path, build_fake_container, target_a assert expected_err in res.stderr -@pytest.mark.parametrize("tls_opt,expected_cmdline", [ - ([], "--tls-verify=true"), - (["--tls-verify"], "--tls-verify=true"), - (["--tls-verify=true"], "--tls-verify=true"), - (["--tls-verify=false"], "--tls-verify=false"), - (["--tls-verify=0"], "--tls-verify=false"), -]) -def test_bib_tls_opts(tmp_path, container_storage, build_fake_container, tls_opt, expected_cmdline): - output_path = tmp_path / "output" - output_path.mkdir(exist_ok=True) - - container_ref = "quay.io/centos-bootc/centos-bootc:stream9" - testutil.pull_container(container_ref) - - subprocess.check_call([ - "podman", "run", "--rm", - "--privileged", - "--security-opt", "label=type:unconfined_t", - "-v", f"{container_storage}:/var/lib/containers/storage", - "-v", f"{output_path}:/output", - build_fake_container, - container_ref, - ] + tls_opt) - podman_log = output_path / "podman.log" - assert expected_cmdline in podman_log.read_text() - - @pytest.mark.parametrize("with_debug", [False, True]) def test_bib_log_level_smoke(tmp_path, container_storage, build_fake_container, with_debug): output_path = tmp_path / "output" From 4582bacbb60fa08cb919d0b068fd34f6ce491b99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 17 Dec 2024 11:09:10 +0100 Subject: [PATCH 182/279] tests: update all tests to pull the container This commit ensures all tests pull the container into local storage before. 
--- test/bib/test_build.py | 5 ++++- test/bib/test_manifest.py | 17 ++++++++++++++--- test/bib/test_progress.py | 15 ++++++++------- test/bib/testutil.py | 3 ++- 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index fbf1f60f..052be0f3 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -416,6 +416,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ ] cmd.extend(signed_image_args) + # Pull the signed image + testutil.pull_container(container_ref, tls_verify=False) + cmd.extend([ *creds_args, build_container, @@ -424,8 +427,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ *upload_args, *target_arch_args, *tc.bib_rootfs_args(), - "--tls-verify=false" if tc.sign else "--tls-verify=true", f"--use-librepo={tc.use_librepo}", + *tc.bib_rootfs_args() ]) # print the build command for easier tracing diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 746f9461..32fb90ef 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -7,8 +7,8 @@ import textwrap import pytest -import testutil +import testutil from containerbuild import build_container_fixture # pylint: disable=unused-import from containerbuild import make_container from testcases import gen_testcases @@ -69,6 +69,8 @@ def test_iso_manifest_smoke(build_container, tc): @pytest.mark.parametrize("tc", gen_testcases("manifest")) def test_manifest_disksize(tmp_path, build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + # create derrived container with 6G silly file to ensure that # bib doubles the size to 12G+ cntf_path = tmp_path / "Containerfile" @@ -114,6 +116,8 @@ def test_manifest_local_checks_containers_storage_errors(build_container): @pytest.mark.parametrize("tc", gen_testcases("manifest")) def test_manifest_local_checks_containers_storage_works(tmp_path, build_container, tc): + 
testutil.pull_container(tc.container_ref, tc.target_arch) + cntf_path = tmp_path / "Containerfile" cntf_path.write_text(textwrap.dedent(f"""\n FROM {tc.container_ref} @@ -360,6 +364,8 @@ def find_image_anaconda_stage(manifest_str): @pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + cfg = { "customizations": { "installer": { @@ -384,8 +390,6 @@ def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(cfg), encoding="utf-8") - testutil.pull_container(tc.container_ref, tc.target_arch) - output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{output_path}:/output", @@ -548,6 +552,7 @@ def find_grub2_iso_stage_from(manifest_str): def test_manifest_fips_customization(tmp_path, build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) config = { "customizations": { @@ -582,6 +587,7 @@ def find_bootc_install_to_fs_stage_from(manifest_str): def test_manifest_disk_customization_lvm(tmp_path, build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) config = { "customizations": { @@ -606,6 +612,7 @@ def test_manifest_disk_customization_lvm(tmp_path, build_container): with config_path.open("w") as config_file: json.dump(config, config_file) + testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{config_path}:/config.json:ro", @@ -641,6 +648,7 @@ def test_manifest_disk_customization_btrfs(tmp_path, build_container): with config_path.open("w") as config_file: json.dump(config, config_file) + testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, "-v", 
f"{config_path}:/config.json:ro", @@ -681,6 +689,7 @@ def test_manifest_disk_customization_swap(tmp_path, build_container): with config_path.open("w") as config_file: json.dump(config, config_file) + testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{config_path}:/config.json:ro", @@ -725,6 +734,7 @@ def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): with config_path.open("w") as config_file: json.dump(config, config_file) + testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, "-v", f"{config_path}:/config.json:ro", @@ -754,6 +764,7 @@ def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): def test_iso_manifest_use_librepo(build_container, use_librepo): # no need to parameterize this test, --use-librepo behaves same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index d83cad97..2caa0eaf 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -1,5 +1,6 @@ import subprocess +import testutil # pylint: disable=unused-import,duplicate-code from test_opts import container_storage_fixture from containerbuild import build_container_fixture, build_fake_container_fixture @@ -10,8 +11,7 @@ def test_progress_debug(tmp_path, build_fake_container): output_path.mkdir(exist_ok=True) cmdline = [ - "podman", "run", "--rm", - "--privileged", + *testutil.podman_run_common, build_fake_container, "build", "--progress=debug", @@ -27,19 +27,21 @@ def test_progress_debug(tmp_path, build_fake_container): def test_progress_term_works_without_tty(tmp_path, build_fake_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + output_path = tmp_path / "output" 
output_path.mkdir(exist_ok=True) cmdline = [ - "podman", "run", "--rm", + *testutil.podman_run_common, # note that "-t" is missing here - "--privileged", build_fake_container, "build", # explicitly selecting term progress works even when there is no tty # (i.e. we just need ansi terminal support) "--progress=term", - "quay.io/centos-bootc/centos-bootc:stream9", + container_ref, ] res = subprocess.run(cmdline, capture_output=True, text=True, check=False) assert res.returncode == 0 @@ -51,10 +53,9 @@ def test_progress_term_autoselect(tmp_path, build_fake_container): output_path.mkdir(exist_ok=True) cmdline = [ - "podman", "run", "--rm", + *testutil.podman_run_common, # we have a terminal "-t", - "--privileged", build_fake_container, "build", # note that we do not select a --progress here so auto-select is used diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 1cdbcaa7..2d574579 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -215,7 +215,7 @@ def get_ip_from_default_route(): return default_route.split()[8] -def pull_container(container_ref, target_arch=""): +def pull_container(container_ref, target_arch="", tls_verify=True): if target_arch == "": target_arch = platform.machine() @@ -225,5 +225,6 @@ def pull_container(container_ref, target_arch=""): subprocess.run([ "podman", "pull", "--arch", target_arch, + "--tls-verify" if tls_verify else "--tls-verify=false", container_ref, ], check=True) From ccb7ba54530b1145a8c8baa8dd33be342c1ec02a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 5 Feb 2025 12:29:32 +0100 Subject: [PATCH 183/279] test: use `librepo` for `centos-9` too This commit switches centos-9 to use librepo as well. This is a bit sad because it means we have no repo anymore that uses the default "curl" backend. But even the centos9 mirrors are now so unreliable that we cannot get things merged. 
We should switch the default downloader here to librepo but we need some downstream test that ensures that subscribed content can still be installed with librepo (it should work but there is no explicit test in our testsuite). --- test/bib/testcases.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 729e6b2b..5a9ac03c 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -60,6 +60,7 @@ class TestCaseC9S(TestCase): container_ref: str = os.getenv( "BIB_TEST_BOOTC_CONTAINER_TAG", "quay.io/centos-bootc/centos-bootc:stream9") + use_librepo: bool = True @dataclasses.dataclass From d2f31ad513aee16025698f8b2e23de554b5f33e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Mon, 10 Feb 2025 19:58:10 +0100 Subject: [PATCH 184/279] github/workflows/tests: fix linter by adapting versions --- .github.com/workflows/bibtests.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index a253e66a..ad3cbfdb 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -11,6 +11,12 @@ on: # for merge queue merge_group: +env: + GO_VERSION: 1.22 + # see https://golangci-lint.run/product/changelog + # to select a version that supports the GO_VERSION given above + GOLANGCI_LINT_VERSION: v1.59.1 + concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -20,10 +26,10 @@ jobs: name: "⌨ Lint & unittests" runs-on: ubuntu-latest steps: - - name: Set up Go 1.21 + - name: Set up Go ${{ env.GO_VERSION }} uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version: ${{ env.GO_VERSION }} id: go - name: Check out code into the Go module directory @@ -41,7 +47,7 @@ jobs: - name: Run golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.55.2 + version: ${{ env.GOLANGCI_LINT_VERSION }} args: 
--timeout 5m0s
          working-directory: bib

From dc6ce0122e03cbdbc8fe23f27adcb2e3856e2e0a Mon Sep 17 00:00:00 2001
From: Michael Vogt
Date: Wed, 5 Feb 2025 12:20:33 +0100
Subject: [PATCH 185/279] test: add integration test for reporting errors from
 osbuild

This commit adds a proper integration test that ensures that we always
report the details of a failing osbuild run. This prevents regressions
like the one from #810.

This ensures that:
a) we report the messages from a broken stage
b) we report any osbuild messages as well to catch e.g. crashes that
   are not reported via the json progress

It is achieved by creating a new build container fixture that is
deliberately broken, i.e:
a) the org.osbuild.selinux stage is replaced with a fake that will echo
   some msgs and then error
b) osbuild itself is wrapped around so that we can reliably echo some
   canary strings before the real osbuild is executed
---
 test/bib/containerbuild.py | 47 ++++++++++++++++++++++++++++++++++++++
 test/bib/test_progress.py  | 32 +++++++++++++++++++++++++-
 2 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py
index 44154c75..b762a8d9 100644
--- a/test/bib/containerbuild.py
+++ b/test/bib/containerbuild.py
@@ -97,3 +97,50 @@ def build_fake_container_fixture(tmpdir_factory, build_container):
         tmp_path,
     ])
     return container_tag
+
+
+@pytest.fixture(name="build_erroring_container", scope="session")
+def build_erroring_container_fixture(tmpdir_factory, build_container):
+    """Build a container with a erroring osbuild and returns the name"""
+    tmp_path = tmpdir_factory.mktemp("build-fake-container")
+
+    # this ensures there are messages from osbuild itself that
+    # we can reliably test for
+    wrapping_osbuild_path = tmp_path / "wrapping-osbuild"
+    wrapping_osbuild_path.write_text(textwrap.dedent("""\
+    #!/bin/sh -e
+    echo "output-from-osbuild-stdout"
+    >&2 echo "output-from-osbuild-stderr"
+
+    exec /usr/bin/osbuild.real "$@"
+    """), encoding="utf8")
+ + # this ensures we have a failing stage and failure messages + bad_stage_path = tmp_path / "bad-stage" + bad_stage_path.write_text(textwrap.dedent("""\ + #!/bin/sh -e + echo osbuild-stage-stdout-output + >&2 echo osbuild-stage-stderr-output + exit 112 + """), encoding="utf8") + + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {build_container} + # ensure there is osbuild output + COPY --from={build_container} /usr/bin/osbuild /usr/bin/osbuild.real + COPY wrapping-osbuild /usr/bin/osbuild + RUN chmod 755 /usr/bin/osbuild + + # we break org.osbuild.selinux as runs early and is used everywhere + COPY bad-stage /usr/lib/osbuild/stages/org.osbuild.selinux + RUN chmod +x /usr/lib/osbuild/stages/org.osbuild.selinux + """), encoding="utf8") + + container_tag = "bootc-image-builder-test--osbuild" + subprocess.check_call([ + "podman", "build", + "-t", container_tag, + tmp_path, + ]) + return container_tag diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index 2caa0eaf..3b7a7a2b 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -1,9 +1,15 @@ import subprocess +import pytest + import testutil # pylint: disable=unused-import,duplicate-code from test_opts import container_storage_fixture -from containerbuild import build_container_fixture, build_fake_container_fixture +from containerbuild import ( + build_container_fixture, + build_erroring_container_fixture, + build_fake_container_fixture, +) def test_progress_debug(tmp_path, build_fake_container): @@ -65,3 +71,27 @@ def test_progress_term_autoselect(tmp_path, build_fake_container): assert res.returncode == 0 # its curious that we get the output on stdout here, podman weirdness? 
assert "[|] Manifest generation step" in res.stdout
+
+
+@pytest.mark.skipif(not testutil.can_start_rootful_containers, reason="require a rootful containers (try: sudo)")
+@pytest.mark.parametrize("progress", ["term", "verbose"])
+def test_progress_error_reporting(tmp_path, build_erroring_container, progress):
+    output_path = tmp_path / "output"
+    output_path.mkdir(exist_ok=True)
+
+    cmdline = [
+        *testutil.podman_run_common,
+        "-v", "/var/lib/containers/storage:/var/lib/containers/storage",
+        # we have a terminal
+        "-t",
+        build_erroring_container,
+        "build",
+        f"--progress={progress}",
+        "quay.io/centos-bootc/centos-bootc:stream9",
+    ]
+    res = subprocess.run(cmdline, capture_output=True, text=True, check=False)
+    assert "osbuild-stage-stdout-output" in res.stdout
+    assert "osbuild-stage-stderr-output" in res.stdout
+    assert "output-from-osbuild-stdout" in res.stdout
+    assert "output-from-osbuild-stderr" in res.stdout
+    assert res.returncode == 1
From 3e5de37babe975429631032fee295e084bc9c97d Mon Sep 17 00:00:00 2001
From: Michael Vogt
Date: Fri, 7 Feb 2025 13:26:38 +0100
Subject: [PATCH 186/279] testutil: ensure podman runs with 8g mem max

This commit ensures that we do restrict the memory of the bib test
container to catch excessive memory usage.

This is prompted by a memory leak when dealing with unrecoverable
status messages that lead to failures in konflux.
--- test/bib/testutil.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/bib/testutil.py b/test/bib/testutil.py index 2d574579..e1700078 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -202,6 +202,8 @@ def maybe_create_disk_customizations(cfg, tc): "--privileged", "-v", "/var/lib/containers/storage:/var/lib/containers/storage", "--security-opt", "label=type:unconfined_t", + # ensure we run in reasonable memory limits + "--memory=8g", "--memory-swap=8g", ] From 970c671ec359b95192735d217b593d5b8b4a38da Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 7 Feb 2025 13:29:34 +0100 Subject: [PATCH 187/279] test: run the centos-9 test with an attached terminal This commit changes the centos-9 test to run with `podman -t` so that we have a test-case that uses the `terminal` progress. This is prompted by: a) we have no integration test currently that uses the terminal progress for the full build b) a konflux failure/memory leak that showed because there the test is run with `-t` --- test/bib/test_build.py | 2 ++ test/bib/testcases.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index 052be0f3..b8d367b6 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -405,6 +405,8 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds "-v", "/var/lib/containers/storage:/var/lib/containers/storage", # mount the host's containers storage ] + if tc.podman_terminal: + cmd.append("-t") if tc.sign: sign_container_image(gpg_conf, registry_conf, tc.container_ref) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 5a9ac03c..88ed8cd8 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -27,6 +27,8 @@ class TestCase: disk_config: str = "" # use librepo for the downloading use_librepo: bool = False + # podman_terminal enables the podman -t option to get progress + 
podman_terminal: bool = False def bib_rootfs_args(self): if self.rootfs: @@ -61,6 +63,7 @@ class TestCaseC9S(TestCase): "BIB_TEST_BOOTC_CONTAINER_TAG", "quay.io/centos-bootc/centos-bootc:stream9") use_librepo: bool = True + use_terminal: bool = True @dataclasses.dataclass From c30a839a2cf12b88046a4c88623dd20c8f443412 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 17 Feb 2025 17:21:24 +0100 Subject: [PATCH 188/279] bib: add support for file/directory customizations This commit adds support for files/directories in blueprint customizations. This needs https://github.com/osbuild/images/pull/1227 Closes: https://github.com/osbuild/bootc-image-builder/issues/834 --- test/bib/test_manifest.py | 43 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 32fb90ef..57f03024 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -779,3 +779,46 @@ def test_iso_manifest_use_librepo(build_container, use_librepo): assert "org.osbuild.librepo" in manifest["sources"] else: assert "org.osbuild.curl" in manifest["sources"] + + +def test_manifest_customization_custom_file_smoke(tmp_path, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "files": [ + { + "path": "/etc/custom_file", + "data": "hello world" + }, + ], + "directories": [ + { + "path": "/etc/custom_dir", + }, + ], + }, + }, + } + + output_path = tmp_path / "output" + output_path.mkdir(exist_ok=True) + config_json_path = output_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{output_path}:/output", + build_container, + "manifest", f"{container_ref}", + "--config", "/output/config.json", + ], 
stderr=subprocess.PIPE, encoding="utf8") + json.loads(output) + assert '"to":"tree:///etc/custom_file"' in output + assert ('{"type":"org.osbuild.mkdir","options":{"paths":' + '[{"path":"/etc/custom_dir","exist_ok":true}]},' + '"devices":{"disk":{"type":"org.osbuild.loopback"' + ',"options":{"filename":"disk.raw"') in output From 1a551bb1075b96c89c7b45544cb76e566b17c7ed Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 18 Feb 2025 10:30:58 +0100 Subject: [PATCH 189/279] test: add full integration test for bp files/dirs This commit adds a full integration test for the files/dir blueprint customizations. --- test/bib/test_build.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index b8d367b6..f45318e0 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -358,6 +358,17 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ "kernel": { "append": kargs, }, + "files": [ + { + "path": "/etc/some-file", + "data": "some-data", + }, + ], + "directories": [ + { + "path": "/etc/some-dir", + }, + ], }, } testutil.maybe_create_filesystem_customizations(cfg, tc) @@ -529,6 +540,14 @@ def test_image_boots(image_type): else: assert_fs_customizations(image_type, test_vm) + # check file/dir customizations + exit_status, output = test_vm.run("stat /etc/some-file", user=image_type.username, password=image_type.password) + assert exit_status == 0 + assert "File: /etc/some-file" in output + _, output = test_vm.run("stat /etc/some-dir", user=image_type.username, password=image_type.password) + assert exit_status == 0 + assert "File: /etc/some-dir" in output + @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) def test_ami_boots_in_aws(image_type, force_aws_upload): From bad7f190afff89cb340349599073818f3900e70e Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Thu, 27 Mar 2025 10:35:29 +0100 Subject: [PATCH 190/279] test: 
import build_container_fixture as _ We need to import the fixtures and use them indirectly, which makes it appear like an unused import. Instead of disabling the linter on the line, let's suppress the warning by using _ as the import name. I find this nicer. --- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 57f03024..e3adbf4d 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -9,7 +9,7 @@ import pytest import testutil -from containerbuild import build_container_fixture # pylint: disable=unused-import +from containerbuild import build_container_fixture as _ from containerbuild import make_container from testcases import gen_testcases From 4abba2d7d3e22d0e395c17f1f0f424285fc38e30 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Wed, 26 Mar 2025 17:34:42 +0100 Subject: [PATCH 191/279] test: update tests to look for mount units and not fstab Some tests rely on reading the fstab options from the manifest to verify that filesystems and swap partitions generate the right options. These have been changed to instead look for org.osbuild.systemd.unit.create stages with filenames ending in .mount and .swap. 
--- test/bib/test_manifest.py | 75 +++++++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 30 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index e3adbf4d..9e744976 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -405,15 +405,36 @@ def test_manifest_anaconda_module_customizations(tmpdir_factory, build_container assert "org.fedoraproject.Anaconda.Modules.Timezone" not in st["options"]["activatable-modules"] -def find_fstab_stage_from(manifest_str): +def find_fs_mount_info_from(manifest_str): manifest = json.loads(manifest_str) + mount_stages = [] + # normally there should be only one swap partition, but there's no technical reason you can't have multiple + swap_stages = [] for pipeline in manifest["pipelines"]: - # the fstab stage in cross-arch manifests is in the "ostree-deployment" pipeline + # the mount unit stages in cross-arch manifests are in the "ostree-deployment" pipeline if pipeline["name"] in ("image", "ostree-deployment"): for st in pipeline["stages"]: - if st["type"] == "org.osbuild.fstab": - return st - raise ValueError(f"cannot find fstab stage in manifest:\n{manifest_str}") + if st["type"] == "org.osbuild.systemd.unit.create": + options = st["options"] + if options["filename"].endswith(".mount"): + mount_stages.append(st) + elif options["filename"].endswith(".swap"): + swap_stages.append(st) + + if not mount_stages: + raise ValueError(f"cannot find mount unit creation stages in manifest:\n{manifest_str}") + + mounts = [] + for stage in mount_stages: + options = stage["options"]["config"] + mounts.append(options["Mount"]) + + swaps = [] + for stage in swap_stages: + options = stage["options"]["config"] + swaps.append(options["Swap"]) + + return mounts, swaps @pytest.mark.parametrize("fscustomizations,rootfs", [ @@ -480,25 +501,23 @@ def test_manifest_fs_customizations_smoke_toml(tmp_path, build_container): def assert_fs_customizations(customizations, fstype, manifest): - # 
use the fstab stage to get filesystem types for each mountpoint - fstab_stage = find_fstab_stage_from(manifest) - filesystems = fstab_stage["options"]["filesystems"] + mounts, _ = find_fs_mount_info_from(manifest) manifest_mountpoints = set() - for fs in filesystems: - manifest_mountpoints.add(fs["path"]) - if fs["path"] == "/boot/efi": - assert fs["vfs_type"] == "vfat" + for mount in mounts: + manifest_mountpoints.add(mount["Where"]) + if mount["Where"] == "/boot/efi": + assert mount["Type"] == "vfat" continue - if fstype == "btrfs" and fs["path"] == "/boot": + if fstype == "btrfs" and mount["Where"] == "/boot": # /boot keeps its default fstype when using btrfs - assert fs["vfs_type"] == "ext4" + assert mount["Type"] == "ext4" continue - assert fs["vfs_type"] == fstype, f"incorrect filesystem type for {fs['path']}" + assert mount["Type"] == fstype, f"incorrect filesystem type for {mount['Where']}" - # check that all fs customizations appear in fstab + # check that all fs customizations appear in the manifest for custom_mountpoint in customizations: assert custom_mountpoint in manifest_mountpoints @@ -699,14 +718,12 @@ def test_manifest_disk_customization_swap(tmp_path, build_container): mkswap_stage = find_mkswap_stage_from(output) assert mkswap_stage["options"].get("uuid") swap_uuid = mkswap_stage["options"]["uuid"] - fstab_stage = find_fstab_stage_from(output) - filesystems = fstab_stage["options"]["filesystems"] + _, swaps = find_fs_mount_info_from(output) + what_node = f"/dev/disk/by-uuid/{swap_uuid}" assert { - 'uuid': swap_uuid, - "vfs_type": "swap", - "path": "none", - "options": "defaults", - } in filesystems + "What": what_node, + "Options": "defaults", + } in swaps def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): @@ -744,14 +761,12 @@ def test_manifest_disk_customization_lvm_swap(tmp_path, build_container): mkswap_stage = find_mkswap_stage_from(output) assert mkswap_stage["options"].get("uuid") swap_uuid = 
mkswap_stage["options"]["uuid"] - fstab_stage = find_fstab_stage_from(output) - filesystems = fstab_stage["options"]["filesystems"] + _, swaps = find_fs_mount_info_from(output) + what_node = f"/dev/disk/by-uuid/{swap_uuid}" assert { - 'uuid': swap_uuid, - "vfs_type": "swap", - "path": "none", - "options": "defaults", - } in filesystems + "What": what_node, + "Options": "defaults", + } in swaps # run osbuild schema validation, see gh#748 if not testutil.has_executable("osbuild"): pytest.skip("no osbuild executable") From ce8a7985501bafa23de4ce0607887ed281af86a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 05:06:45 +0000 Subject: [PATCH 192/279] build(deps): bump golangci/golangci-lint-action from 6 to 7 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6 to 7. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v6...v7) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index ad3cbfdb..9a4e35fd 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -45,7 +45,7 @@ jobs: run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - name: Run golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v7 with: version: ${{ env.GOLANGCI_LINT_VERSION }} args: --timeout 5m0s From 95eee2656f52b22eaae747f90332c99d43c1b138 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Tue, 8 Apr 2025 21:18:22 +0200 Subject: [PATCH 193/279] github: bump ubuntu runners to 24.04 20.04 is EOL soon. --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 9a4e35fd..c6d4a333 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -56,7 +56,7 @@ jobs: shellcheck: name: "🐚 Shellcheck" - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 with: From 84e59a8e7bb8671c77a112ce362e13a2953e1462 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 8 Apr 2025 12:21:44 +0200 Subject: [PATCH 194/279] test: convert test_manifest_disk_customization_lvm to TOML By moving the LVM disk customization test to TOML we will catch issues with our TOML parsing early, i.e. we can catch the regression we encountered when applying strict TOML parsing. This is also tested in https://github.com/osbuild/blueprint/pull/12 but having it here again for good measure is a good idea. 
--- test/bib/test_manifest.py | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 9e744976..4f990b83 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -608,33 +608,23 @@ def test_manifest_disk_customization_lvm(tmp_path, build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" testutil.pull_container(container_ref) - config = { - "customizations": { - "disk": { - "partitions": [ - { - "type": "lvm", - "minsize": "10 GiB", - "logical_volumes": [ - { - "minsize": "10 GiB", - "fs_type": "ext4", - "mountpoint": "/", - } - ] - } - ] - } - } - } - config_path = tmp_path / "config.json" - with config_path.open("w") as config_file: - json.dump(config, config_file) + config = textwrap.dedent("""\ + [[customizations.disk.partitions]] + type = "lvm" + minsize = "10 GiB" + + [[customizations.disk.partitions.logical_volumes]] + minsize = "10 GiB" + fs_type = "ext4" + mountpoint = "/" + """) + config_path = tmp_path / "config.toml" + config_path.write_text(config) testutil.pull_container(container_ref) output = subprocess.check_output([ *testutil.podman_run_common, - "-v", f"{config_path}:/config.json:ro", + "-v", f"{config_path}:/config.toml:ro", build_container, "manifest", f"{container_ref}", ]) From d843be3c6395000457ad14e94049fccba3b3702b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 28 Apr 2025 13:11:20 +0200 Subject: [PATCH 195/279] test: enable `osinfo_for()` centos10 This commit enables testing for centos10 based installer images. The fix https://gitlab.com/libosinfo/osinfo-db/-/commit/fc811ba5a792967e22a0108de5a245b23da3cc66 is now released. 
--- test/bib/test_build.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/bib/test_build.py b/test/bib/test_build.py index f45318e0..8213c35f 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build.py @@ -634,11 +634,7 @@ def osinfo_for(it: ImageBuildResult, arch: str) -> str: if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"): return f"{base} 'CentOS Stream 9 ({arch})'\n" if it.container_ref.endswith("/centos-bootc/centos-bootc:stream10"): - # XXX: uncomment once - # https://gitlab.com/libosinfo/osinfo-db/-/commit/fc811ba5a792967e22a0108de5a245b23da3cc66 - # gets released - # return f"CentOS Stream 10 ({arch})" - return "" + return f"Media is an installer for OS 'CentOS Stream 10 ({arch})'\n" if "/fedora/fedora-bootc:" in it.container_ref: ver = it.container_ref.rsplit(":", maxsplit=1)[1] return f"{base} 'Fedora Server {ver} ({arch})'\n" From 3ab1c1fd25614906a3a5b82042c2d868ab6648ea Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 29 Apr 2025 10:30:51 +0200 Subject: [PATCH 196/279] test: split test_build.py into test_build_{disk,iso} Split this big test into smaller files because we will run the tests in parallel via a dynamic (per-file) github matrix. This will allow faster tests and easier re-runs if a single test is flaky only a small subset will have to be retriggered. 
--- .../bib/{test_build.py => test_build_disk.py} | 47 ---------- test/bib/test_build_iso.py | 85 +++++++++++++++++++ 2 files changed, 85 insertions(+), 47 deletions(-) rename test/bib/{test_build.py => test_build_disk.py} (92%) create mode 100644 test/bib/test_build_iso.py diff --git a/test/bib/test_build.py b/test/bib/test_build_disk.py similarity index 92% rename from test/bib/test_build.py rename to test/bib/test_build_disk.py index 8213c35f..decf5c70 100644 --- a/test/bib/test_build.py +++ b/test/bib/test_build_disk.py @@ -610,53 +610,6 @@ def test_image_build_without_se_linux_denials(image_type): f"denials in log {image_type.journal_output}" -@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") -@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) -def test_iso_installs(image_type): - installer_iso_path = image_type.img_path - test_disk_path = installer_iso_path.with_name("test-disk.img") - with open(test_disk_path, "w", encoding="utf8") as fp: - fp.truncate(10_1000_1000_1000) - # install to test disk - with QEMU(test_disk_path, cdrom=installer_iso_path) as vm: - vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True) - vm.force_stop() - # boot test disk and do extremly simple check - with QEMU(test_disk_path) as vm: - vm.start(use_ovmf=True) - exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) - assert exit_status == 0 - assert_kernel_args(vm, image_type) - - -def osinfo_for(it: ImageBuildResult, arch: str) -> str: - base = "Media is an installer for OS" - if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"): - return f"{base} 'CentOS Stream 9 ({arch})'\n" - if it.container_ref.endswith("/centos-bootc/centos-bootc:stream10"): - return f"Media is an installer for OS 'CentOS Stream 10 ({arch})'\n" - if "/fedora/fedora-bootc:" in it.container_ref: - ver = it.container_ref.rsplit(":", maxsplit=1)[1] - return f"{base} 
'Fedora Server {ver} ({arch})'\n" - raise ValueError(f"unknown osinfo string for '{it.container_ref}'") - - -@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") -@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) -def test_iso_os_detection(image_type): - installer_iso_path = image_type.img_path - arch = image_type.img_arch - if not arch: - arch = platform.machine() - result = subprocess.run([ - "osinfo-detect", - installer_iso_path, - ], capture_output=True, text=True, check=True) - osinfo_output = result.stdout - expected_output = f"Media is bootable.\n{osinfo_for(image_type, arch)}" - assert osinfo_output == expected_output - - @pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") @pytest.mark.skipif(not testutil.has_executable("unsquashfs"), reason="need unsquashfs") @pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py new file mode 100644 index 00000000..d702e74d --- /dev/null +++ b/test/bib/test_build_iso.py @@ -0,0 +1,85 @@ +import os +import platform +import subprocess +from contextlib import ExitStack + +import pytest +# local test utils +import testutil +from containerbuild import build_container_fixture # pylint: disable=unused-import +from testcases import gen_testcases +from vm import QEMU + +from test_build_disk import ( + assert_kernel_args, + ImageBuildResult, +) +from test_build_disk import ( # pylint: disable=unused-import + gpg_conf_fixture, + image_type_fixture, + registry_conf_fixture, + shared_tmpdir_fixture, +) + + +@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) +def test_iso_installs(image_type): + installer_iso_path = 
image_type.img_path + test_disk_path = installer_iso_path.with_name("test-disk.img") + with open(test_disk_path, "w", encoding="utf8") as fp: + fp.truncate(10_1000_1000_1000) + # install to test disk + with QEMU(test_disk_path, cdrom=installer_iso_path) as vm: + vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True) + vm.force_stop() + # boot test disk and do extremly simple check + with QEMU(test_disk_path) as vm: + vm.start(use_ovmf=True) + exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) + assert exit_status == 0 + assert_kernel_args(vm, image_type) + + +def osinfo_for(it: ImageBuildResult, arch: str) -> str: + base = "Media is an installer for OS" + if it.container_ref.endswith("/centos-bootc/centos-bootc:stream9"): + return f"{base} 'CentOS Stream 9 ({arch})'\n" + if it.container_ref.endswith("/centos-bootc/centos-bootc:stream10"): + return f"Media is an installer for OS 'CentOS Stream 10 ({arch})'\n" + if "/fedora/fedora-bootc:" in it.container_ref: + ver = it.container_ref.rsplit(":", maxsplit=1)[1] + return f"{base} 'Fedora Server {ver} ({arch})'\n" + raise ValueError(f"unknown osinfo string for '{it.container_ref}'") + + +@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) +def test_iso_os_detection(image_type): + installer_iso_path = image_type.img_path + arch = image_type.img_arch + if not arch: + arch = platform.machine() + result = subprocess.run([ + "osinfo-detect", + installer_iso_path, + ], capture_output=True, text=True, check=True) + osinfo_output = result.stdout + expected_output = f"Media is bootable.\n{osinfo_for(image_type, arch)}" + assert osinfo_output == expected_output + + +@pytest.mark.skipif(platform.system() != "Linux", reason="osinfo detect test only runs on linux right now") +@pytest.mark.skipif(not testutil.has_executable("unsquashfs"), 
reason="need unsquashfs") +@pytest.mark.parametrize("image_type", gen_testcases("anaconda-iso"), indirect=["image_type"]) +def test_iso_install_img_is_squashfs(tmp_path, image_type): + installer_iso_path = image_type.img_path + with ExitStack() as cm: + mount_point = tmp_path / "cdrom" + mount_point.mkdir() + subprocess.check_call(["mount", installer_iso_path, os.fspath(mount_point)]) + cm.callback(subprocess.check_call, ["umount", os.fspath(mount_point)]) + # ensure install.img is the "flat" squashfs, before PR#777 the content + # was an intermediate ext4 image "squashfs-root/LiveOS/rootfs.img" + output = subprocess.check_output(["unsquashfs", "-ls", mount_point / "images/install.img"], text=True) + assert "usr/bin/bootc" in output From 60efb3d867361a2f36eaa4044a947b30f9fea1c3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 29 Apr 2025 09:39:57 +0200 Subject: [PATCH 197/279] workflow: run pytests via matrix to get more parallel runs This commit moves the test running into a matrix so that we get more parallel testing. It will still be (much) dominated by `test_build_iso.py` but at least this way a flaky test in e.g. `test_container` is much faster to re-run. With multiple VMs we can probably also parallize the tests because we have less images per VM to test so diskspace may be less of an issue. 
--- .github.com/workflows/bibtests.yaml | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index c6d4a333..c0859dbc 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -70,10 +70,30 @@ jobs: # allow seemingly unreachable commands SHELLCHECK_OPTS: -e SC1091 -e SC2002 -e SC2317 + collect_tests: + runs-on: ubuntu-latest + outputs: + test_files: ${{ steps.collect.outputs.test_files }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Collect test files + id: collect + run: | + TEST_FILES=$(ls test/test_*.py | sort) + JSON_FILES=$(echo "${TEST_FILES}" | jq -R | jq -cs ) + echo "test_files=${JSON_FILES}" >> $GITHUB_OUTPUT + integration: # TODO: run this also via tmt/testing-farm name: "Integration" runs-on: ubuntu-24.04 + needs: collect_tests + strategy: + matrix: + test_file: ${{ fromJson(needs.collect_tests.outputs.test_files) }} steps: - uses: actions/checkout@v4 with: @@ -136,7 +156,7 @@ jobs: # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set. 
# TODO: figure out what exactly podman needs - sudo -E XDG_RUNTIME_DIR= pytest-3 --basetemp=/mnt/var/tmp/bib-tests + sudo -E XDG_RUNTIME_DIR= pytest-3 --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }} - name: Diskspace (after) if: ${{ always() }} run: | From 030de4a98cb350ae62bc8c6c3bff516dd00b6b9d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 29 Apr 2025 12:54:25 +0200 Subject: [PATCH 198/279] test: fix missing testutil.pull_container() in test_progress.py --- test/bib/test_progress.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index 3b7a7a2b..b5621fea 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -13,6 +13,9 @@ def test_progress_debug(tmp_path, build_fake_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + output_path = tmp_path / "output" output_path.mkdir(exist_ok=True) @@ -21,7 +24,7 @@ def test_progress_debug(tmp_path, build_fake_container): build_fake_container, "build", "--progress=debug", - "quay.io/centos-bootc/centos-bootc:stream9", + container_ref, ] res = subprocess.run(cmdline, capture_output=True, check=True, text=True) assert res.stderr.count("Start progressbar") == 1 From b5b24347317c1377123787cedcb82143c1f0f94b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Wed, 9 Apr 2025 10:33:19 +0200 Subject: [PATCH 199/279] github: bump golangci-lint version --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index c0859dbc..ed716774 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -15,7 +15,7 @@ env: GO_VERSION: 1.22 # see https://golangci-lint.run/product/changelog # to select a version that supports the GO_VERSION given above - GOLANGCI_LINT_VERSION: v1.59.1 + 
GOLANGCI_LINT_VERSION: v2.0.2 concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} From 1518d49f723600aa7975e7af8bbc1e715248f6fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Thu, 10 Apr 2025 11:36:20 +0200 Subject: [PATCH 200/279] Makefile,github: implement `make lint` and centralize GOLANGCI_LINT_VERSION --- .github.com/workflows/bibtests.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index ed716774..947ca366 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -13,9 +13,6 @@ on: env: GO_VERSION: 1.22 - # see https://golangci-lint.run/product/changelog - # to select a version that supports the GO_VERSION given above - GOLANGCI_LINT_VERSION: v2.0.2 concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -44,10 +41,14 @@ jobs: - name: Install libgpgme devel package run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev + - name: Extract golangci-lint version from Makefile + id: golangci_lint_version + run: echo "GOLANGCI_LINT_VERSION=$(awk -F '=' '/^GOLANGCI_LINT_VERSION *=/{print $2}' Makefile)" >> "$GITHUB_OUTPUT" + - name: Run golangci-lint uses: golangci/golangci-lint-action@v7 with: - version: ${{ env.GOLANGCI_LINT_VERSION }} + version: ${{ steps.golangci_lint_version.outputs.GOLANGCI_LINT_VERSION }} args: --timeout 5m0s working-directory: bib From d5fd4adf4255cc89af6651d0c6bc9d9e314a38e3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 30 Apr 2025 19:22:18 +0200 Subject: [PATCH 201/279] test: update test_mount_ostree_error The previous commits changed the error message which is part of this test. Update it accordingly. 
--- test/bib/test_manifest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 4f990b83..d10b32ab 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -304,7 +304,7 @@ def test_mount_ostree_error(tmpdir_factory, build_container): "manifest", f"{container_ref}", "--config", "/output/config.json", ], stderr=subprocess.PIPE, encoding="utf8") - assert 'The following errors occurred while validating custom mountpoints:\npath "/ostree" is not allowed' \ + assert 'the following errors occurred while validating custom mountpoints:\npath "/ostree" is not allowed' \ in exc.value.stderr From 6f71b5081d8a4e33d1458d9959defe9d94ac6d6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 04:38:51 +0000 Subject: [PATCH 202/279] build(deps): bump golangci/golangci-lint-action from 7 to 8 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 7 to 8. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v7...v8) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-version: '8' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 947ca366..844e89eb 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -46,7 +46,7 @@ jobs: run: echo "GOLANGCI_LINT_VERSION=$(awk -F '=' '/^GOLANGCI_LINT_VERSION *=/{print $2}' Makefile)" >> "$GITHUB_OUTPUT" - name: Run golangci-lint - uses: golangci/golangci-lint-action@v7 + uses: golangci/golangci-lint-action@v8 with: version: ${{ steps.golangci_lint_version.outputs.GOLANGCI_LINT_VERSION }} args: --timeout 5m0s From e88b21233c8457ae5554212eaa68deb3ce7807e4 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 30 Apr 2025 11:16:49 +0200 Subject: [PATCH 203/279] test: split cross-arch test into its own file This commit moves the cross arch build into its own file so that it ran run in parallel in the GH runners. Its is a relatively expensive test (~20min on my machine, ~30min on GH) so moving it out should save quite a bit of time. 
--- test/bib/test_build_cross.py | 23 +++++++++++++++++++++++ test/bib/test_build_disk.py | 4 ++++ test/bib/testcases.py | 19 ++++++++++--------- 3 files changed, 37 insertions(+), 9 deletions(-) create mode 100644 test/bib/test_build_cross.py diff --git a/test/bib/test_build_cross.py b/test/bib/test_build_cross.py new file mode 100644 index 00000000..12b89eeb --- /dev/null +++ b/test/bib/test_build_cross.py @@ -0,0 +1,23 @@ +import platform + +import pytest + +from testcases import gen_testcases + +from test_build_disk import ( # pylint: disable=unused-import + assert_disk_image_boots, + build_container_fixture, + gpg_conf_fixture, + image_type_fixture, + registry_conf_fixture, + shared_tmpdir_fixture, +) + + +# This testcase is not part of "test_build_disk.py:test_image_boots" +# because it takes ~30min on the GH runners so moving it into a +# separate file ensures it is run in parallel on GH. +@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") +@pytest.mark.parametrize("image_type", gen_testcases("qemu-cross"), indirect=["image_type"]) +def test_image_boots_cross(image_type): + assert_disk_image_boots(image_type) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index decf5c70..63699aac 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -520,6 +520,10 @@ def assert_kernel_args(test_vm, image_type): @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") @pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_boots(image_type): + assert_disk_image_boots(image_type) + + +def assert_disk_image_boots(image_type): with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm: # user/password login works exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 
88ed8cd8..c246d98e 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -100,8 +100,17 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseC9S(image="anaconda-iso"), TestCaseC10S(image="anaconda-iso"), ] + if what == "qemu-cross": + test_cases = [] + if platform.machine() == "x86_64": + test_cases.append( + TestCaseC9S(image="raw", target_arch="arm64")) + elif platform.machine() == "arm64": + # TODO: add arm64->x86_64 cross build test too + pass + return test_cases if what == "qemu-boot": - test_cases = [ + return [ # test default partitioning TestCaseFedora(image="qcow2"), # test with custom disk configs @@ -109,14 +118,6 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseFedora(image="raw", disk_config="btrfs"), TestCaseC9S(image="raw", disk_config="lvm"), ] - # do a cross arch test too - if platform.machine() == "x86_64": - test_cases.append( - TestCaseC9S(image="raw", target_arch="arm64")) - elif platform.machine() == "arm64": - # TODO: add arm64->x86_64 cross build test too - pass - return test_cases if what == "all": return [ klass(image=img) From fcb3721392401bc0f608a6148426ca73469e6cb3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 6 May 2025 12:56:14 +0200 Subject: [PATCH 204/279] test: update testcases for fedora42/fedora43 With fedora-42 release we move our testing to that. It also adds fedora-43 to the disk image tests. This is a bit of an experiment, if its too fragile we need to disable it again. But with a test like this we would have found https://github.com/osbuild/bootc-image-builder/issues/868 earlier. 
--- test/bib/testcases.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index c246d98e..1921e5f6 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -45,14 +45,14 @@ def __str__(self): @dataclasses.dataclass class TestCaseFedora(TestCase): - container_ref: str = "quay.io/fedora/fedora-bootc:40" + container_ref: str = "quay.io/fedora/fedora-bootc:42" rootfs: str = "btrfs" use_librepo: bool = True @dataclasses.dataclass -class TestCaseFedora42(TestCase): - container_ref: str = "quay.io/fedora/fedora-bootc:42" +class TestCaseFedora43(TestCase): + container_ref: str = "quay.io/fedora/fedora-bootc:43" rootfs: str = "btrfs" use_librepo: bool = True @@ -115,7 +115,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseFedora(image="qcow2"), # test with custom disk configs TestCaseC9S(image="qcow2", disk_config="swap"), - TestCaseFedora(image="raw", disk_config="btrfs"), + TestCaseFedora43(image="raw", disk_config="btrfs"), TestCaseC9S(image="raw", disk_config="lvm"), ] if what == "all": @@ -136,8 +136,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "target-arch-smoke": return [ TestCaseC9S(target_arch="arm64"), - # TODO: merge with TestCaseFedora once the arches are build there - TestCaseFedora42(target_arch="ppc64le"), - TestCaseFedora42(target_arch="s390x"), + TestCaseFedora(target_arch="ppc64le"), + TestCaseFedora(target_arch="s390x"), ] raise ValueError(f"unknown test-case type {what}") From 48cc7a44dcbf6dfb8ae90b5921c695cd850e60ba Mon Sep 17 00:00:00 2001 From: Simon de Vlieger Date: Wed, 7 May 2025 12:26:39 +0200 Subject: [PATCH 205/279] main: drop rootfs cross-arch disable All platforms where `bootc-image-builder` runs that have `qemu-user` available have upgrade to a version >= 9.1.0. 
Signed-off-by: Simon de Vlieger --- test/bib/test_manifest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index d10b32ab..2b149369 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -556,8 +556,7 @@ def test_manifest_fs_customizations_xarch(tmp_path, build_container, fscustomiza "manifest", f"{container_ref}", ]) - # cross-arch builds only support ext4 (for now) - assert_fs_customizations(fscustomizations, "ext4", output) + assert_fs_customizations(fscustomizations, rootfs, output) def find_grub2_iso_stage_from(manifest_str): From dbf4954beea3d7ae09fd4e8ead1fbe4278606121 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 7 May 2025 20:58:30 +0200 Subject: [PATCH 206/279] test: set the cache-ttl to 1h in our tests The current caching of the test containers can lead to unexpected results when e.g. the upstream osbuild repo gets updated but podman caches the relevant line in the Containerfile because the install of osbuild has not changed. To counter this the container building sets an agressive TTL of 1h for the cache. This fixes the issues locally. 
--- test/bib/containerbuild.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index b762a8d9..76fda8ba 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -25,6 +25,7 @@ def make_container(container_path, arch=None): subprocess.check_call([ "podman", "build", + "--cache-ttl=1h", "-t", container_tag, "--arch", arch, container_path], encoding="utf8") @@ -41,6 +42,7 @@ def build_container_fixture(): container_tag = "bootc-image-builder-test" subprocess.check_call([ "podman", "build", + "--cache-ttl=1h", "-f", "Containerfile", "-t", container_tag, ]) From 0aae452bf93167b2be28d18cc50ad40dd11609b5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 21 May 2025 16:57:51 +0200 Subject: [PATCH 207/279] test: disable `fedora-43` bootc test for now This commit disabled the `TestCaseFedora43` fow now because it fails in CI with: ``` ... org.osbuild.bootc.install-to-filesystem: 19bb778fae4541936924e98952fc101eabf7f1782856dd0447ae1fef4ad3ac61 { "kernel-args": [ "rw", "console=tty0", "console=ttyS0", "systemd.journald.forward_to_console=1" ], "target-imgref": "quay.io/fedora/fedora-bootc:43" } device/disk (org.osbuild.loopback): loop0 acquired (locked: False) mount/- (org.osbuild.btrfs): mounting /dev/loop0p4 -> /store/tmp/buildroot-tmp-e004ml_u/mounts/ mount/boot (org.osbuild.xfs): mounting /dev/loop0p3 -> /store/tmp/buildroot-tmp-e004ml_u/mounts/boot mount/boot-efi (org.osbuild.fat): mounting /dev/loop0p2 -> /store/tmp/buildroot-tmp-e004ml_u/mounts/boot/efi Mount transient overlayfs for /etc/containers Creating bind mount for run/osbuild/containers Installing image: docker://quay.io/fedora/fedora-bootc:43 Initializing ostree layout ERROR Installing to filesystem: Creating ostree deployment: invalid reference format ``` Until this is resolved this test (against the current in development fedora) is not useful and blocks our CI. 
--- test/bib/testcases.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 1921e5f6..44d54043 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -115,7 +115,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseFedora(image="qcow2"), # test with custom disk configs TestCaseC9S(image="qcow2", disk_config="swap"), - TestCaseFedora43(image="raw", disk_config="btrfs"), + # mvo: disabled 2025-05-21 because: + # "ERROR Installing to filesystem: Creating ostree deployment: invalid reference format" + # TestCaseFedora43(image="raw", disk_config="btrfs"), TestCaseC9S(image="raw", disk_config="lvm"), ] if what == "all": From 96c4f4da8fc62b163075058da6be998e69815d68 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 20 May 2025 17:30:34 +0200 Subject: [PATCH 208/279] test_manifest: Add test for embedded disk and filesystem customization --- test/bib/test_manifest.py | 98 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 2b149369..90ed1c69 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -826,3 +826,101 @@ def test_manifest_customization_custom_file_smoke(tmp_path, build_container): '[{"path":"/etc/custom_dir","exist_ok":true}]},' '"devices":{"disk":{"type":"org.osbuild.loopback"' ',"options":{"filename":"disk.raw"') in output + + +def find_sfdisk_stage_from(manifest_str): + manifest = json.loads(manifest_str) + for pipl in manifest["pipelines"]: + if pipl["name"] == "image": + for st in pipl["stages"]: + if st["type"] == "org.osbuild.sfdisk": + return st["options"] + raise ValueError(f"cannot find sfdisk stage manifest:\n{manifest_str}") + + +def test_manifest_image_customize_filesystem(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = 
"quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "filesystem": [ + { + "mountpoint": "/boot", + "minsize": "3GiB" + } + ] + }, + }, + } + + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + # create derrived container with filesystem customization + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY config.json /usr/lib/bootc-image-builder/ + """), encoding="utf8") + + print(f"building filesystem customize container from {container_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + sfdisk_options = find_sfdisk_stage_from(manifest_str) + assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 + + +def test_manifest_image_customize_disk(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "disk": { + "partitions": [ + { + "label": "var", + "mountpoint": "/var", + "fs_type": "ext4", + "minsize": "3 GiB", + }, + ], + }, + }, + }, + } + + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + # create derrived container with disk customization + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY config.json /usr/lib/bootc-image-builder/ + """), encoding="utf8") + + print(f"building filesystem customize 
container from {container_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + sfdisk_options = find_sfdisk_stage_from(manifest_str) + assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 From 9d4b48daea1bb228a39e6183b51adb8b3826502d Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Wed, 21 May 2025 15:05:21 +0200 Subject: [PATCH 209/279] tests: Avoid running into docker.io rate limits We use a custom copy (ghcr.io/osbuild/bootc-image-builder/registry) of the docker registy image to avoid running into pull rate limits. --- test/bib/test_build_disk.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 63699aac..d2467aaa 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -137,7 +137,8 @@ def registry_conf_fixture(shared_tmpdir, request): "-p", f"{registry_port}:5000", "--restart", "always", "--name", registry_container_name, - "registry:2" + # We use a copy of docker.io registry to avoid running into docker.io pull rate limits + "ghcr.io/osbuild/bootc-image-builder/registry:2" ], check=True) registry_container_state = subprocess.run([ From e789437cf8a86f6088184dcfaf25885662b2fa5d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 27 May 2025 14:47:39 -0400 Subject: [PATCH 210/279] Clarify we build disk images I almost always stop people who say "image" without qualification because disk images are very different from container images. There's a lingering confusion that bootc-image-builder builds bootc images, which is not true... 
Signed-off-by: Colin Walters --- test/bib/test_progress.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/test_progress.py b/test/bib/test_progress.py index b5621fea..678685d1 100644 --- a/test/bib/test_progress.py +++ b/test/bib/test_progress.py @@ -29,7 +29,7 @@ def test_progress_debug(tmp_path, build_fake_container): res = subprocess.run(cmdline, capture_output=True, check=True, text=True) assert res.stderr.count("Start progressbar") == 1 assert res.stderr.count("Manifest generation step") == 1 - assert res.stderr.count("Image building step") == 1 + assert res.stderr.count("Disk image building step") == 1 assert res.stderr.count("Build complete") == 1 assert res.stderr.count("Stop progressbar") == 1 assert res.stdout.strip() == "" From 8723cdad533953a8538a75a01fc1cdf2f69740bf Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 3 Jun 2025 09:26:42 +0200 Subject: [PATCH 211/279] test: re-enable Fedora43 test The bootc fedora43 install was failing but with: https://github.com/bootc-dev/bootc/pull/1337 this should now be fixed (thanks Colin!). 
--- test/bib/testcases.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 44d54043..1921e5f6 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -115,9 +115,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseFedora(image="qcow2"), # test with custom disk configs TestCaseC9S(image="qcow2", disk_config="swap"), - # mvo: disabled 2025-05-21 because: - # "ERROR Installing to filesystem: Creating ostree deployment: invalid reference format" - # TestCaseFedora43(image="raw", disk_config="btrfs"), + TestCaseFedora43(image="raw", disk_config="btrfs"), TestCaseC9S(image="raw", disk_config="lvm"), ] if what == "all": From 4ce65af47a1613150dea661d61bc47a0b9a6312a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 23 Jun 2025 18:01:40 +0200 Subject: [PATCH 212/279] test: disable anaconda-iso C10S test for now We need to disable the C10S test for now, there is bug in the C10S kernel that prevents the iso to mount its squashfs. 
See also: - https://github.com/osbuild/bootc-image-builder/issues/965 - https://issues.redhat.com/browse/RHEL-97547 - https://issues.redhat.com/browse/RHEL-97487 --- test/bib/testcases.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 1921e5f6..2df3e446 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -98,7 +98,9 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements # a bit more stable # TestCaseFedora(image="anaconda-iso", sign=True), TestCaseC9S(image="anaconda-iso"), - TestCaseC10S(image="anaconda-iso"), + # 2025-06-23: disable because of: + # https://github.com/osbuild/bootc-image-builder/issues/965 + # TestCaseC10S(image="anaconda-iso"), ] if what == "qemu-cross": test_cases = [] From 91cce1a208845a63673935748b0be31693733578 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Mon, 26 May 2025 15:53:57 +0200 Subject: [PATCH 213/279] test: Add test of --build-container This just tries to build a cs9 image using a cs10 build container. 
--- test/bib/test_build_disk.py | 18 ++++++++++++++++-- test/bib/testcases.py | 6 ++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index d2467aaa..9bacdaf2 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -35,6 +35,7 @@ class ImageBuildResult(NamedTuple): img_path: str img_arch: str container_ref: str + build_container_ref: str rootfs: str disk_config: str username: str @@ -314,7 +315,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ bib_output = bib_output_path.read_text(encoding="utf8") results.append(ImageBuildResult( image_type, generated_img, tc.target_arch, - container_ref, tc.rootfs, tc.disk_config, + container_ref, tc.build_container_ref, tc.rootfs, tc.disk_config, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output)) @@ -384,6 +385,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ upload_args = [] creds_args = [] target_arch_args = [] + build_container_args = [] if tc.target_arch: target_arch_args = ["--target-arch", tc.target_arch] @@ -433,10 +435,16 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ # Pull the signed image testutil.pull_container(container_ref, tls_verify=False) + if tc.build_container_ref: + build_container_args = [ + "--build-container", tc.build_container_ref, + ] + cmd.extend([ *creds_args, build_container, container_ref, + *build_container_args, *types_arg, *upload_args, *target_arch_args, @@ -476,7 +484,7 @@ def del_ami(): for image_type in image_types: results.append(ImageBuildResult( image_type, artifact[image_type], tc.target_arch, - container_ref, tc.rootfs, tc.disk_config, + container_ref, tc.build_container_ref, tc.rootfs, tc.disk_config, username, password, ssh_keyfile_private_path, kargs, bib_output, journal_output, metadata)) yield results @@ -510,6 +518,12 @@ def 
test_image_is_generated(image_type): f"content: {os.listdir(os.fspath(image_type.img_path))}" +@pytest.mark.parametrize("image_type", gen_testcases("build-container"), indirect=["image_type"]) +def test_build_container_works(image_type): + assert image_type.img_path.exists(), "output file missing, dir "\ + f"content: {os.listdir(os.fspath(image_type.img_path))}" + + def assert_kernel_args(test_vm, image_type): exit_status, kcmdline = test_vm.run("cat /proc/cmdline", user=image_type.username, password=image_type.password) assert exit_status == 0 diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 2df3e446..323af494 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -14,6 +14,8 @@ class TestCase: # container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 container_ref: str = "" + # optional build_container_ref to the bootc image, e.g. quay.io/fedora/fedora-bootc:40 + build_container_ref: str = "" # image is the image type, e.g. "ami" image: str = "" # target_arch is the target archicture, empty means current arch @@ -141,4 +143,8 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements TestCaseFedora(target_arch="ppc64le"), TestCaseFedora(target_arch="s390x"), ] + if what == "build-container": + return [ + TestCaseC9S(build_container_ref="quay.io/centos-bootc/centos-bootc:stream10", image="qcow2"), + ] raise ValueError(f"unknown test-case type {what}") From 6f0e6d7b731c093df55b591ac1105f0e6b790dd3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 24 Jun 2025 17:35:25 +0200 Subject: [PATCH 214/279] Revert "test: disable anaconda-iso C10S test for now" This reverts commit bbec688e7e8cd6b6ed0591c33925142c7652998a. 
--- test/bib/testcases.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 323af494..3f106229 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -100,9 +100,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements # a bit more stable # TestCaseFedora(image="anaconda-iso", sign=True), TestCaseC9S(image="anaconda-iso"), - # 2025-06-23: disable because of: - # https://github.com/osbuild/bootc-image-builder/issues/965 - # TestCaseC10S(image="anaconda-iso"), + TestCaseC10S(image="anaconda-iso"), ] if what == "qemu-cross": test_cases = [] From 3619c0e35424f16c8e614ff072764a7ada6c304b Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Sat, 7 Jun 2025 19:39:04 +0200 Subject: [PATCH 215/279] test: use randomized password for test VMs This commit uses a randomized password for the test user in the test VM. --- test/bib/test_build_disk.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 9bacdaf2..8d47674f 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -2,8 +2,10 @@ import os import pathlib import platform +import random import re import shutil +import string import subprocess import tempfile import uuid @@ -256,7 +258,9 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ image_types = request.param.image.split("+") username = "test" - password = "password" + # use 18 char random password + password = "".join( + random.choices(string.ascii_uppercase + string.digits, k=18)) kargs = "systemd.journald.forward_to_console=1" container_ref = tc.container_ref @@ -375,10 +379,13 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ } testutil.maybe_create_filesystem_customizations(cfg, tc) testutil.maybe_create_disk_customizations(cfg, tc) - print(f"config for {output_path} {tc=}: {cfg=}") 
config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + # mask pw + for user in cfg["customizations"]["user"]: + user["password"] = "***" + print(f"config for {output_path} {tc=}: {cfg=}") cursor = testutil.journal_cursor() From 6b0dd6c09a7035876c837b6ae350bdf873b0e3b5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 4 Jul 2025 09:31:26 +0200 Subject: [PATCH 216/279] test: skip the test_image_build_without_se_linux_denials Skip the test_image_build_without_se_linux_denials test until https://github.com/osbuild/bootc-image-builder/issues/645 is resolved. It pains me to do this but the test is failing for some time and bib itself cannot do anything to resolve this (AIUI it need an upstream selinux policy change so that install_t can transition to container_runtime_t because of https://github.com/bootc-dev/bootc/commit/0527ca96202633625f79dfe06277b96cfb522000 --- test/bib/test_build_disk.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 8d47674f..9dd990ca 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -630,6 +630,8 @@ def has_selinux(): @pytest.mark.skipif(not has_selinux(), reason="selinux not enabled") @pytest.mark.parametrize("image_type", gen_testcases("qemu-boot"), indirect=["image_type"]) def test_image_build_without_se_linux_denials(image_type): + pytest.skip("skip until https://github.com/osbuild/bootc-image-builder/issues/645 is resolved") + # the journal always contains logs from the image building assert image_type.journal_output != "" assert not log_has_osbuild_selinux_denials(image_type.journal_output), \ From c38f2d7d72e14ce76296461e13279943148f8227 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 4 Jul 2025 09:45:45 +0200 Subject: [PATCH 217/279] test: enable `OSBUILD_EXPERIMENTAL=debug-qemu-user` This commit enables OSBUILD_EXPERIMENTAL=debug-qemu-user during the tests so that we see 
what unimplemented syscalls or ioctls are used. This should help with the cross-arch failure debugging we see in tmt right now. --- test/bib/test_build_disk.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 9dd990ca..eaf2138c 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -426,6 +426,11 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds "-v", "/var/lib/containers/storage:/var/lib/containers/storage", # mount the host's containers storage ] + if tc.target_arch: + # help debug cross-arch issues by making qemu-user print + cmd.extend( + ["--env", "OSBUILD_EXPERIMENTAL=debug-qemu-user"]) + if tc.podman_terminal: cmd.append("-t") From 315d868b84e6bd95c3556fda748af2c064f19d4e Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Fri, 1 Aug 2025 17:42:16 +0200 Subject: [PATCH 218/279] test: add manifest test for partition table type --- test/bib/test_manifest.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 90ed1c69..bf275401 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -631,6 +631,28 @@ def test_manifest_disk_customization_lvm(tmp_path, build_container): assert st["devices"]["rootlv"]["type"] == "org.osbuild.lvm2.lv" +def test_manifest_disk_customization_dos(tmp_path, build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config = textwrap.dedent("""\ + [customizations.disk] + type = "dos" + """) + config_path = tmp_path / "config.toml" + config_path.write_text(config) + + testutil.pull_container(container_ref) + output = subprocess.check_output([ + *testutil.podman_run_common, + "-v", f"{config_path}:/config.toml:ro", + build_container, + "manifest", f"{container_ref}", + ]) + 
st = find_sfdisk_stage_from(output) + assert st["label"] == "dos" + + def test_manifest_disk_customization_btrfs(tmp_path, build_container): container_ref = "quay.io/centos-bootc/centos-bootc:stream9" From 8e51c17d0dec8850f5a20ee8390eaa238e575253 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 21 Aug 2025 10:42:42 +0200 Subject: [PATCH 219/279] test: disable centos9 iso test for now because kernel panic This commit disables the centos9 iso test because it currently crashes the centos9 kernel, see https://issues.redhat.com/browse/RHEL-109635 This unbreaks bib merges and should be reverted once RHEL-109635 is fixed. --- test/bib/testcases.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 3f106229..21c53767 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -99,7 +99,8 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements # 2024-12-19: disabled for now until the mirror situation becomes # a bit more stable # TestCaseFedora(image="anaconda-iso", sign=True), - TestCaseC9S(image="anaconda-iso"), + # 2025-08-21: disabled because of https://issues.redhat.com/browse/RHEL-109635 + # TestCaseC9S(image="anaconda-iso"), TestCaseC10S(image="anaconda-iso"), ] if what == "qemu-cross": From 6416d8f2cf93739bc77ea8dd4a57bc12e7473932 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 05:58:42 +0000 Subject: [PATCH 220/279] build(deps): bump actions/checkout from 4 to 5 Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 844e89eb..4c47ee72 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -30,7 +30,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} @@ -59,7 +59,7 @@ jobs: name: "🐚 Shellcheck" runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} - name: Run ShellCheck @@ -77,7 +77,7 @@ jobs: test_files: ${{ steps.collect.outputs.test_files }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} - name: Collect test files @@ -96,7 +96,7 @@ jobs: matrix: test_file: ${{ fromJson(needs.collect_tests.outputs.test_files) }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup up python From df12e55c86d8c845846c7661923759685fc85b25 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 25 Aug 2025 09:31:10 +0200 Subject: [PATCH 221/279] test: re-enable the fedora 42 iso test We had this test disabled for a while because the mirror situation was not very stable. With librepo by default and some time having passed we should try it again. 
--- test/bib/testcases.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index 21c53767..e48755e3 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -96,9 +96,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements return [TestCaseC9S(image="ami"), TestCaseFedora(image="ami")] if what == "anaconda-iso": return [ - # 2024-12-19: disabled for now until the mirror situation becomes - # a bit more stable - # TestCaseFedora(image="anaconda-iso", sign=True), + TestCaseFedora(image="anaconda-iso", sign=True), # 2025-08-21: disabled because of https://issues.redhat.com/browse/RHEL-109635 # TestCaseC9S(image="anaconda-iso"), TestCaseC10S(image="anaconda-iso"), From cc33616321a35951819a72f539ef1195fed0701f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 25 Aug 2025 12:46:05 +0200 Subject: [PATCH 222/279] test: update test_iso_manifest_smoke to include rootfs Include the testcase rootfs in the `test_iso_manifest_smoke` test. This allows us to smoke test fedora ISOs that do not have a hardcoded rootfs. 
--- test/bib/test_manifest.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index bf275401..91b99b86 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -58,7 +58,9 @@ def test_iso_manifest_smoke(build_container, tc): *testutil.podman_run_common, build_container, "manifest", - "--type=anaconda-iso", f"{tc.container_ref}", + *tc.bib_rootfs_args(), + "--type=anaconda-iso", + f"{tc.container_ref}", ]) manifest = json.loads(output) # just some basic validation From a57e1b1b04f1ee6061db8f480e46b83af278536e Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Wed, 27 Aug 2025 17:55:10 +0200 Subject: [PATCH 223/279] Add test for aboot support This adds a test that a write-device stage is correctly generated if the partition table contains the right partition and an aboot.img in the modules dir. --- test/bib/test_manifest.py | 80 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 74 insertions(+), 6 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 91b99b86..347c64fe 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1,3 +1,5 @@ +# pylint: disable=too-many-lines + import base64 import hashlib import json @@ -651,7 +653,7 @@ def test_manifest_disk_customization_dos(tmp_path, build_container): build_container, "manifest", f"{container_ref}", ]) - st = find_sfdisk_stage_from(output) + st = find_stage_options_from(output, "org.osbuild.sfdisk") assert st["label"] == "dos" @@ -852,14 +854,14 @@ def test_manifest_customization_custom_file_smoke(tmp_path, build_container): ',"options":{"filename":"disk.raw"') in output -def find_sfdisk_stage_from(manifest_str): +def find_stage_options_from(manifest_str, stage_type): manifest = json.loads(manifest_str) for pipl in manifest["pipelines"]: if pipl["name"] == "image": for st in pipl["stages"]: - if st["type"] == "org.osbuild.sfdisk": + if st["type"] == stage_type: return 
st["options"] - raise ValueError(f"cannot find sfdisk stage manifest:\n{manifest_str}") + raise ValueError(f"cannot find {stage_type} stage manifest:\n{manifest_str}") def test_manifest_image_customize_filesystem(tmp_path, build_container): @@ -900,7 +902,7 @@ def test_manifest_image_customize_filesystem(tmp_path, build_container): "manifest", f"localhost/{container_tag}", ], encoding="utf8") - sfdisk_options = find_sfdisk_stage_from(manifest_str) + sfdisk_options = find_stage_options_from(manifest_str, "org.osbuild.sfdisk") assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 @@ -946,5 +948,71 @@ def test_manifest_image_customize_disk(tmp_path, build_container): "manifest", f"localhost/{container_tag}", ], encoding="utf8") - sfdisk_options = find_sfdisk_stage_from(manifest_str) + sfdisk_options = find_stage_options_from(manifest_str, "org.osbuild.sfdisk") assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 + + +def test_manifest_image_aboot(tmp_path, build_container): + # no need to parameterize this test, overrides behaves same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + cfg = { + "blueprint": { + "customizations": { + "disk": { + "partitions": [ + { + "part_label": "ukiboot_a", + "part_uuid": "DF331E4D-BE00-463F-B4A7-8B43E18FB53A", + "fs_type": "none", + "minsize": "1 GiB", + }, + { + "part_label": "ukiboot_b", + "part_uuid": "DF331E4D-BE00-463F-B4A7-8B43E18FB53A", + "fs_type": "none", + "minsize": "1 GiB", + }, + { + "part_label": "ukibootctl", + "part_uuid": "FEFD9070-346F-4C9A-85E6-17F07F922773", + "fs_type": "none", + "minsize": "1 GiB", + }, + ], + }, + }, + }, + } + + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + + testdata_path = tmp_path / "testdata" + testdata_path.write_text("some test data", encoding="utf-8") + + # Create derived container with the custom 
partitioning with an aboot + # partition and a kernel module dir with an aboot.img file + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder + COPY config.json /usr/lib/bootc-image-builder/ + RUN rm -rf /usr/lib/modules/* + RUN mkdir -p -m 0755 /usr/lib/modules/5.0-x86_64/ + COPY testdata /usr/lib/modules/5.0-x86_64/vmlinuz + COPY testdata /usr/lib/modules/5.0-x86_64/aboot.img + """), encoding="utf8") + + print(f"building filesystem customize container from {container_ref}") + with make_container(tmp_path) as container_tag: + print(f"using {container_tag}") + manifest_str = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + f"localhost/{container_tag}", + ], encoding="utf8") + write_device_options = find_stage_options_from(manifest_str, "org.osbuild.write-device") + assert write_device_options["from"] == "input://tree/usr/lib/modules/5.0-x86_64/aboot.img" From 45628daae651b04a9d0308b0cebddc2ab50d671d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 7 Aug 2025 21:08:45 +0200 Subject: [PATCH 224/279] test: update architecutre mismatch error string --- test/bib/test_manifest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 347c64fe..c628c678 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -153,7 +153,8 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "manifest", "--target-arch=aarch64", f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") - assert 'image found is for unexpected architecture "x86_64"' in exc.value.stderr + assert 'cannot generate manifest: requested container architecture '\ + 'does not match resolved container: "x86_64" !=' in exc.value.stderr def find_rootfs_type_from(manifest_str): From e8f0b8b07c757ed9203bb186db885afd5e7eb70a Mon Sep 17 
00:00:00 2001 From: Michael Vogt Date: Fri, 5 Sep 2025 10:32:20 +0200 Subject: [PATCH 225/279] test: update test for new `/usr/lib/bootc-image-builder/disk.yaml` We decided to move away from the blueprint way to specify the partition tables for a bootc container and use the more descriptive `disk.yaml`. Adjust the test to check the same functionality (aboot.img writing) using the new mechanism. Thanks to Alex for the example disk.yaml. --- test/bib/test_manifest.py | 89 +++++++++++++++++++++++---------------- 1 file changed, 52 insertions(+), 37 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index c628c678..5cd578ae 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -953,45 +953,61 @@ def test_manifest_image_customize_disk(tmp_path, build_container): assert sfdisk_options["partitions"][2]["size"] == 3 * 1024 * 1024 * 1024 / 512 -def test_manifest_image_aboot(tmp_path, build_container): +def test_manifest_image_disk_yaml(tmp_path, build_container): # no need to parameterize this test, overrides behaves same for all containers container_ref = "quay.io/centos-bootc/centos-bootc:stream9" testutil.pull_container(container_ref) - cfg = { - "blueprint": { - "customizations": { - "disk": { - "partitions": [ - { - "part_label": "ukiboot_a", - "part_uuid": "DF331E4D-BE00-463F-B4A7-8B43E18FB53A", - "fs_type": "none", - "minsize": "1 GiB", - }, - { - "part_label": "ukiboot_b", - "part_uuid": "DF331E4D-BE00-463F-B4A7-8B43E18FB53A", - "fs_type": "none", - "minsize": "1 GiB", - }, - { - "part_label": "ukibootctl", - "part_uuid": "FEFD9070-346F-4C9A-85E6-17F07F922773", - "fs_type": "none", - "minsize": "1 GiB", - }, - ], - }, - }, - }, - } + disk_yaml = textwrap.dedent("""--- + #enabled once https://github.com/osbuild/images/pull/1834 is in + #mount_configuration: none + partition_table: + size: '8589934592' + partitions: + - bootable: true + size: 1 MiB + type: 21686148-6449-6E6F-744E-656564454649 + uuid: 
fac7f1fb-3e8d-4137-a512-961de09a5549 + - bootable: false + label: efi + payload: + label: ESP + mountpoint: /boot/efi + type: vfat + payload_type: filesystem + size: '104857600' + type: c12a7328-f81f-11d2-ba4b-00a0c93ec93b + uuid: 68b2905b-df3e-4fb3-80fa-49d1e773aa33 + - label: ukiboot_a + size: '134217728' + type: df331e4d-be00-463f-b4a7-8b43e18fb53a + uuid: CD3B4BE3-0139-4A63-8060-658554C7273B + payload_type: raw + payload: + source_path: /usr/lib/modules/5.0-x86_64/aboot.img + - label: ukiboot_b + size: '134217728' + type: df331e4d-be00-463f-b4a7-8b43e18fb53a + uuid: E4D4DA50-7050-41AE-A5F9-DEF12B94DFB5 + - label: ukibootctl + size: '1048576' + type: fefd9070-346f-4c9a-85e6-17f07f922773 + uuid: 5A6F3ADE-EEB0-11EF-A838-E89C256C3906 + - label: root + payload: + label: root + mountpoint: / + type: ext4 + payload_type: filesystem + type: b921b045-1df0-41c3-af44-4c6f280d3fae + uuid: 6264d520-3fb9-423f-8ab8-7a0a8e3d3562 + """) - config_json_path = tmp_path / "config.json" - config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + disk_yaml_path = tmp_path / "disk.yaml" + disk_yaml_path.write_text(disk_yaml, encoding="utf-8") - testdata_path = tmp_path / "testdata" - testdata_path.write_text("some test data", encoding="utf-8") + testdata_path = tmp_path / "fake-aboot.img" + testdata_path.write_text("fake aboot.img content", encoding="utf-8") # Create derived container with the custom partitioning with an aboot # partition and a kernel module dir with an aboot.img file @@ -999,11 +1015,10 @@ def test_manifest_image_aboot(tmp_path, build_container): cntf_path.write_text(textwrap.dedent(f"""\n FROM {container_ref} RUN mkdir -p -m 0755 /usr/lib/bootc-image-builder - COPY config.json /usr/lib/bootc-image-builder/ - RUN rm -rf /usr/lib/modules/* + COPY disk.yaml /usr/lib/bootc-image-builder/ + # add a preditable aboot.img for the write-device tes RUN mkdir -p -m 0755 /usr/lib/modules/5.0-x86_64/ - COPY testdata /usr/lib/modules/5.0-x86_64/vmlinuz - COPY testdata 
/usr/lib/modules/5.0-x86_64/aboot.img + COPY fake-aboot.img /usr/lib/modules/5.0-x86_64/aboot.img """), encoding="utf8") print(f"building filesystem customize container from {container_ref}") From 39043abf13e328db508411a9a6e410c8115188a6 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 5 Sep 2025 11:02:07 +0200 Subject: [PATCH 226/279] test: remove roots home customizations for ISOs The root user cannot be customized much with anaconda and with https://github.com/osbuild/images/pull/1806 we got stricter about it. Unfortunately our own test manifest sets the root home for ISOs (because we need it for disks and use the same blueprint) and it panic()s now. Fix this by removing the problematic option for ISOs from the blueprint. --- test/bib/test_build_disk.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index eaf2138c..497c592f 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -379,6 +379,11 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ } testutil.maybe_create_filesystem_customizations(cfg, tc) testutil.maybe_create_disk_customizations(cfg, tc) + # if we build an iso we cannot have the "home" customization for + # user root or images will panic(), c.f. 
+ # https://github.com/osbuild/images/pull/1806 + if not image_types[0] in DISK_IMAGE_TYPES: + del cfg["customizations"]["user"][0]["home"] config_json_path = output_path / "config.json" config_json_path.write_text(json.dumps(cfg), encoding="utf-8") @@ -416,6 +421,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ if image_types[0] in DISK_IMAGE_TYPES: types_arg = [f"--type={it}" for it in DISK_IMAGE_TYPES] else: + # building an iso types_arg = [f"--type={image_types[0]}"] # run container to deploy an image into a bootable disk and upload to a cloud service if applicable From d3b89680783736aa7fc8f392712bf0cdfba7fb06 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 19 Sep 2025 11:08:11 +0200 Subject: [PATCH 227/279] test: disable cross arch test for now This commit drops the cross arch test for now. It keeps failing in the GH action with: ``` ERROR paramiko.transport:transport.py:1904 Exception (client): Error reading SSH protocol banner ERROR paramiko.transport:transport.py:1902 Traceback (most recent call last): ERROR paramiko.transport:transport.py:1902 File "/usr/lib/python3/dist-packages/paramiko/transport.py", line 2320, in _check_banner ERROR paramiko.transport:transport.py:1902 buf = self.packetizer.readline(timeout) ERROR paramiko.transport:transport.py:1902 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ERROR paramiko.transport:transport.py:1902 File "/usr/lib/python3/dist-packages/paramiko/packet.py", line 387, in readline ERROR paramiko.transport:transport.py:1902 buf += self._read_timeout(timeout) ERROR paramiko.transport:transport.py:1902 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ERROR paramiko.transport:transport.py:1902 File "/usr/lib/python3/dist-packages/paramiko/packet.py", line 624, in _read_timeout ERROR paramiko.transport:transport.py:1902 raise EOFError() ERROR paramiko.transport:transport.py:1902 EOFError ERROR paramiko.transport:transport.py:1902 ERROR paramiko.transport:transport.py:1902 During handling of the above exception, 
another exception occurred: ERROR paramiko.transport:transport.py:1902 ERROR paramiko.transport:transport.py:1902 Traceback (most recent call last): ERROR paramiko.transport:transport.py:1902 File "/usr/lib/python3/dist-packages/paramiko/transport.py", line 2138, in run ERROR paramiko.transport:transport.py:1902 self._check_banner() ERROR paramiko.transport:transport.py:1902 File "/usr/lib/python3/dist-packages/paramiko/transport.py", line 2324, in _check_banner ERROR paramiko.transport:transport.py:1902 raise SSHException( ERROR paramiko.transport:transport.py:1902 paramiko.ssh_exception.SSHException: Error reading SSH protocol banner ERROR paramiko.transport:transport.py:1902 ``` and its unclear what is going on. As the cross arch is best effort and this failure is blocking our releases we drop it for now. --- test/bib/testcases.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index e48755e3..f565e1dc 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -104,8 +104,11 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "qemu-cross": test_cases = [] if platform.machine() == "x86_64": - test_cases.append( - TestCaseC9S(image="raw", target_arch="arm64")) + # 2025-09-19: disabled because CI hangs, see + # https://github.com/osbuild/bootc-image-builder/actions/runs/17821609665 + # test_cases.append( + # TestCaseC9S(image="raw", target_arch="arm64")) + pass elif platform.machine() == "arm64": # TODO: add arm64->x86_64 cross build test too pass From f373d533ff287257540d97568e0ceb14f4a00425 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 16 Sep 2025 14:53:42 +0200 Subject: [PATCH 228/279] test: run df with --all When df runs it will hide duplicated mountpoints. This can mean that /sysroot is not available in the df output because the underlying disk is also mounted in multiple places. 
So pass `df --all` to ensure all mountpoints are available for the test to inspect. --- test/bib/test_build_disk.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 497c592f..5d3a6710 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -685,13 +685,16 @@ def assert_fs_customizations(image_type, test_vm): """ # check the minsize specified in the build configuration for each mountpoint against the sizes in the image # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint - exit_status, output = test_vm.run("df --output=target,size", user="root", + exit_status, output = test_vm.run("df --all --output=target,size", user="root", keyfile=image_type.ssh_keyfile_private_path) assert exit_status == 0 # parse the output of 'df' to a mountpoint -> size dict for convenience mountpoint_sizes = {} for line in output.splitlines()[1:]: fields = line.split() + # some filesystems to not report a size with --all + if fields[1] == "-": + continue # Note that df output is in 1k blocks, not bytes mountpoint_sizes[fields[0]] = int(fields[1]) * 2 ** 10 # in bytes From 257775a24727feb1702341c79022872e7bf6af4e Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 16 Sep 2025 10:51:44 +0200 Subject: [PATCH 229/279] test: tweak error message for new images error --- test/bib/test_manifest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 5cd578ae..7af333b9 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -153,8 +153,7 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "manifest", "--target-arch=aarch64", f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") - assert 'cannot generate manifest: requested container architecture '\ - 'does not match resolved container: "x86_64" !=' in exc.value.stderr 
+ assert 'cannot generate manifest: invalid arch: aarch64' in exc.value.stderr def find_rootfs_type_from(manifest_str): From f28eb3b20deab3453f42cb63b73e12859dc49644 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 21 Aug 2025 10:53:39 +0200 Subject: [PATCH 230/279] Revert "test: disable centos9 iso test for now because kernel panic" This reverts commit 3f3b315cb8fbd1f31226bebf50efc9f18a91e291. --- test/bib/testcases.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/bib/testcases.py b/test/bib/testcases.py index f565e1dc..97326158 100644 --- a/test/bib/testcases.py +++ b/test/bib/testcases.py @@ -97,8 +97,7 @@ def gen_testcases(what): # pylint: disable=too-many-return-statements if what == "anaconda-iso": return [ TestCaseFedora(image="anaconda-iso", sign=True), - # 2025-08-21: disabled because of https://issues.redhat.com/browse/RHEL-109635 - # TestCaseC9S(image="anaconda-iso"), + TestCaseC9S(image="anaconda-iso"), TestCaseC10S(image="anaconda-iso"), ] if what == "qemu-cross": From 3bea4d3a457d2cd9bf13b49c0cd1818069d94c4f Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 26 Sep 2025 08:45:41 +0200 Subject: [PATCH 231/279] test: update invalid cross arch test error msg The error message on mismatch of `--target-arch` with the actual container has changed (for the better) so we need to update the test for this. 
--- test/bib/test_manifest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 7af333b9..7171bb0f 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -153,7 +153,8 @@ def test_manifest_cross_arch_check(tmp_path, build_container): "manifest", "--target-arch=aarch64", f"localhost/{container_tag}" ], check=True, capture_output=True, encoding="utf8") - assert 'cannot generate manifest: invalid arch: aarch64' in exc.value.stderr + assert ('cannot generate manifest: requested bootc arch "aarch64" ' + 'does not match available arches [x86_64]') in exc.value.stderr def find_rootfs_type_from(manifest_str): From 96f3648c4365a7abb85379a25beaf4ed2ea9c4b7 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 7 Oct 2025 17:46:37 +0200 Subject: [PATCH 232/279] bib: expose new `ova` image type in bib This commit exposes the new `ova` image type and adds a basic smoke test. --- test/bib/test_manifest.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index 7171bb0f..e312883a 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -858,10 +858,9 @@ def test_manifest_customization_custom_file_smoke(tmp_path, build_container): def find_stage_options_from(manifest_str, stage_type): manifest = json.loads(manifest_str) for pipl in manifest["pipelines"]: - if pipl["name"] == "image": - for st in pipl["stages"]: - if st["type"] == stage_type: - return st["options"] + for st in pipl["stages"]: + if st["type"] == stage_type: + return st["options"] raise ValueError(f"cannot find {stage_type} stage manifest:\n{manifest_str}") @@ -1032,3 +1031,27 @@ def test_manifest_image_disk_yaml(tmp_path, build_container): ], encoding="utf8") write_device_options = find_stage_options_from(manifest_str, "org.osbuild.write-device") assert write_device_options["from"] == 
"input://tree/usr/lib/modules/5.0-x86_64/aboot.img" + + +@pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) +def test_ova_manifest_smoke(build_container, tc): + testutil.pull_container(tc.container_ref, tc.target_arch) + + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + *tc.bib_rootfs_args(), + "--type=ova", + f"{tc.container_ref}", + ]) + # just some basic validation that we generate a ova + assert find_stage_options_from(output, "org.osbuild.tar") == { + "filename": "image.ova", + "format": "ustar", + "paths": [ + "image.ovf", + "image.mf", + "image.vmdk" + ] + } From 02ffe61847680a12d041a3398d0eeb26aef8255a Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 29 Oct 2025 16:19:13 +0100 Subject: [PATCH 233/279] bib: add support for `--type bootc-installer` This commit adds support for the new `bootc-installer` image type that will take a bootc container and create an ISO out of it. It also adds a new `--installer-payload-ref` option so that the user can specify a different payload container to install. See https://github.com/osbuild/images/pull/1906 for details. This is the equivalent of https://github.com/osbuild/image-builder-cli/pull/341 for bootc-image-builder and allows us to build these kinds of images with bib now too. 
--- test/bib/test_manifest.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index e312883a..e523410d 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -53,7 +53,7 @@ def test_manifest_smoke(build_container, tc): @pytest.mark.parametrize("tc", gen_testcases("anaconda-iso")) -def test_iso_manifest_smoke(build_container, tc): +def test_rpm_iso_manifest_smoke(build_container, tc): testutil.pull_container(tc.container_ref, tc.target_arch) output = subprocess.check_output([ @@ -71,6 +71,31 @@ def test_iso_manifest_smoke(build_container, tc): assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names +def test_bootc_iso_manifest_smoke(build_container): + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + # Note that this is not a realistic ref, a generic bootc + # image does not contain anaconda so this won't produce a + # working installer. For the purpose of the test to validate + # that we get a manifest with the right refs its good enough. 
+ installer_payload_ref = "quay.io/centos-bootc/centos-bootc:stream10" + testutil.pull_container(container_ref) + testutil.pull_container(installer_payload_ref) + + output = subprocess.check_output([ + *testutil.podman_run_common, + build_container, + "manifest", + "--type=bootc-installer", + f"{container_ref}", + f"--installer-payload-ref={installer_payload_ref}", + ]) + manifest = json.loads(output) + # just some basic validation + expected_pipeline_names = ["build", "anaconda-tree", "efiboot-tree", "bootiso-tree", "bootiso"] + assert manifest["version"] == "2" + assert [pipeline["name"] for pipeline in manifest["pipelines"]] == expected_pipeline_names + + @pytest.mark.parametrize("tc", gen_testcases("manifest")) def test_manifest_disksize(tmp_path, build_container, tc): testutil.pull_container(tc.container_ref, tc.target_arch) From ef2cb603238cfaf18e9793356f7fff2739a2c7ac Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Sep 2025 17:25:31 +0200 Subject: [PATCH 234/279] test: fix missing wait() when killing qemu --- test/bib/vm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bib/vm.py b/test/bib/vm.py index a1be56a5..6157e3eb 100644 --- a/test/bib/vm.py +++ b/test/bib/vm.py @@ -202,6 +202,7 @@ def wait_qmp_event(self, qmp_event): def force_stop(self): if self._qemu_p: self._qemu_p.kill() + self._qemu_p.wait() self._qemu_p = None self._address = None self._ssh_port = None From 8892a43832eb55ecfb07c85eaed73c72e1ef8cde Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 25 Sep 2025 17:32:03 +0200 Subject: [PATCH 235/279] test: add bootc-installer image type test Add an integration test that uses the new `bootc-installer` image type to perform a full install and validate that booting into the resulting image works. 
--- test/bib/test_build_iso.py | 122 +++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py index d702e74d..63934490 100644 --- a/test/bib/test_build_iso.py +++ b/test/bib/test_build_iso.py @@ -1,12 +1,17 @@ import os +import random +import json import platform +import string import subprocess +import textwrap from contextlib import ExitStack import pytest # local test utils import testutil from containerbuild import build_container_fixture # pylint: disable=unused-import +from containerbuild import make_container from testcases import gen_testcases from vm import QEMU @@ -83,3 +88,120 @@ def test_iso_install_img_is_squashfs(tmp_path, image_type): # was an intermediate ext4 image "squashfs-root/LiveOS/rootfs.img" output = subprocess.check_output(["unsquashfs", "-ls", mount_point / "images/install.img"], text=True) assert "usr/bin/bootc" in output + + +@pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") +@pytest.mark.parametrize("container_ref", [ + "quay.io/centos-bootc/centos-bootc:stream10", + "quay.io/fedora/fedora-bootc:42", + "quay.io/centos-bootc/centos-bootc:stream9", +]) +# pylint: disable=too-many-locals +def test_bootc_installer_iso_installs(tmp_path, build_container, container_ref): + # XXX: duplicated from test_build_disk.py + username = "test" + password = "".join( + random.choices(string.ascii_uppercase + string.digits, k=18)) + ssh_keyfile_private_path = tmp_path / "ssh-keyfile" + ssh_keyfile_public_path = ssh_keyfile_private_path.with_suffix(".pub") + if not ssh_keyfile_private_path.exists(): + subprocess.run([ + "ssh-keygen", + "-N", "", + # be very conservative with keys for paramiko + "-b", "2048", + "-t", "rsa", + "-f", os.fspath(ssh_keyfile_private_path), + ], check=True) + ssh_pubkey = ssh_keyfile_public_path.read_text(encoding="utf8").strip() + cfg = { + "customizations": { + "user": [ + { + "name": "root", + 
"key": ssh_pubkey, + # note that we have no "home" here for ISOs + }, { + "name": username, + "password": password, + "groups": ["wheel"], + }, + ], + "kernel": { + # XXX: we need https://github.com/osbuild/images/pull/1786 or no kargs are added to anaconda + # XXX2: drop a bunch of the debug flags + # + # Use console=ttyS0 so that we see output in our debug + # logs. by default anaconda prints to the last console= + # from the kernel commandline + "append": "systemd.debug-shell=1 rd.systemd.debug-shell=1 inst.debug console=ttyS0", + }, + }, + } + config_json_path = tmp_path / "config.json" + config_json_path.write_text(json.dumps(cfg), encoding="utf-8") + # create anaconda iso from base + cntf_path = tmp_path / "Containerfile" + cntf_path.write_text(textwrap.dedent(f"""\n + FROM {container_ref} + RUN dnf install -y \ + anaconda-core \ + anaconda-dracut \ + anaconda-install-img-deps \ + biosdevname \ + grub2-efi-x64-cdboot \ + net-tools \ + prefixdevname \ + python3-mako \ + lorax-templates-* \ + squashfs-tools \ + && dnf clean all + # shim-x64 is marked installed but the files are not in the expected + # place for https://github.com/osbuild/osbuild/blob/v160/stages/org.osbuild.grub2.iso#L91, see + # workaround via reinstall, we could add a config to the grub2.iso + # stage to allow a different prefix that then would be used by + # anaconda. + # If https://github.com/osbuild/osbuild/pull/2204 would get merged we + # can update images/ to set the correct efi_src_dirs and this can + # be removed (but its rather ugly). + # See also https://bugzilla.redhat.com/show_bug.cgi?id=1750708 + RUN dnf reinstall -y shim-x64 + # lorax wants to create a symlink in /mnt which points to /var/mnt + # on bootc but /var/mnt does not exist on some images. 
+ # + # If https://gitlab.com/fedora/bootc/base-images/-/merge_requests/294 + # gets merged this will be no longer needed + RUN mkdir /var/mnt + """), encoding="utf8") + output_path = tmp_path / "output" + output_path.mkdir() + with make_container(tmp_path) as container_tag: + cmd = [ + *testutil.podman_run_common, + "-v", f"{config_json_path}:/config.json:ro", + "-v", f"{output_path}:/output", + "-v", "/var/tmp/osbuild-test-store:/store", # share the cache between builds + "-v", "/var/lib/containers/storage:/var/lib/containers/storage", + build_container, + "--type", "bootc-installer", + "--rootfs", "ext4", + "--installer-payload-ref", container_ref, + f"localhost/{container_tag}", + ] + subprocess.check_call(cmd) + installer_iso_path = output_path / "bootiso" / "install.iso" + test_disk_path = installer_iso_path.with_name("test-disk.img") + with open(test_disk_path, "w", encoding="utf8") as fp: + fp.truncate(10_1000_1000_1000) + # install to test disk + with QEMU(test_disk_path, cdrom=installer_iso_path) as vm: + vm.start(wait_event="qmp:RESET", snapshot=False, use_ovmf=True) + vm.force_stop() + # boot test disk and do extremly simple check + with QEMU(test_disk_path) as vm: + vm.start(use_ovmf=True) + exit_status, _ = vm.run("true", user=username, password=password) + assert exit_status == 0 + exit_status, output = vm.run("bootc status", user="root", keyfile=ssh_keyfile_private_path) + assert exit_status == 0 + assert f"Booted image: {container_ref}" in output From 5ade5e05e43558139f076c65bf29cb94be21cea6 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 5 Nov 2025 12:50:52 +0100 Subject: [PATCH 236/279] test: refactor so that all vm related tests can be reused This commit reshuffles the code a bit so that we a reusable ./test/vmtest directory that can be used by the images library. 
With that we can add a toplevel pyproject.toml file so that we can import vmtest via ```console $ pip install git+https@github.com/osbuild/bootc-image-builder ``` im other projects. Note that none of this is ideal, butt this is (hopefully) a temporary measure until we find a more permanent home for our vm runner or replace it with something like test.thing or the new osbuild QEMU code. --- .github.com/workflows/bibtests.yaml | 2 +- test/bib/conftest.py | 7 + test/bib/test_build_disk.py | 9 +- test/bib/test_build_iso.py | 6 +- test/bib/testutil.py | 30 +-- test/bib/testutil_test.py | 58 ------ test/bib/vm.py | 311 ---------------------------- 7 files changed, 17 insertions(+), 406 deletions(-) delete mode 100644 test/bib/testutil_test.py delete mode 100644 test/bib/vm.py diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 4c47ee72..63b336d1 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -157,7 +157,7 @@ jobs: # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set. # TODO: figure out what exactly podman needs - sudo -E XDG_RUNTIME_DIR= pytest-3 --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }} + sudo -E XDG_RUNTIME_DIR= PYTHONPATH=. 
pytest-3 --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }} - name: Diskspace (after) if: ${{ always() }} run: | diff --git a/test/bib/conftest.py b/test/bib/conftest.py index 4db68ad6..acdfb393 100644 --- a/test/bib/conftest.py +++ b/test/bib/conftest.py @@ -1,6 +1,8 @@ import pytest +# pylint: disable=wrong-import-order from testcases import TestCase +from vmtest.util import get_free_port def pytest_addoption(parser): @@ -20,3 +22,8 @@ def pytest_make_parametrize_id(config, val): # pylint: disable=W0613 if isinstance(val, TestCase): return f"{val}" return None + + +@pytest.fixture(name="free_port") +def free_port_fixture(): + return get_free_port() diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 5d3a6710..03f3b822 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -18,7 +18,8 @@ import testutil from containerbuild import build_container_fixture # pylint: disable=unused-import from testcases import CLOUD_BOOT_IMAGE_TYPES, DISK_IMAGE_TYPES, gen_testcases -from vm import AWS, QEMU +import vmtest.util +from vmtest.vm import AWS_REGION, AWS, QEMU if not testutil.has_executable("podman"): pytest.skip("no podman, skipping integration tests that required podman", allow_module_level=True) @@ -113,7 +114,7 @@ def registry_conf_fixture(shared_tmpdir, request): {local_registry}: lookaside: file:///{sigstore_dir} """ - registry_port = testutil.get_free_port() + registry_port = vmtest.util.get_free_port() # We cannot use localhost as we need to access the registry from both # the host system and the bootc-image-builder container. 
default_ip = testutil.get_ip_from_default_route() @@ -410,7 +411,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ upload_args = [ f"--aws-ami-name=bootc-image-builder-test-{str(uuid.uuid4())}", - f"--aws-region={testutil.AWS_REGION}", + f"--aws-region={AWS_REGION}", "--aws-bucket=bootc-image-builder-ci", ] elif force_aws_upload: @@ -492,7 +493,7 @@ def build_images(shared_tmpdir, build_container, request, force_aws_upload, gpg_ metadata["ami_id"] = parse_ami_id_from_log(journal_output) def del_ami(): - testutil.deregister_ami(metadata["ami_id"]) + testutil.deregister_ami(metadata["ami_id"], AWS_REGION) request.addfinalizer(del_ami) journal_log_path.write_text(journal_output, encoding="utf8") diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py index 63934490..7142bdca 100644 --- a/test/bib/test_build_iso.py +++ b/test/bib/test_build_iso.py @@ -10,11 +10,8 @@ import pytest # local test utils import testutil -from containerbuild import build_container_fixture # pylint: disable=unused-import -from containerbuild import make_container +from containerbuild import build_container_fixture, make_container # pylint: disable=unused-import from testcases import gen_testcases -from vm import QEMU - from test_build_disk import ( assert_kernel_args, ImageBuildResult, @@ -25,6 +22,7 @@ registry_conf_fixture, shared_tmpdir_fixture, ) +from vmtest.vm import QEMU @pytest.mark.skipif(platform.system() != "Linux", reason="boot test only runs on linux right now") diff --git a/test/bib/testutil.py b/test/bib/testutil.py index e1700078..096d8f66 100644 --- a/test/bib/testutil.py +++ b/test/bib/testutil.py @@ -2,15 +2,11 @@ import pathlib import platform import shutil -import socket import subprocess -import time import boto3 from botocore.exceptions import ClientError -AWS_REGION = "us-east-1" - def run_journalctl(*args): pre = [] @@ -35,28 +31,6 @@ def has_executable(name): return shutil.which(name) is not None -def get_free_port() -> 
int: - # this is racy but there is no race-free way to do better with the qemu CLI - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(("localhost", 0)) - return s.getsockname()[1] - - -def wait_ssh_ready(address, port, sleep, max_wait_sec): - for _ in range(int(max_wait_sec / sleep)): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.settimeout(sleep) - try: - s.connect((address, port)) - data = s.recv(256) - if b"OpenSSH" in data: - return - except (ConnectionRefusedError, ConnectionResetError, TimeoutError): - pass - time.sleep(sleep) - raise ConnectionRefusedError(f"cannot connect to port {port} after {max_wait_sec}s") - - def has_x86_64_v3_cpu(): # x86_64-v3 has multiple features, see # https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels @@ -95,8 +69,8 @@ def write_aws_creds(path): return True -def deregister_ami(ami_id): - ec2 = boto3.resource("ec2", region_name=AWS_REGION) +def deregister_ami(ami_id, aws_region): + ec2 = boto3.resource("ec2", region_name=aws_region) try: print(f"Deregistering image {ami_id}") ami = ec2.Image(ami_id) diff --git a/test/bib/testutil_test.py b/test/bib/testutil_test.py deleted file mode 100644 index a1b2f0d2..00000000 --- a/test/bib/testutil_test.py +++ /dev/null @@ -1,58 +0,0 @@ -import contextlib -import platform -import subprocess -from unittest.mock import call, patch - -import pytest -from testutil import get_free_port, has_executable, wait_ssh_ready - - -def test_get_free_port(): - port_nr = get_free_port() - assert 1024 < port_nr < 65535 - - -@pytest.fixture(name="free_port") -def free_port_fixture(): - return get_free_port() - - -@patch("time.sleep") -def test_wait_ssh_ready_sleeps_no_connection(mocked_sleep, free_port): - with pytest.raises(ConnectionRefusedError): - wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.35) - assert mocked_sleep.call_args_list == [call(0.1), call(0.1), call(0.1)] - - -@pytest.mark.skipif(not has_executable("nc"), reason="needs 
nc") -def test_wait_ssh_ready_sleeps_wrong_reply(free_port): - with contextlib.ExitStack() as cm: - with subprocess.Popen( - f"echo not-ssh | nc -vv -l -p {free_port}", - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - encoding="utf-8", - ) as p: - cm.callback(p.kill) - # wait for nc to be ready - while True: - # netcat tranditional uses "listening", others "Listening" - # so just omit the first char - if "istening " in p.stdout.readline(): - break - # now connect - with patch("time.sleep") as mocked_sleep: - with pytest.raises(ConnectionRefusedError): - wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=0.55) - assert mocked_sleep.call_args_list == [ - call(0.1), call(0.1), call(0.1), call(0.1), call(0.1)] - - -@pytest.mark.skipif(platform.system() == "Darwin", reason="hangs on macOS") -@pytest.mark.skipif(not has_executable("nc"), reason="needs nc") -def test_wait_ssh_ready_integration(free_port): - with contextlib.ExitStack() as cm: - with subprocess.Popen(f"echo OpenSSH | nc -l -p {free_port}", shell=True) as p: - cm.callback(p.kill) - wait_ssh_ready("localhost", free_port, sleep=0.1, max_wait_sec=10) diff --git a/test/bib/vm.py b/test/bib/vm.py deleted file mode 100644 index 6157e3eb..00000000 --- a/test/bib/vm.py +++ /dev/null @@ -1,311 +0,0 @@ -import abc -import os -import pathlib -import platform -import subprocess -import sys -import time -import uuid -from io import StringIO - -import boto3 -import paramiko -from botocore.exceptions import ClientError -from paramiko.client import AutoAddPolicy, SSHClient -from testutil import AWS_REGION, get_free_port, wait_ssh_ready - - -class VM(abc.ABC): - - def __init__(self): - self._ssh_port = None - self._address = None - - def __del__(self): - self.force_stop() - - @abc.abstractmethod - def start(self): - """ - Start the VM. This method will be called automatically if it is not called explicitly before calling run(). 
- """ - - def _log(self, msg): - # XXX: use a proper logger - sys.stdout.write(msg.rstrip("\n") + "\n") - - def wait_ssh_ready(self): - wait_ssh_ready(self._address, self._ssh_port, sleep=1, max_wait_sec=600) - - @abc.abstractmethod - def force_stop(self): - """ - Stop the VM and clean up any resources that were created when setting up and starting the machine. - """ - - def run(self, cmd, user, password="", keyfile=None): - """ - Run a command on the VM via SSH using the provided credentials. - """ - if not self.running(): - self.start() - client = SSHClient() - client.set_missing_host_key_policy(AutoAddPolicy) - # workaround, see https://github.com/paramiko/paramiko/issues/2048 - pkey = None - if keyfile: - pkey = paramiko.RSAKey.from_private_key_file(keyfile) - client.connect( - self._address, self._ssh_port, - user, password, pkey=pkey, - allow_agent=False, look_for_keys=False) - chan = client.get_transport().open_session() - chan.get_pty() - chan.exec_command(cmd) - stdout_f = chan.makefile() - output = StringIO() - while True: - out = stdout_f.readline() - if not out: - break - self._log(out) - output.write(out) - exit_status = stdout_f.channel.recv_exit_status() - return exit_status, output.getvalue() - - @abc.abstractmethod - def running(self): - """ - True if the VM is running. 
- """ - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.force_stop() - - -# needed as each distro puts the OVMF.fd in a different location -def find_ovmf(): - for p in [ - "/usr/share/ovmf/OVMF.fd", # Debian - "/usr/share/OVMF/OVMF_CODE.fd", # Fedora - ]: - if os.path.exists(p): - return p - raise ValueError("cannot find a OVMF bios") - - -class QEMU(VM): - MEM = "2000" - - def __init__(self, img, arch="", snapshot=True, cdrom=None): - super().__init__() - self._img = pathlib.Path(img) - self._qmp_socket = self._img.with_suffix(".qemp-socket") - self._qemu_p = None - self._snapshot = snapshot - self._cdrom = cdrom - self._ssh_port = None - if not arch: - arch = platform.machine() - self._arch = arch - - def __del__(self): - self.force_stop() - - def _gen_qemu_cmdline(self, snapshot, use_ovmf): - if self._arch in ("arm64", "aarch64"): - qemu_cmdline = [ - "qemu-system-aarch64", - "-machine", "virt", - "-cpu", "cortex-a57", - "-smp", "2", - "-bios", "/usr/share/AAVMF/AAVMF_CODE.fd", - ] - elif self._arch in ("amd64", "x86_64"): - qemu_cmdline = [ - "qemu-system-x86_64", - "-M", "accel=kvm", - # get "illegal instruction" inside the VM otherwise - "-cpu", "host", - ] - if use_ovmf: - qemu_cmdline.extend(["-bios", find_ovmf()]) - else: - raise ValueError(f"unsupported architecture {self._arch}") - - # common part - qemu_cmdline += [ - "-m", self.MEM, - "-serial", "stdio", - "-monitor", "none", - "-netdev", f"user,id=net.0,hostfwd=tcp::{self._ssh_port}-:22", - "-device", "rtl8139,netdev=net.0", - "-qmp", f"unix:{self._qmp_socket},server,nowait", - ] - if not os.environ.get("OSBUILD_TEST_QEMU_GUI"): - qemu_cmdline.append("-nographic") - if self._cdrom: - qemu_cmdline.extend(["-cdrom", self._cdrom]) - if snapshot: - qemu_cmdline.append("-snapshot") - qemu_cmdline.append(self._img) - return qemu_cmdline - - # XXX: move args to init() so that __enter__ can use them? 
- def start(self, wait_event="ssh", snapshot=True, use_ovmf=False): - if self.running(): - return - self._ssh_port = get_free_port() - self._address = "localhost" - - # XXX: use systemd-run to ensure cleanup? - # pylint: disable=consider-using-with - self._qemu_p = subprocess.Popen( - self._gen_qemu_cmdline(snapshot, use_ovmf), - stdout=sys.stdout, - stderr=sys.stderr, - ) - # XXX: also check that qemu is working and did not crash - ev = wait_event.split(":") - if ev == ["ssh"]: - self.wait_ssh_ready() - self._log(f"vm ready at port {self._ssh_port}") - elif ev[0] == "qmp": - qmp_event = ev[1] - self.wait_qmp_event(qmp_event) - self._log(f"qmp event {qmp_event}") - else: - raise ValueError(f"unsupported wait_event {wait_event}") - - def _wait_qmp_socket(self, timeout_sec): - for _ in range(timeout_sec): - if os.path.exists(self._qmp_socket): - return True - time.sleep(1) - raise TimeoutError(f"no {self._qmp_socket} after {timeout_sec} seconds") - - def wait_qmp_event(self, qmp_event): - # import lazy to avoid requiring it for all operations - import qmp # pylint: disable=import-outside-toplevel - self._wait_qmp_socket(30) - mon = qmp.QEMUMonitorProtocol(os.fspath(self._qmp_socket)) - mon.connect() - while True: - event = mon.pull_event(wait=True) - self._log(f"DEBUG: got event {event}") - if event["event"] == qmp_event: - return - - def force_stop(self): - if self._qemu_p: - self._qemu_p.kill() - self._qemu_p.wait() - self._qemu_p = None - self._address = None - self._ssh_port = None - - def running(self): - return self._qemu_p is not None - - -class AWS(VM): - - _instance_type = "t3.medium" # set based on architecture when we add arm tests - - def __init__(self, ami_id): - super().__init__() - self._ssh_port = 22 - self._ami_id = ami_id - self._ec2_instance = None - self._ec2_security_group = None - self._ec2_resource = boto3.resource("ec2", region_name=AWS_REGION) - - def start(self): - if self.running(): - return - sec_group_ids = [] - if not 
self._ec2_security_group: - self._set_ssh_security_group() - sec_group_ids = [self._ec2_security_group.id] - try: - self._log(f"Creating ec2 instance from {self._ami_id}") - instances = self._ec2_resource.create_instances( - ImageId=self._ami_id, - InstanceType=self._instance_type, - SecurityGroupIds=sec_group_ids, - MinCount=1, MaxCount=1 - ) - self._ec2_instance = instances[0] - self._log(f"Waiting for instance {self._ec2_instance.id} to start") - self._ec2_instance.wait_until_running() - self._ec2_instance.reload() # make sure the instance info is up to date - self._address = self._ec2_instance.public_ip_address - self._log(f"Instance is running at {self._address}") - self.wait_ssh_ready() - self._log("SSH is ready") - except ClientError as err: - err_code = err.response["Error"]["Code"] - err_msg = err.response["Error"]["Message"] - self._log(f"Couldn't create instance with image {self._ami_id} and type {self._instance_type}.") - self._log(f"Error {err_code}: {err_msg}") - raise - - def _set_ssh_security_group(self): - group_name = f"bootc-image-builder-test-{str(uuid.uuid4())}" - group_desc = "bootc-image-builder test security group: SSH rule" - try: - self._log(f"Creating security group {group_name}") - self._ec2_security_group = self._ec2_resource.create_security_group(GroupName=group_name, - Description=group_desc) - ip_permissions = [ - { - "IpProtocol": "tcp", - "FromPort": self._ssh_port, - "ToPort": self._ssh_port, - "IpRanges": [{"CidrIp": "0.0.0.0/0"}], - } - ] - self._log(f"Authorizing inbound rule for {group_name} ({self._ec2_security_group})") - self._ec2_security_group.authorize_ingress(IpPermissions=ip_permissions) - self._log("Security group created") - except ClientError as err: - err_code = err.response["Error"]["Code"] - err_msg = err.response["Error"]["Message"] - self._log(f"Couldn't create security group {group_name} or authorize inbound rule.") - self._log(f"Error {err_code}: {err_msg}") - raise - - def force_stop(self): - if 
self._ec2_instance: - self._log(f"Terminating instance {self._ec2_instance.id}") - try: - self._ec2_instance.terminate() - self._ec2_instance.wait_until_terminated() - self._ec2_instance = None - self._address = None - except ClientError as err: - err_code = err.response["Error"]["Code"] - err_msg = err.response["Error"]["Message"] - self._log(f"Couldn't terminate instance {self._ec2_instance.id}.") - self._log(f"Error {err_code}: {err_msg}") - else: - self._log("No EC2 instance defined. Skipping termination.") - - if self._ec2_security_group: - self._log(f"Deleting security group {self._ec2_security_group.id}") - try: - self._ec2_security_group.delete() - self._ec2_security_group = None - except ClientError as err: - err_code = err.response["Error"]["Code"] - err_msg = err.response["Error"]["Message"] - self._log(f"Couldn't delete security group {self._ec2_security_group.id}.") - self._log(f"Error {err_code}: {err_msg}") - else: - self._log("No security group defined. Skipping deletion.") - - def running(self): - return self._ec2_instance is not None From 285f4117d37d405c723e497059f0e8257ae799e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Nov 2025 04:08:10 +0000 Subject: [PATCH 237/279] build(deps): bump golangci/golangci-lint-action from 8 to 9 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 8 to 9. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v8...v9) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-version: '9' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 63b336d1..8707d24a 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -46,7 +46,7 @@ jobs: run: echo "GOLANGCI_LINT_VERSION=$(awk -F '=' '/^GOLANGCI_LINT_VERSION *=/{print $2}' Makefile)" >> "$GITHUB_OUTPUT" - name: Run golangci-lint - uses: golangci/golangci-lint-action@v8 + uses: golangci/golangci-lint-action@v9 with: version: ${{ steps.golangci_lint_version.outputs.GOLANGCI_LINT_VERSION }} args: --timeout 5m0s From 981f549cdbe304c6c44d855233a828ac8ac30edb Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Sat, 8 Nov 2025 09:43:17 +0100 Subject: [PATCH 238/279] pyproject: add missing depedencies Now that this is used elsewhere to import vmtest we need to add the vmtest depdencies here. --- .github.com/workflows/bibtests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 8707d24a..d4f7607c 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -133,6 +133,7 @@ jobs: run: | # make sure test deps are available for root sudo -E pip install --user -r test/requirements.txt + sudo -E pip install --user . - name: Workarounds for GH runner diskspace run: | # use custom basetemp here because /var/tmp is on a smaller disk From e54d3f981a4d954fb8df1ccb4b4f83737b823721 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 04:07:33 +0000 Subject: [PATCH 239/279] build(deps): bump actions/setup-python from 5 to 6 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6. 
- [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index d4f7607c..6c93da86 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -100,7 +100,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup up python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 - name: Apt update run: sudo apt update - name: Install test dependencies From 5900624dbbe5205f981e336ffe0e01ba52101546 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Thu, 13 Nov 2025 10:16:10 +0100 Subject: [PATCH 240/279] main: show validation warnings by default This integrates the new checks for the blueprint options that we have in the bootc image type. Show them to stderr as warnings by default to not break existing workflows (in ibcli they are a hard error unless --ignore-warnings is given). 
--- test/bib/test_manifest.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/bib/test_manifest.py b/test/bib/test_manifest.py index e523410d..fee8acaa 100644 --- a/test/bib/test_manifest.py +++ b/test/bib/test_manifest.py @@ -1080,3 +1080,23 @@ def test_ova_manifest_smoke(build_container, tc): "image.vmdk" ] } + + +def test_manifest_warns_on_unsupported(tmp_path, build_container): + # no need to parameterize this test, toml is the same for all containers + container_ref = "quay.io/centos-bootc/centos-bootc:stream9" + testutil.pull_container(container_ref) + + config_toml_path = tmp_path / "config.toml" + config_toml_path.write_text(textwrap.dedent("""\ + [[customizations.repositories]] + id = "foo" + """)) + res = subprocess.run([ + *testutil.podman_run_common, + "-v", f"{config_toml_path}:/config.toml:ro", + build_container, + "manifest", f"{container_ref}", + ], check=True, capture_output=True, text=True) + assert ('blueprint validation failed for image type "qcow2": ' + 'customizations.repositories: not supported' in res.stderr) From bbeb5379762db1c8e4aa1b1ce35efb891cca7165 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 18 Nov 2025 18:26:13 +0100 Subject: [PATCH 241/279] vm: drop paramiko And replace with ssh/scp binaries. This is probably more robust than paramiko and more "standard". As a side effect we do no longer request an explicit tty. We could force that with "-t" in ssh but we did that with paramiko and that caused issues that e.g. "systemctl" would detect a tty and go into pager mode. So lets start without and we can always add "-t" to our ssh invocation to force it. 
--- .github.com/workflows/bibtests.yaml | 2 +- test/bib/test_build_iso.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 6c93da86..09aeed9e 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -106,7 +106,7 @@ jobs: - name: Install test dependencies run: | sudo apt update - sudo apt install -y python3-pytest python3-paramiko python3-boto3 flake8 pylint libosinfo-bin squashfs-tools + sudo apt install -y python3-pytest python3-boto3 flake8 pylint libosinfo-bin squashfs-tools sshpass - name: Diskspace (before) run: | df -h diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py index 7142bdca..1293bd50 100644 --- a/test/bib/test_build_iso.py +++ b/test/bib/test_build_iso.py @@ -202,4 +202,4 @@ def test_bootc_installer_iso_installs(tmp_path, build_container, container_ref): assert exit_status == 0 exit_status, output = vm.run("bootc status", user="root", keyfile=ssh_keyfile_private_path) assert exit_status == 0 - assert f"Booted image: {container_ref}" in output + assert f"image: {container_ref}" in output From e83b61962ea5eb017693c263c3c114c1b36d1b3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 04:09:05 +0000 Subject: [PATCH 242/279] build(deps): bump actions/checkout from 5 to 6 Bumps [actions/checkout](https://github.com/actions/checkout) from 5 to 6. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github.com/workflows/bibtests.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index 09aeed9e..c2038ad0 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -30,7 +30,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} @@ -59,7 +59,7 @@ jobs: name: "🐚 Shellcheck" runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} - name: Run ShellCheck @@ -77,7 +77,7 @@ jobs: test_files: ${{ steps.collect.outputs.test_files }} steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} - name: Collect test files @@ -96,7 +96,7 @@ jobs: matrix: test_file: ${{ fromJson(needs.collect_tests.outputs.test_files) }} steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} - name: Setup up python From 6218aa670cd6f84d3dce3240f7d235038f8c13d9 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 21 Nov 2025 11:51:13 +0100 Subject: [PATCH 243/279] test: use vmtest from images Now that the "images" library contains the vmtest testing helpers we use that and remove ouuse that and remove our own copy.. 
--- .github.com/workflows/bibtests.yaml | 1 - test/bib/requirements.txt | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github.com/workflows/bibtests.yaml b/.github.com/workflows/bibtests.yaml index c2038ad0..77f553bb 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github.com/workflows/bibtests.yaml @@ -133,7 +133,6 @@ jobs: run: | # make sure test deps are available for root sudo -E pip install --user -r test/requirements.txt - sudo -E pip install --user . - name: Workarounds for GH runner diskspace run: | # use custom basetemp here because /var/tmp is on a smaller disk diff --git a/test/bib/requirements.txt b/test/bib/requirements.txt index 9be09ce7..5a58554d 100644 --- a/test/bib/requirements.txt +++ b/test/bib/requirements.txt @@ -1,6 +1,6 @@ pytest==7.4.3 flake8==6.1.0 -paramiko==2.12.0 boto3==1.33.13 qmp==1.1.0 pylint==3.2.5 +vmtest @ git+https://github.com/osbuild/images.git From abe7b061d9f57d5e516d16953283a37b7cec4a80 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 24 Nov 2025 12:38:17 +0100 Subject: [PATCH 244/279] test: port to improve vmtest.vm.run() code This ports the existing code to the new vmtest.vm.run() code as proposed in images PR#2036. 
--- test/bib/test_build_disk.py | 54 ++++++++++++++++--------------------- test/bib/test_build_iso.py | 11 +++----- 2 files changed, 27 insertions(+), 38 deletions(-) diff --git a/test/bib/test_build_disk.py b/test/bib/test_build_disk.py index 03f3b822..7672bd80 100644 --- a/test/bib/test_build_disk.py +++ b/test/bib/test_build_disk.py @@ -544,8 +544,8 @@ def test_build_container_works(image_type): def assert_kernel_args(test_vm, image_type): - exit_status, kcmdline = test_vm.run("cat /proc/cmdline", user=image_type.username, password=image_type.password) - assert exit_status == 0 + ret = test_vm.run(["cat", "/proc/cmdline"], user=image_type.username, password=image_type.password) + kcmdline = ret.stdout # the kernel arg string must have a space as the prefix and either a space # as suffix or be the last element of the kernel commandline assert re.search(f" {re.escape(image_type.kargs)}( |$)", kcmdline) @@ -560,18 +560,16 @@ def test_image_boots(image_type): def assert_disk_image_boots(image_type): with QEMU(image_type.img_path, arch=image_type.img_arch) as test_vm: # user/password login works - exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) - assert exit_status == 0 + test_vm.run("true", user=image_type.username, password=image_type.password) # root/ssh login also works - exit_status, output = test_vm.run("id", user="root", keyfile=image_type.ssh_keyfile_private_path) - assert exit_status == 0 - assert "uid=0" in output + ret = test_vm.run("id", user="root", keyfile=image_type.ssh_keyfile_private_path) + assert "uid=0" in ret.stdout # check generic image options assert_kernel_args(test_vm, image_type) # ensure bootc points to the right image - _, output = test_vm.run("bootc status", user="root", keyfile=image_type.ssh_keyfile_private_path) + ret = test_vm.run(["bootc", "status"], user="root", keyfile=image_type.ssh_keyfile_private_path) # XXX: read the fully yaml instead? 
- assert f"image: {image_type.container_ref}" in output + assert f"image: {image_type.container_ref}" in ret.stdout if image_type.disk_config: assert_disk_customizations(image_type, test_vm) @@ -579,12 +577,10 @@ def assert_disk_image_boots(image_type): assert_fs_customizations(image_type, test_vm) # check file/dir customizations - exit_status, output = test_vm.run("stat /etc/some-file", user=image_type.username, password=image_type.password) - assert exit_status == 0 - assert "File: /etc/some-file" in output - _, output = test_vm.run("stat /etc/some-dir", user=image_type.username, password=image_type.password) - assert exit_status == 0 - assert "File: /etc/some-dir" in output + ret = test_vm.run(["stat", "/etc/some-file"], user=image_type.username, password=image_type.password) + assert "File: /etc/some-file" in ret.stdout + ret = test_vm.run(["stat", "/etc/some-dir"], user=image_type.username, password=image_type.password) + assert "File: /etc/some-dir" in ret.stdout @pytest.mark.parametrize("image_type", gen_testcases("ami-boot"), indirect=["image_type"]) @@ -599,11 +595,9 @@ def test_ami_boots_in_aws(image_type, force_aws_upload): # 4.30 GiB / 10.00 GiB [------------>____________] 43.02% 58.04 MiB p/s assert "] 100.00%" in image_type.bib_output with AWS(image_type.metadata["ami_id"]) as test_vm: - exit_status, _ = test_vm.run("true", user=image_type.username, password=image_type.password) - assert exit_status == 0 - exit_status, output = test_vm.run("echo hello", user=image_type.username, password=image_type.password) - assert exit_status == 0 - assert "hello" in output + test_vm.run("true", user=image_type.username, password=image_type.password) + ret = test_vm.run(["echo", "hello"], user=image_type.username, password=image_type.password) + assert "hello" in ret.stdout def log_has_osbuild_selinux_denials(log): @@ -686,12 +680,11 @@ def assert_fs_customizations(image_type, test_vm): """ # check the minsize specified in the build configuration for each 
mountpoint against the sizes in the image # TODO: replace 'df' call with 'parted --json' and find the partition size for each mountpoint - exit_status, output = test_vm.run("df --all --output=target,size", user="root", - keyfile=image_type.ssh_keyfile_private_path) - assert exit_status == 0 + ret = test_vm.run(["df", "--all", "--output=target,size"], user="root", + keyfile=image_type.ssh_keyfile_private_path) # parse the output of 'df' to a mountpoint -> size dict for convenience mountpoint_sizes = {} - for line in output.splitlines()[1:]: + for line in ret.stdout.splitlines()[1:]: fields = line.split() # some filesystems to not report a size with --all if fields[1] == "-": @@ -712,13 +705,12 @@ def assert_fs_customizations(image_type, test_vm): def assert_disk_customizations(image_type, test_vm): - exit_status, output = test_vm.run("findmnt --json", user="root", - keyfile=image_type.ssh_keyfile_private_path) - assert exit_status == 0 - findmnt = json.loads(output) - exit_status, swapon_output = test_vm.run("swapon --show", user="root", - keyfile=image_type.ssh_keyfile_private_path) - assert exit_status == 0 + ret = test_vm.run(["findmnt", "--json"], user="root", + keyfile=image_type.ssh_keyfile_private_path) + findmnt = json.loads(ret.stdout) + swapon_ret = test_vm.run(["swapon", "--show"], user="root", + keyfile=image_type.ssh_keyfile_private_path) + swapon_output = swapon_ret.stdout if dc := image_type.disk_config: if dc == "lvm": mnts = [mnt for mnt in findmnt["filesystems"][0]["children"] diff --git a/test/bib/test_build_iso.py b/test/bib/test_build_iso.py index 1293bd50..8d93603a 100644 --- a/test/bib/test_build_iso.py +++ b/test/bib/test_build_iso.py @@ -39,8 +39,7 @@ def test_iso_installs(image_type): # boot test disk and do extremly simple check with QEMU(test_disk_path) as vm: vm.start(use_ovmf=True) - exit_status, _ = vm.run("true", user=image_type.username, password=image_type.password) - assert exit_status == 0 + vm.run("true", 
user=image_type.username, password=image_type.password) assert_kernel_args(vm, image_type) @@ -198,8 +197,6 @@ def test_bootc_installer_iso_installs(tmp_path, build_container, container_ref): # boot test disk and do extremly simple check with QEMU(test_disk_path) as vm: vm.start(use_ovmf=True) - exit_status, _ = vm.run("true", user=username, password=password) - assert exit_status == 0 - exit_status, output = vm.run("bootc status", user="root", keyfile=ssh_keyfile_private_path) - assert exit_status == 0 - assert f"image: {container_ref}" in output + vm.run("true", user=username, password=password) + ret = vm.run(["bootc", "status"], user="root", keyfile=ssh_keyfile_private_path) + assert f"image: {container_ref}" in ret.stdout From fba04acb9c8e36cb66f2f5d9b2d5b15375252adb Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 8 Dec 2025 11:52:24 +0100 Subject: [PATCH 245/279] workflow: adjust bib tests workflow --- {.github.com => .github}/workflows/bibtests.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) rename {.github.com => .github}/workflows/bibtests.yaml (97%) diff --git a/.github.com/workflows/bibtests.yaml b/.github/workflows/bibtests.yaml similarity index 97% rename from .github.com/workflows/bibtests.yaml rename to .github/workflows/bibtests.yaml index 77f553bb..bdd77b13 100644 --- a/.github.com/workflows/bibtests.yaml +++ b/.github/workflows/bibtests.yaml @@ -1,5 +1,5 @@ --- -name: Tests +name: Bib tests on: pull_request: @@ -12,7 +12,7 @@ on: merge_group: env: - GO_VERSION: 1.22 + GO_VERSION: 1.23 concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -83,12 +83,11 @@ jobs: - name: Collect test files id: collect run: | - TEST_FILES=$(ls test/test_*.py | sort) + TEST_FILES=$(ls test/bib/test_*.py | sort) JSON_FILES=$(echo "${TEST_FILES}" | jq -R | jq -cs ) echo "test_files=${JSON_FILES}" >> $GITHUB_OUTPUT integration: - # TODO: run this also via tmt/testing-farm name: "Integration" runs-on: 
ubuntu-24.04 needs: collect_tests From 61b755478be44532a70648ebe6e043655a50fc55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Fri, 10 Nov 2023 17:00:44 +0100 Subject: [PATCH 246/279] Initialize repository --- Containerfile.bib | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 Containerfile.bib diff --git a/Containerfile.bib b/Containerfile.bib new file mode 100644 index 00000000..f1df79bc --- /dev/null +++ b/Containerfile.bib @@ -0,0 +1,16 @@ +FROM registry.fedoraproject.org/fedora:39 AS builder +RUN dnf install -y git-core golang gpgme-devel libassuan-devel +COPY build.sh . +RUN ./build.sh + +FROM registry.fedoraproject.org/fedora:39 +RUN dnf install -y osbuild osbuild-ostree && dnf clean all +COPY --from=builder images/osbuild-deploy-container /usr/bin/osbuild-deploy-container +COPY entrypoint.sh / +COPY --from=builder images/dnf-json . + +ENTRYPOINT ["/entrypoint.sh"] +VOLUME /output +VOLUME /store +VOLUME /rpmmd + From 551f27035fe51d526111897d4b691d72e7ce3ec5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Thu, 23 Nov 2023 10:48:38 +0100 Subject: [PATCH 247/279] Make the container working on an SELinux-enforcing systems When building an image, we need to make sure that the target system is correctly labeled. This becomes challenging if the target system contains labels that are unknown to the host because the process setting the label needs to have CAP_MAC_ADMIN if the host is SELinux-enforcing. CAP_MAC_ADMIN isn't a common capability on a SELinux-enforcing system. Even unconfined_t doesn't have it (same for spc_t - label used by --privileged containers). Thus, we need to ensure that we transition to a domain that actually has it. This commit relabels osbuild as install_t, a domain that has CAP_MAP_ADMIN. A bit of mount-dancing is needed in order to achieve that, see prepare.sh. I decided to make prepare.sh a separate script. 
This is useful for debugging: host # podman run -it \ --privileged \ --security-opt label=type:unconfined_t \ --entrypoint bash \ localhost/osbuild-deploy-container container # ./prepare.sh This way, you get the same environment as if you run the container the default way. See https://github.com/osbuild/osbuild-deploy-container/issues/6#issuecomment-1822809036 and links in this comment for further information. --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index f1df79bc..4f9610dd 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -6,7 +6,7 @@ FROM registry.fedoraproject.org/fedora:39 RUN dnf install -y osbuild osbuild-ostree && dnf clean all COPY --from=builder images/osbuild-deploy-container /usr/bin/osbuild-deploy-container -COPY entrypoint.sh / +COPY prepare.sh entrypoint.sh / COPY --from=builder images/dnf-json . ENTRYPOINT ["/entrypoint.sh"] From e4050016c34851818df08d05dfbaa2ee82c92730 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Sun, 26 Nov 2023 23:26:00 +0100 Subject: [PATCH 248/279] Containerfile: Install osbuild from COPR to pull a fix for the loop bug Builds are failing on certain systems on: AttributeError: 'Loop' object has no attribute 'fd' osbuild.host.RemoteError: FileNotFoundError: [Errno 2] No such file or directory: 'loop1' This happens because /dev in a container is a statically created tmpfs. Thus, when a new loop device is allocated, it doesn't appear in the container. A newer version of osbuild fixes that by always making sure that the loop device exists (by simply calling `mknod`). Pull the fix from COPR to unblock us, instead of waiting for the fix to flow into stable Fedora. `dnf copr enable` requires `dnf-plugins-core` that's not installed by default. Let's just use a static .repo file instead, it feels a tad bit simpler. 
--- Containerfile.bib | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Containerfile.bib b/Containerfile.bib index 4f9610dd..0b24b2c7 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -4,6 +4,11 @@ RUN ./build.sh FROM registry.fedoraproject.org/fedora:39 +# Install newer osbuild to fix the loop bug, see +# - https://github.com/osbuild/osbuild-deploy-container/issues/7 +# - https://github.com/osbuild/osbuild-deploy-container/issues/9 +# - https://github.com/osbuild/osbuild/pull/1468 +COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ RUN dnf install -y osbuild osbuild-ostree && dnf clean all COPY --from=builder images/osbuild-deploy-container /usr/bin/osbuild-deploy-container COPY prepare.sh entrypoint.sh / From 488ec97a826762e27d0783c5f2e7ee92385d28ec Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Fri, 24 Nov 2023 18:27:51 +0100 Subject: [PATCH 249/279] Containerfile: copy binary from new location --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index 0b24b2c7..2fa0dad0 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -10,7 +10,7 @@ # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ RUN dnf install -y osbuild osbuild-ostree && dnf clean all -COPY --from=builder images/osbuild-deploy-container /usr/bin/osbuild-deploy-container +COPY --from=builder bin/osbuild-deploy-container /usr/bin/osbuild-deploy-container COPY prepare.sh entrypoint.sh / COPY --from=builder images/dnf-json . 
From 9277149600cabf933dd236bde58bfaa65782c7b4 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 27 Nov 2023 16:36:10 +0100 Subject: [PATCH 250/279] Containerfile: build executable from source in repo --- Containerfile.bib | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 2fa0dad0..9b38a7f4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,6 +1,9 @@ FROM registry.fedoraproject.org/fedora:39 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel -COPY build.sh . +RUN mkdir /build +COPY build.sh /build +COPY odc /build/odc +WORKDIR /build RUN ./build.sh FROM registry.fedoraproject.org/fedora:39 @@ -10,7 +13,7 @@ # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ RUN dnf install -y osbuild osbuild-ostree && dnf clean all -COPY --from=builder bin/osbuild-deploy-container /usr/bin/osbuild-deploy-container +COPY --from=builder /build/bin/osbuild-deploy-container /usr/bin/osbuild-deploy-container COPY prepare.sh entrypoint.sh / COPY --from=builder images/dnf-json . From dbf9f5aaa7ccf4dbebd1a2dd0c29f60e7b0324a8 Mon Sep 17 00:00:00 2001 From: Achilleas Koutsou Date: Mon, 27 Nov 2023 17:10:48 +0100 Subject: [PATCH 251/279] Containerfile: install osbuild-depsolve-dnf for dnf-json The dnf depsolver, dnf-json, is now packages as osbuild-depsolve-dnf. Let's install it from the package instead of relying on the images repository. 
--- Containerfile.bib | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 9b38a7f4..c904ad65 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -12,10 +12,9 @@ # - https://github.com/osbuild/osbuild-deploy-container/issues/9 # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ -RUN dnf install -y osbuild osbuild-ostree && dnf clean all +RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf && dnf clean all COPY --from=builder /build/bin/osbuild-deploy-container /usr/bin/osbuild-deploy-container COPY prepare.sh entrypoint.sh / -COPY --from=builder images/dnf-json . ENTRYPOINT ["/entrypoint.sh"] VOLUME /output From adb379978d75a2364df1a622cf86ecdc9439a82c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 5 Dec 2023 00:00:18 +0100 Subject: [PATCH 252/279] rename osbuild-deploy-container to bootc-image-builder We want to emphasize the fact that this tool is meant for bootc-enabled container and that it's built on top of the Image Builder technology. Thus, we decided to rename this project to bootc-image-builder. 
--- Containerfile.bib | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index c904ad65..eba5a13d 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -8,12 +8,12 @@ FROM registry.fedoraproject.org/fedora:39 # Install newer osbuild to fix the loop bug, see -# - https://github.com/osbuild/osbuild-deploy-container/issues/7 -# - https://github.com/osbuild/osbuild-deploy-container/issues/9 +# - https://github.com/osbuild/bootc-image-builder/issues/7 +# - https://github.com/osbuild/bootc-image-builder/issues/9 # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf && dnf clean all -COPY --from=builder /build/bin/osbuild-deploy-container /usr/bin/osbuild-deploy-container +COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY prepare.sh entrypoint.sh / ENTRYPOINT ["/entrypoint.sh"] From 4d5efaf47f5744d8ce3d6d17193d399900bab8bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 5 Dec 2023 00:16:49 +0100 Subject: [PATCH 253/279] rename odc to bib Following the change from the last commit, let's also use the new abbreviation "bib" instead of "odc". 
--- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index eba5a13d..4b4c134e 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -2,7 +2,7 @@ RUN dnf install -y git-core golang gpgme-devel libassuan-devel RUN mkdir /build COPY build.sh /build -COPY odc /build/odc +COPY bib /build/bib WORKDIR /build RUN ./build.sh From b7395da4728779693e66dea0ab01f8944b36689b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Fri, 15 Dec 2023 16:48:14 +0100 Subject: [PATCH 254/279] Containerfile: Enable basic caching of go deps If we firstly copy go.mod and go.sum in and call go mod download, we get a cached layer with the go dependencies. This is useful for developers, because you only need to redownload the dependencies if go.mod or go.sum actually change. --- Containerfile.bib | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 4b4c134e..3d801536 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,6 +1,7 @@ FROM registry.fedoraproject.org/fedora:39 AS builder -RUN dnf install -y git-core golang gpgme-devel libassuan-devel -RUN mkdir /build +RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib +COPY bib/go.mod bib/go.sum /build/bib +RUN cd /build/bib && go mod download COPY build.sh /build COPY bib /build/bib WORKDIR /build From 18765247dac07be3d0968d68a24fd0b294857e54 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 5 Jan 2024 17:01:04 -0500 Subject: [PATCH 255/279] Move `prepare.sh` into Go code It's just ugly to have bits of this in shell script in this way; it's not complex code and doing it in Go helps keep the development environment simpler. 
Signed-off-by: Colin Walters --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index 3d801536..a8e21181 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -15,7 +15,7 @@ COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf && dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder -COPY prepare.sh entrypoint.sh / +COPY entrypoint.sh / ENTRYPOINT ["/entrypoint.sh"] VOLUME /output From e44d969935c6ee73c8d249f81b163573b991e2f3 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 3 Jan 2024 16:57:53 +0100 Subject: [PATCH 256/279] bib: use container based buildroot when building bootc disk images Instead of using the trandition rpm based buildroot this commit switches the buildroot to the target bootc container. The rational is that the low-level tooling like mkfs etc should match the target system. --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index a8e21181..a0fdf23d 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -13,7 +13,7 @@ # - https://github.com/osbuild/bootc-image-builder/issues/9 # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ -RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf && dnf clean all +RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf podman qemu-img && dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY entrypoint.sh / From 1b617e6389160ba62187139fb61b39817bc38412 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Feb 2024 16:04:41 -0500 Subject: [PATCH 257/279] Add selinux-policy-targeted and distribution-gpg-keys This is part of reusing the bib container to build base images; rpm-ostree wants to have *a* policy to bootstrap from 
currently. For distribution-gpg-keys, it allows `.repo` files to reference those unconditionally. Also, reuse a "make package lists declarative" model I've used in other places. It'd be nice to try to semi-standardize this of course, or even better upstream it into dnf. We're looking at adding a different Containerfile in https://github.com/osbuild/bootc-image-builder/pull/174 and this way it can source the same requirements. --- Containerfile.bib | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index a0fdf23d..09907746 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -13,7 +13,8 @@ # - https://github.com/osbuild/bootc-image-builder/issues/9 # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ -RUN dnf install -y osbuild osbuild-ostree osbuild-depsolve-dnf podman qemu-img && dnf clean all +COPY ./package-requires.txt . +RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY entrypoint.sh / From 31f9147402bfeb6d815a11d54b8d53231594e09c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 9 Feb 2024 11:52:40 -0500 Subject: [PATCH 258/279] Drain logic from entrypoint into Go code This is prep for adding multiple entrypoints; right now the Go code has a `build` verb that is always injected by this wrapper. Move the default values into the Go code. A future change will likely make things so that users are required to pass `build` (or perhaps `build-disk`) and this shell script goes away entirely. 
--- Containerfile.bib | 1 + 1 file changed, 1 insertion(+) diff --git a/Containerfile.bib b/Containerfile.bib index 09907746..9490b1f4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -20,6 +20,7 @@ ENTRYPOINT ["/entrypoint.sh"] VOLUME /output +WORKDIR /output VOLUME /store VOLUME /rpmmd From d613e1c29f6c98eda2786cd0af47f0d2bdf14e2a Mon Sep 17 00:00:00 2001 From: Gianluca Zuccarelli Date: Wed, 28 Feb 2024 14:23:05 +0000 Subject: [PATCH 259/279] build: remove build flags Remove some of the build tags since some of these features are now needed for local containers, such as `devicemapper` and `overlay` --- Containerfile.bib | 1 - 1 file changed, 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index 9490b1f4..bea48230 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -23,4 +23,3 @@ WORKDIR /output VOLUME /store VOLUME /rpmmd - From 850ebe933a132b4b2155239d5856f649786c9dd2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 15 Mar 2024 19:32:49 -0400 Subject: [PATCH 260/279] build: fast-track newer rpm-ostree To pick up the recent opt-usrlocal change. --- Containerfile.bib | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index bea48230..4b922356 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -14,7 +14,9 @@ # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ COPY ./package-requires.txt . 
-RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all +RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && \ + dnf -y upgrade https://kojipkgs.fedoraproject.org//packages/rpm-ostree/2024.4/3.fc39/$(arch)/rpm-ostree-{,libs-}2024.4-3.fc39.$(arch).rpm \ +&& dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY entrypoint.sh / From b17bb0d120efc851f2cbae6173752cd89910fa5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Tue, 19 Mar 2024 17:58:32 +0100 Subject: [PATCH 261/279] bib: make /var/lib/containers/storage a volume 6e965d2413f7bdc8844bf09c9c9968c6c170d96d changed bib to always go through a container storage. However, if the container runtime running bib was using overlayfs (the default), we ended up in a situation that /var/lib/containers/storage was overlayfs. When bib itself tried to worked with these containers, it called podman/skopeo that tried to create overlayfs inside overlayfs, which is a forbidden combination. This worked by accident with centos-boocs/fedora and centos containers, because they contain fuse-overlayfs, and podman/skopeo apparently silently switched to using fuse. This isn't only slow, but it also didn't work with container images without fuse-overlayfs. One example of such an image is the iot-bootable-container. E.g. registry.gitlab.com/redhat/services/products/image-builder/ci/images/iot-bootable-container:fedora-39-x86_64-49d623cc26287730f87d4c9eebadefd2b180dea8a41b00efe7f3b1c636f221d7 Let's fix this by making /var/lib/containers/storage a proper volume, which means that this directory will be bind-mounted to a real filesystem by the container engine, instead of being on an overlay. I tested this by successfully building a disk image from the iot-bootable-container container image above. 
--- Containerfile.bib | 1 + 1 file changed, 1 insertion(+) diff --git a/Containerfile.bib b/Containerfile.bib index 4b922356..abe872af 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -25,3 +25,4 @@ WORKDIR /output VOLUME /store VOLUME /rpmmd +VOLUME /var/lib/containers/storage From 6e1f7fc9776021374feef982a0069988e96fd857 Mon Sep 17 00:00:00 2001 From: John Eckersberg Date: Fri, 22 Mar 2024 15:37:54 -0400 Subject: [PATCH 262/279] Add additional metadata labels as required downstream Signed-off-by: John Eckersberg --- Containerfile.bib | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Containerfile.bib b/Containerfile.bib index abe872af..11f4eaf5 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -26,3 +26,9 @@ VOLUME /store VOLUME /rpmmd VOLUME /var/lib/containers/storage + +LABEL description="This tools allows to build and deploy disk-images from bootc container inputs." +LABEL io.k8s.description="This tools allows to build and deploy disk-images from bootc container inputs." 
+LABEL io.k8s.display-name="Bootc Image Builder" +LABEL io.openshift.tags="base fedora39" +LABEL summary="A container to create disk-images from bootc container inputs" From 6e602a45f10f4bd96cee3b345515a43877d33c4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Tue, 2 Apr 2024 16:32:37 +0200 Subject: [PATCH 263/279] Containerfile: Use GOPROXY to avoid CI problems HMS-3099 Avoid CI problems with rate-limits and might also speed up the tests --- Containerfile.bib | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Containerfile.bib b/Containerfile.bib index 11f4eaf5..833f3045 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,6 +1,8 @@ FROM registry.fedoraproject.org/fedora:39 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib COPY bib/go.mod bib/go.sum /build/bib +ARG GOPROXY=https://proxy.golang.org,direct +RUN go env -w GOPROXY=$GOPROXY RUN cd /build/bib && go mod download COPY build.sh /build COPY bib /build/bib From 19f3178a38943a58acd711b4713897b6f70a6471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Mon, 15 Apr 2024 09:55:09 +0200 Subject: [PATCH 264/279] Containerfile: remove outdated rpm-ostree upgrade I'm getting the following error, so this is no longer needed: The same or higher version of rpm-ostree is already installed, cannot update it. --- Containerfile.bib | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 833f3045..38b383f4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -16,9 +16,7 @@ # - https://github.com/osbuild/osbuild/pull/1468 COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ COPY ./package-requires.txt . 
-RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && \ - dnf -y upgrade https://kojipkgs.fedoraproject.org//packages/rpm-ostree/2024.4/3.fc39/$(arch)/rpm-ostree-{,libs-}2024.4-3.fc39.$(arch).rpm \ -&& dnf clean all +RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY entrypoint.sh / From c68801d81a11003e205e27b4e83158c6895f84d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Wed, 17 Apr 2024 15:25:35 +0200 Subject: [PATCH 265/279] bib: factor out the ISO packages into YAML definitions --- Containerfile.bib | 1 + 1 file changed, 1 insertion(+) diff --git a/Containerfile.bib b/Containerfile.bib index 38b383f4..1db52bb4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -19,6 +19,7 @@ RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder COPY entrypoint.sh / +COPY bib/data /usr/share/bootc-image-builder ENTRYPOINT ["/entrypoint.sh"] VOLUME /output From 624ae8f8467d968dc976e0f0a1dbadd8ff0c59f7 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 8 Apr 2024 15:17:38 +0200 Subject: [PATCH 266/279] bib: detect missing qemu-user early This commit checks early if cross architecture building support via `qemu-user-static` (or similar tooling) is missing and errors in a more user friendly way. Note that there is no integration test right now because testing this for real requires mutating the very global state of `echo 0 > /proc/sys/fs/binfmt_misc/qemu-aarch64` which would make the test non-parallelizable and even risks failing other cross-arch tests running on the same host (because binfmt-misc is not namespaced (yet)). 
--- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index 1db52bb4..e42d5deb 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -17,7 +17,7 @@ COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ COPY ./package-requires.txt . RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all -COPY --from=builder /build/bin/bootc-image-builder /usr/bin/bootc-image-builder +COPY --from=builder /build/bin/* /usr/bin/ COPY entrypoint.sh / COPY bib/data /usr/share/bootc-image-builder From da3d8d6b43f09813f2ab34e4b21617b03100ba7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Thu, 2 May 2024 12:17:01 +0200 Subject: [PATCH 267/279] many: update to Fedora 40 Fedora 40 is out, let's switch to it. Note that we are still fast- tracking osbuild from the "unpinned" COPR, but let's figure that out in a follow-up. Baby steps! --- Containerfile.bib | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index e42d5deb..44f2c27b 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:39 AS builder +FROM registry.fedoraproject.org/fedora:40 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib COPY bib/go.mod bib/go.sum /build/bib ARG GOPROXY=https://proxy.golang.org,direct @@ -9,12 +9,9 @@ WORKDIR /build RUN ./build.sh -FROM registry.fedoraproject.org/fedora:39 -# Install newer osbuild to fix the loop bug, see -# - https://github.com/osbuild/bootc-image-builder/issues/7 -# - https://github.com/osbuild/bootc-image-builder/issues/9 -# - https://github.com/osbuild/osbuild/pull/1468 -COPY ./group_osbuild-osbuild-fedora-39.repo /etc/yum.repos.d/ +FROM registry.fedoraproject.org/fedora:40 +# Fast-track osbuild so we don't depend on the "slow" Fedora release process to implement 
new features in bib +COPY ./group_osbuild-osbuild-fedora.repo /etc/yum.repos.d/ COPY ./package-requires.txt . RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all COPY --from=builder /build/bin/* /usr/bin/ @@ -31,5 +28,5 @@ LABEL description="This tools allows to build and deploy disk-images from bootc container inputs." LABEL io.k8s.description="This tools allows to build and deploy disk-images from bootc container inputs." LABEL io.k8s.display-name="Bootc Image Builder" -LABEL io.openshift.tags="base fedora39" +LABEL io.openshift.tags="base fedora40" LABEL summary="A container to create disk-images from bootc container inputs" From a54d8ff2b291817be0b975341a630e9b87e4ef76 Mon Sep 17 00:00:00 2001 From: Manjunath Kumatagi Date: Sat, 25 May 2024 14:28:38 +0530 Subject: [PATCH 268/279] Fix the linting error --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index 44f2c27b..68a82973 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,6 +1,6 @@ FROM registry.fedoraproject.org/fedora:40 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib -COPY bib/go.mod bib/go.sum /build/bib +COPY bib/go.mod bib/go.sum /build/bib/ ARG GOPROXY=https://proxy.golang.org,direct RUN go env -w GOPROXY=$GOPROXY RUN cd /build/bib && go mod download From f27c1e8f53d12394789f5c92dfa840f478ee76de Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 1 Jul 2024 11:41:15 +0200 Subject: [PATCH 269/279] build: include git revision in main "bootc-image-binary" For easier tracing of what revision was used by who this commit adds the git revision to the main `bootc-image-builder` binary. 
To do this we need to include the git tree in the build container, go will do the rest and pick up the build info from the git tree, see https://shibumi.dev/posts/go-18-feature/ --- Containerfile.bib | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Containerfile.bib b/Containerfile.bib index 68a82973..81bb30b4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -6,6 +6,8 @@ RUN cd /build/bib && go mod download COPY build.sh /build COPY bib /build/bib +# the ".git" dir will allow go build to automatically include build info +COPY .git /build/.git WORKDIR /build RUN ./build.sh From ee7d2bd516dd2afe541654aa3a79a9e59455167d Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 5 Aug 2024 17:22:48 +0200 Subject: [PATCH 270/279] Containerfile: copy the entire "." dir into /build When `tmt` runs it will not include the `.git` dir when it prepares the source. This was breaking the tmt tests. This commit just makes the containerfile copy the entire current working dir so that we get `.git` if it's there or it is skipped otherwise. Closes: https://github.com/osbuild/bootc-image-builder/issues/583 --- Containerfile.bib | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 81bb30b4..5b9fb3d4 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -4,10 +4,9 @@ ARG GOPROXY=https://proxy.golang.org,direct RUN go env -w GOPROXY=$GOPROXY RUN cd /build/bib && go mod download -COPY build.sh /build -COPY bib /build/bib -# the ".git" dir will allow go build to automatically include build info -COPY .git /build/.git +# Copy the entire dir to avoid having to conditionally include ".git" as that +# will not be available when tests are run under tmt +COPY . 
/build WORKDIR /build RUN ./build.sh From 3bf9bc951b2b5012d9214fca7da5c5b7b3c8f20a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Budai?= Date: Wed, 14 Aug 2024 13:17:43 +0200 Subject: [PATCH 271/279] bib: make build the default subcommand Prior this commit, the bootc-image-builder container image had a custom entrypoint that hardcoded the use of the build subcommand. This meant that if a user wanted to use a different subcommand, they had to overwrite the entrypoint. This commit changes the cobra code in bib to fallback to build if no subcommand was given. This is slighly ugly, but it allows us to remove the custom entrypoint, streamlining the use of subcommands. Let's see an example of calling the version subcommand: Before: podman run --rm -it --entrypoint=/usr/bin/bootc-image-builder \ quay.io/centos-bootc/bootc-image-builder:latest version After: sudo podman run --rm -it \ quay.io/centos-bootc/bootc-image-builder:latest version Kudos to https://github.com/IKukhta for his code from https://github.com/spf13/cobra/issues/823#issuecomment-870027246 --- Containerfile.bib | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 5b9fb3d4..8e0d35c8 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -16,10 +16,9 @@ COPY ./package-requires.txt . RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all COPY --from=builder /build/bin/* /usr/bin/ -COPY entrypoint.sh / COPY bib/data /usr/share/bootc-image-builder -ENTRYPOINT ["/entrypoint.sh"] +ENTRYPOINT ["/usr/bin/bootc-image-builder"] VOLUME /output WORKDIR /output VOLUME /store From 6e813886a75d46c274732f1516ac0e1b01c63727 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 6 Jan 2025 17:33:53 -0500 Subject: [PATCH 272/279] build-sys: Bump to Fedora 41 Fedora N-1 (40 now) is stable, but will eventually get stale. 
In my case we happen to be using this image as a "builder" image and I added a new feature to rpm-ostree, which I didn't ship to F40. I may still do so. However it's clearly a good idea for us to keep updated. This all said, it's actually not clear to me that Fedora is the right default base image - it may make sense to target e.g. c10s or c9s? This all relates to https://gitlab.com/fedora/bootc/tracker/-/issues/2 as well. Signed-off-by: Colin Walters --- Containerfile.bib | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 8e0d35c8..26bd03fd 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:40 AS builder +FROM registry.fedoraproject.org/fedora:41 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib COPY bib/go.mod bib/go.sum /build/bib/ ARG GOPROXY=https://proxy.golang.org,direct @@ -10,7 +10,7 @@ WORKDIR /build RUN ./build.sh -FROM registry.fedoraproject.org/fedora:40 +FROM registry.fedoraproject.org/fedora:41 # Fast-track osbuild so we don't depend on the "slow" Fedora release process to implement new features in bib COPY ./group_osbuild-osbuild-fedora.repo /etc/yum.repos.d/ COPY ./package-requires.txt . From 6438c41028bba6a64513157703e0fc44b5c70bea Mon Sep 17 00:00:00 2001 From: Simon de Vlieger Date: Wed, 7 May 2025 12:29:25 +0200 Subject: [PATCH 273/279] container: fedora 42 Let's base our upstream container on Fedora 42 now that it is GA. 
Signed-off-by: Simon de Vlieger --- Containerfile.bib | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 26bd03fd..b8c806c0 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:41 AS builder +FROM registry.fedoraproject.org/fedora:42 AS builder RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib COPY bib/go.mod bib/go.sum /build/bib/ ARG GOPROXY=https://proxy.golang.org,direct @@ -10,7 +10,7 @@ WORKDIR /build RUN ./build.sh -FROM registry.fedoraproject.org/fedora:41 +FROM registry.fedoraproject.org/fedora:42 # Fast-track osbuild so we don't depend on the "slow" Fedora release process to implement new features in bib COPY ./group_osbuild-osbuild-fedora.repo /etc/yum.repos.d/ COPY ./package-requires.txt . From a2fe94264ad5ae25f66561871c4923dedb6d4095 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 4 Jul 2025 12:37:51 +0200 Subject: [PATCH 274/279] Containerfile: fix io.openshift.tags Trivial drive-by to update LABEL io.openshift.tags="base fedora40" -> 42 --- Containerfile.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Containerfile.bib b/Containerfile.bib index b8c806c0..00b86030 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -28,5 +28,5 @@ LABEL description="This tools allows to build and deploy disk-images from bootc container inputs." LABEL io.k8s.description="This tools allows to build and deploy disk-images from bootc container inputs." 
LABEL io.k8s.display-name="Bootc Image Builder" -LABEL io.openshift.tags="base fedora40" +LABEL io.openshift.tags="base fedora42" LABEL summary="A container to create disk-images from bootc container inputs" From 5b9bb110053dfbb76cabd230690107d7cdd701bc Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Mon, 8 Dec 2025 13:42:17 +0100 Subject: [PATCH 275/279] test: build bib tests using bib container --- test/bib/containerbuild.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bib/containerbuild.py b/test/bib/containerbuild.py index 76fda8ba..02d5d1d2 100644 --- a/test/bib/containerbuild.py +++ b/test/bib/containerbuild.py @@ -43,7 +43,7 @@ def build_container_fixture(): subprocess.check_call([ "podman", "build", "--cache-ttl=1h", - "-f", "Containerfile", + "-f", "Containerfile.bib", "-t", container_tag, ]) return container_tag From fa962bd05b07e210ffe61874c9540b6438ecbb53 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 9 Dec 2025 10:17:05 +0100 Subject: [PATCH 276/279] bib: detect missing qemu-user early This commit checks early if cross architecture building support via `qemu-user-static` (or similar tooling) is missing and errors in a more user friendly way. Note that there is no integration test right now because testing this for real requires mutating the very global state of `echo 0 > /proc/sys/fs/binfmt_misc/qemu-aarch64` which would make the test non-parallelizable and even risks failing other cross-arch tests running on the same host (because binfmt-misc is not namespaced (yet)). 
--- cmd/cross-arch/canary.go | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 cmd/cross-arch/canary.go diff --git a/cmd/cross-arch/canary.go b/cmd/cross-arch/canary.go new file mode 100644 index 00000000..07a48394 --- /dev/null +++ b/cmd/cross-arch/canary.go @@ -0,0 +1,5 @@ +package main + +func main() { + println("ok") +} From 01872de7c7a832c74ed6eefcfaa2579429846691 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Tue, 9 Dec 2025 11:04:01 +0100 Subject: [PATCH 277/279] Containerfile.bib: adjust for the ibcli environment Adjust the bib containerfile to the environment it lives in in ibcli. This is not ideal as we now have drift between ibcli/bib version of the Containerfile. As much of this should be merged back into bib so that ideally we would just copy the files and even have a test that errors if there is drift. --- Containerfile.bib | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/Containerfile.bib b/Containerfile.bib index 00b86030..bd155188 100644 --- a/Containerfile.bib +++ b/Containerfile.bib @@ -1,6 +1,6 @@ -FROM registry.fedoraproject.org/fedora:42 AS builder -RUN dnf install -y git-core golang gpgme-devel libassuan-devel && mkdir -p /build/bib -COPY bib/go.mod bib/go.sum /build/bib/ +FROM registry.fedoraproject.org/fedora:43 AS builder +RUN dnf install -y git-core golang gpgme-devel libassuan-devel libvirt-devel && mkdir -p /build/bib +COPY go.mod go.sum /build/bib/ ARG GOPROXY=https://proxy.golang.org,direct RUN go env -w GOPROXY=$GOPROXY RUN cd /build/bib && go mod download @@ -8,15 +8,29 @@ # will not be available when tests are run under tmt COPY . 
/build WORKDIR /build -RUN ./build.sh +# keep in sync with: +# https://github.com/containers/podman/blob/2981262215f563461d449b9841741339f4d9a894/Makefile#L51 +# disable cgo as +# a) gcc crashes on fedora41/arm64 regularly +# b) we don't really need it +RUN --mount=type=cache,target=/root/.cache/go-build \ + go build -tags "containers_image_openpgp exclude_graphdriver_btrfs exclude_graphdriver_devicemapper" ./cmd/image-builder +RUN --mount=type=cache,target=/root/.cache/go-build \ + for arch in amd64 arm64; do \ + [ "$arch" = "$(go env GOARCH)" ] && continue; \ + GOARCH="$arch" go build -ldflags="-s -w" -o ../bin/bib-canary-"$arch" ./cmd/cross-arch/; \ + done -FROM registry.fedoraproject.org/fedora:42 + +FROM registry.fedoraproject.org/fedora:43 # Fast-track osbuild so we don't depend on the "slow" Fedora release process to implement new features in bib -COPY ./group_osbuild-osbuild-fedora.repo /etc/yum.repos.d/ -COPY ./package-requires.txt . -RUN grep -vE '^#' package-requires.txt | xargs dnf install -y && rm -f package-requires.txt && dnf clean all -COPY --from=builder /build/bin/* /usr/bin/ -COPY bib/data /usr/share/bootc-image-builder +RUN dnf install -y dnf-plugins-core \ + && dnf copr enable -y @osbuild/osbuild \ + && dnf install -y libxcrypt-compat wget osbuild osbuild-ostree osbuild-depsolve-dnf osbuild-lvm2 openssl subscription-manager libvirt-libs \ + && dnf clean all + +# copy as bootc-image-builder +COPY --from=builder /build/image-builder /usr/bin/bootc-image-builder ENTRYPOINT ["/usr/bin/bootc-image-builder"] VOLUME /output From ada569460c7799e5d0cf3126f2511eee9b0fe16e Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 12 Dec 2025 15:16:24 +0100 Subject: [PATCH 278/279] workflow: adjust bibtest.yml to ibcli env This means dropping shellcheck, golang-ci and unit tests which are already run by other workflows (or not needed like shellcheck). 
--- .github/workflows/bibtests.yaml | 56 ++------------------------------- 1 file changed, 2 insertions(+), 54 deletions(-) diff --git a/.github/workflows/bibtests.yaml b/.github/workflows/bibtests.yaml index bdd77b13..ef5085e7 100644 --- a/.github/workflows/bibtests.yaml +++ b/.github/workflows/bibtests.yaml @@ -19,58 +19,6 @@ concurrency: cancel-in-progress: true jobs: - lint: - name: "⌨ Lint & unittests" - runs-on: ubuntu-latest - steps: - - name: Set up Go ${{ env.GO_VERSION }} - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - id: go - - - name: Check out code into the Go module directory - uses: actions/checkout@v6 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Apt update - run: sudo apt update - - # This is needed for the container upload dependencies - - name: Install libgpgme devel package - run: sudo apt install -y libgpgme-dev libbtrfs-dev libdevmapper-dev - - - name: Extract golangci-lint version from Makefile - id: golangci_lint_version - run: echo "GOLANGCI_LINT_VERSION=$(awk -F '=' '/^GOLANGCI_LINT_VERSION *=/{print $2}' Makefile)" >> "$GITHUB_OUTPUT" - - - name: Run golangci-lint - uses: golangci/golangci-lint-action@v9 - with: - version: ${{ steps.golangci_lint_version.outputs.GOLANGCI_LINT_VERSION }} - args: --timeout 5m0s - working-directory: bib - - - name: Run unit tests - run: (cd bib && go test -race ./...) 
- - shellcheck: - name: "🐚 Shellcheck" - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Run ShellCheck - uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 - with: - ignore: vendor # We don't want to fix the code in vendored dependencies - env: - # don't check /etc/os-release sourcing, allow useless cats to live inside our codebase, and - # allow seemingly unreachable commands - SHELLCHECK_OPTS: -e SC1091 -e SC2002 -e SC2317 - collect_tests: runs-on: ubuntu-latest outputs: @@ -131,7 +79,7 @@ jobs: - name: Install python test deps run: | # make sure test deps are available for root - sudo -E pip install --user -r test/requirements.txt + sudo -E pip install --user -r test/bib/requirements.txt - name: Workarounds for GH runner diskspace run: | # use custom basetemp here because /var/tmp is on a smaller disk @@ -156,7 +104,7 @@ jobs: # podman needs (parts of) the environment but will break when # XDG_RUNTIME_DIR is set. # TODO: figure out what exactly podman needs - sudo -E XDG_RUNTIME_DIR= PYTHONPATH=. pytest-3 --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }} + sudo -E XDG_RUNTIME_DIR= PYTHONPATH=. 
pytest-3 -v --basetemp=/mnt/var/tmp/bib-tests ${{ matrix.test_file }} - name: Diskspace (after) if: ${{ always() }} run: | From d28c77556782f24ab19c524d5da2ea89aa3bb693 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Fri, 12 Dec 2025 16:35:27 +0100 Subject: [PATCH 279/279] workflow: use jlumbroso/free-disk-space action --- .github/workflows/bibtests.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/bibtests.yaml b/.github/workflows/bibtests.yaml index ef5085e7..79212a61 100644 --- a/.github/workflows/bibtests.yaml +++ b/.github/workflows/bibtests.yaml @@ -58,6 +58,12 @@ jobs: run: | df -h sudo du -sh * /var/tmp /tmp /var/lib/containers | sort -sh + - name: Free Disk Space + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be + with: + tool-cache: true + # The following line runs apt remove which is slow + large-packages: false - name: Workaround podman issues in GH actions run: | # see https://github.com/osbuild/bootc-image-builder/issues/446