From ab1739ed4daf12b0abf015625ee7caf2e7986dfd Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 21 Mar 2025 11:53:17 +0100 Subject: [PATCH 001/480] Change yq with jq for image filtering The yq tool might not be installed on every system, so more safe for use would be jq. --- roles/env_op_images/tasks/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/env_op_images/tasks/main.yml b/roles/env_op_images/tasks/main.yml index 0cab9af374..7822587d95 100644 --- a/roles/env_op_images/tasks/main.yml +++ b/roles/env_op_images/tasks/main.yml @@ -60,22 +60,22 @@ oc get ClusterServiceVersion -l operators.coreos.com/openstack-operator.openstack-operators --all-namespaces - -o yaml | - yq e ' + -o json | + jq -r ' [.items[]? | .spec.install.spec.deployments[]? | .spec.template.spec.containers[]? | .env[]? | select(.name? | test("^RELATED_IMAGE")) | - select(.name == "*manager*" or .name == "*MANAGER*") | - {(.name): .value}]' + select(.name | contains("MANAGER")) | + {(.name): .value} ]' register: _sa_images_content args: executable: /bin/bash - name: Extract env variable name and images ansible.builtin.set_fact: - cifmw_openstack_service_images_content: "{{ _sa_images_content.stdout | from_yaml }}" + cifmw_openstack_service_images_content: "{{ _sa_images_content.stdout | from_json }}" - name: Get all the pods in openstack-operator namespace vars: From 3246ff43b020b9960e27e7ce90617a5187a0ff07 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 20 Mar 2025 15:15:19 +0100 Subject: [PATCH 002/480] Add IBM nodesets that will enforce spawning CI on IBM hosts Some CI jobs would be executed directly on the IBM private cloud until Zuul CI will not make round robin between all providers. 
--- zuul.d/nodeset.yaml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index 00180463e5..22a6ae37c8 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -598,3 +598,28 @@ nodes: - name: controller label: centos-9-stream-crc-2-48-0-xl + +### Molecule jobs - force use IBM hosts ### +- nodeset: + name: centos-9-crc-2-48-0-xl-ibm + nodes: + - name: controller + label: centos-9-stream-crc-2-48-0-xl-ibm + +- nodeset: + name: centos-9-crc-2-48-0-xxl-ibm + nodes: + - name: controller + label: centos-9-stream-crc-2-48-0-xxl-ibm + +- nodeset: + name: centos-9-crc-2-48-0-3xl-ibm + nodes: + - name: controller + label: centos-9-stream-crc-2-48-0-3xl-ibm + +- nodeset: + name: centos-9-crc-2-39-0-6xlarge-ibm + nodes: + - name: controller + label: centos-9-stream-crc-2-39-0-6xlarge-ibm From 1b7569863fee0d4e078adc17e91a37cb12d03c02 Mon Sep 17 00:00:00 2001 From: Jiri Macku Date: Mon, 24 Feb 2025 16:22:44 +0100 Subject: [PATCH 003/480] Add retry to vm start --- roles/libvirt_manager/tasks/start_one_vm.yml | 10 ++++++++++ roles/libvirt_manager/tasks/start_vms.yml | 6 ++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 roles/libvirt_manager/tasks/start_one_vm.yml diff --git a/roles/libvirt_manager/tasks/start_one_vm.yml b/roles/libvirt_manager/tasks/start_one_vm.yml new file mode 100644 index 0000000000..e03187fbb6 --- /dev/null +++ b/roles/libvirt_manager/tasks/start_one_vm.yml @@ -0,0 +1,10 @@ +--- +- name: Start vm + community.libvirt.virt: + name: "cifmw-{{ vm }}" + state: running + uri: "qemu:///system" + register: _vm_start_result + retries: 5 + delay: 30 + until: _vm_start_result is not failed diff --git a/roles/libvirt_manager/tasks/start_vms.yml b/roles/libvirt_manager/tasks/start_vms.yml index da1fb7cc85..5ab369f8f9 100644 --- a/roles/libvirt_manager/tasks/start_vms.yml +++ b/roles/libvirt_manager/tasks/start_vms.yml @@ -20,10 +20,8 @@ {{ 
_cifmw_libvirt_manager_layout.vms[vm_type] }} - community.libvirt.virt: - state: running - name: "cifmw-{{ vm }}" - uri: "qemu:///system" + ansible.builtin.include_tasks: + file: start_one_vm.yml loop: "{{ cifmw_libvirt_manager_all_vms | dict2items }}" loop_control: loop_var: _vm From 9a1f06019cbfa5f7000ddcecb40895b7bf1668ef Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 20 Mar 2025 11:19:51 +0100 Subject: [PATCH 004/480] Remove owners file as we use codeowners now --- OWNERS | 30 ------------------------------ zuul.d/adoption.yaml | 2 -- zuul.d/base.yaml | 2 -- zuul.d/end-to-end.yaml | 1 - 4 files changed, 35 deletions(-) delete mode 100644 OWNERS diff --git a/OWNERS b/OWNERS deleted file mode 100644 index 2de4f58d3d..0000000000 --- a/OWNERS +++ /dev/null @@ -1,30 +0,0 @@ -approvers: - - abays - - bshewale - - cescgina - - evallesp - - frenzyfriday - - fultonj - - lewisdenny - - pablintino - -reviewers: - - adrianfusco - - afazekas - - arxcruz - - bshewale - - cescgina - - dasm - - dpinhas - - dsariel - - eurijon - - frenzyfriday - - hjensas - - lewisdenny - - marios - - katarimanojk - - pojadhav - - queria - - rachael-george - - rlandy - - viroel diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index c75bc1f9c8..c435b374b9 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -152,8 +152,6 @@ - ^LICENSE$ - ^.github/.*$ - ^LICENSE$ - - ^OWNERS$ - - ^OWNERS_ALIASES$ - ^PROJECT$ - ^README.md$ - ^kuttl-test.yaml$ diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 1bf8a7f998..bb8caa3249 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -31,8 +31,6 @@ - .*/*.md - ^.github/.*$ - ^LICENSE$ - - ^OWNERS$ - - ^OWNERS_ALIASES$ - ^PROJECT$ - ^README.md$ - ^renovate.json$ diff --git a/zuul.d/end-to-end.yaml b/zuul.d/end-to-end.yaml index 176ffe3bc1..ab74f09a7e 100644 --- a/zuul.d/end-to-end.yaml +++ b/zuul.d/end-to-end.yaml @@ -49,7 +49,6 @@ - ^ci/templates - ^docs - ^.*/*.md - - ^OWNERS - ^.github vars: cifmw_extras: From 
5264230f3b00baf5547835aa46ca025cb75dc442 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 20 Mar 2025 14:47:31 +0100 Subject: [PATCH 005/480] Use only IBM hosts for some molecule CI jobs The IBM hosts have less usage so some part of jobs might less fail. Temporary use only IBM hosts for some molecule CI jobs until Zuul will round robin jobs between all providers. Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/2812 --- ci/config/molecule.yaml | 26 +++++++++++++------------- zuul.d/molecule.yaml | 26 +++++++++++++------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/ci/config/molecule.yaml b/ci/config/molecule.yaml index 80fbf3aea1..67821bec97 100644 --- a/ci/config/molecule.yaml +++ b/ci/config/molecule.yaml @@ -8,16 +8,16 @@ timeout: 3600 - job: name: cifmw-molecule-openshift_login - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-openshift_provisioner_node - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-openshift_setup - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-rhol_crc - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm timeout: 5400 - job: name: cifmw-molecule-operator_deploy @@ -45,13 +45,13 @@ - job: name: cifmw-molecule-install_openstack_ca parent: cifmw-molecule-base-crc - nodeset: centos-9-crc-2-48-0-3xl + nodeset: centos-9-crc-2-48-0-3xl-ibm timeout: 5400 extra-vars: crc_parameters: "--memory 29000 --disk-size 100 --cpus 8" - job: name: cifmw-molecule-reproducer - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm timeout: 5400 files: - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* @@ -62,10 +62,10 @@ - ^roles/rhol_crc/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - job: name: cifmw-molecule-cert_manager - 
nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm - job: name: cifmw-molecule-env_op_images - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw_molecule-pkg_build files: @@ -82,19 +82,19 @@ - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - job: name: cifmw-molecule-manage_secrets - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-ci_local_storage - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-networking_mapper nodeset: 4x-centos-9-medium - job: name: cifmw-molecule-openshift_obs - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm - job: name: cifmw-molecule-sushy_emulator - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm - job: name: cifmw-molecule-shiftstack - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index a30140fcfc..0b82ef1470 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -52,7 +52,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cert_manager - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base vars: TEST_RUN: cert_manager @@ -77,7 +77,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_local_storage - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: ci_local_storage @@ -364,7 +364,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-env_op_images - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: env_op_images @@ -422,7 +422,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-install_openstack_ca - nodeset: 
centos-9-crc-2-48-0-3xl + nodeset: centos-9-crc-2-48-0-3xl-ibm parent: cifmw-molecule-base-crc timeout: 5400 vars: @@ -474,7 +474,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-manage_secrets - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: manage_secrets @@ -520,7 +520,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_login - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: openshift_login @@ -532,7 +532,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_obs - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base vars: TEST_RUN: openshift_obs @@ -544,7 +544,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_provisioner_node - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: openshift_provisioner_node @@ -556,7 +556,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_setup - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: openshift_setup @@ -674,7 +674,7 @@ - ^roles/sushy_emulator/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/rhol_crc/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* name: cifmw-molecule-reproducer - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base timeout: 5400 vars: @@ -687,7 +687,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-rhol_crc - nodeset: centos-9-crc-2-48-0-xxl + nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base timeout: 5400 vars: @@ -722,7 +722,7 @@ - ^ci/playbooks/molecule.* - 
^.config/molecule/.* name: cifmw-molecule-shiftstack - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: shiftstack @@ -745,7 +745,7 @@ - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-sushy_emulator - nodeset: centos-9-crc-2-48-0-xl + nodeset: centos-9-crc-2-48-0-xl-ibm parent: cifmw-molecule-base vars: TEST_RUN: sushy_emulator From ac52dce6781b753b3e6a50ecdb24b8bd848a30ab Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Tue, 18 Mar 2025 09:08:29 +0100 Subject: [PATCH 006/480] [Test of] Adds GH action to sync branches --- .../workflows/sync_branches_periodically.yml | 12 ++++++ .../sync_branches_reusable_workflow.yml | 39 +++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 .github/workflows/sync_branches_periodically.yml create mode 100644 .github/workflows/sync_branches_reusable_workflow.yml diff --git a/.github/workflows/sync_branches_periodically.yml b/.github/workflows/sync_branches_periodically.yml new file mode 100644 index 0000000000..3afb031a75 --- /dev/null +++ b/.github/workflows/sync_branches_periodically.yml @@ -0,0 +1,12 @@ +--- +name: Periodically sync branches +on: + schedule: + - cron: '0 21 * * 1' + +jobs: + trigger_sync: + uses: openstack-k8s-operators/ci-framework/.github/workflows/sync_branches_reusable_workflow.yml@main + with: + main-branch: main + follower-branch: ananya-do-not-use-tmp diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml new file mode 100644 index 0000000000..4171dd5c6b --- /dev/null +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -0,0 +1,39 @@ +--- +name: Sync a follower branch with Main +on: + workflow_call: + inputs: + main-branch: + required: true + type: string + follower-branch: + required: true + type: string + +jobs: + sync: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + steps: + - name: 
Checkout main branch + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: + ${{ inputs.main-branch }} + + - name: Checkout, rebase and push to follower branch + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: + ${{ inputs.follower-branch }} + - run: | + # Details about the GH action bot comes from + # https://api.github.com/users/github-actions%5Bbot%5D + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git rebase origin/${{ inputs.main-branch }} + git push origin ${{ inputs.follower-branch }} From de111ed6e1eb42432937be40fe4976fb9c41de2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Wed, 12 Mar 2025 13:30:38 +0100 Subject: [PATCH 007/480] Partial revert commit 4188627 PR#2839 introduced an issue where pre and post hooks are executed multiple times. Let's revert the loop on pre/post hook execution to restore the previous behaviour. I belive a `stage.name` can still be used for `cifmw_architecture_user_kustomize` after this revert. 
--- roles/kustomize_deploy/tasks/execute_step.yml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/roles/kustomize_deploy/tasks/execute_step.yml b/roles/kustomize_deploy/tasks/execute_step.yml index 6b4669c07a..251dbea744 100644 --- a/roles/kustomize_deploy/tasks/execute_step.yml +++ b/roles/kustomize_deploy/tasks/execute_step.yml @@ -122,13 +122,9 @@ - not cifmw_kustomize_deploy_generate_crs_only | bool vars: hooks: "{{ stage.pre_stage_run | default([]) }}" - step: "{{ item }}" + step: "pre_{{ _stage_name_id }}_run" ansible.builtin.include_role: name: run_hook - loop: - - "pre_{{ _stage_name_id }}_run" - - "pre_{{ _stage_name }}_run" - - "pre_{{ _stage_name | replace( '-', '_') }}_run" - name: "Generate values.yaml for {{ stage.path }}" when: @@ -319,10 +315,6 @@ - not cifmw_kustomize_deploy_generate_crs_only | bool vars: hooks: "{{ stage.post_stage_run | default([]) }}" - step: "{{ item }}" + step: "post_{{ _stage_name_id }}_run" ansible.builtin.include_role: name: run_hook - loop: - - "post_{{ _stage_name_id }}_run" - - "post_{{ _stage_name }}_run" - - "post_{{ _stage_name | replace('-', '_') }}_run" From 29673422c7b83132ec74dd86aa262f426995b369 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Mon, 24 Mar 2025 12:29:18 +0100 Subject: [PATCH 008/480] Adds GH action to sync branches --- .../workflows/sync_branches_periodically.yml | 11 +++--- .../sync_branches_reusable_workflow.yml | 39 ------------------- 2 files changed, 6 insertions(+), 44 deletions(-) delete mode 100644 .github/workflows/sync_branches_reusable_workflow.yml diff --git a/.github/workflows/sync_branches_periodically.yml b/.github/workflows/sync_branches_periodically.yml index 3afb031a75..dbe46e6864 100644 --- a/.github/workflows/sync_branches_periodically.yml +++ b/.github/workflows/sync_branches_periodically.yml @@ -1,12 +1,13 @@ --- -name: Periodically sync branches +name: Olive Branch sync + on: schedule: - cron: '0 21 * * 1' jobs: - trigger_sync: - uses: 
openstack-k8s-operators/ci-framework/.github/workflows/sync_branches_reusable_workflow.yml@main + trigger-sync: + uses: openstack-k8s-operators/openstack-k8s-operators-ci/.github/workflows/release-branch-sync.yaml@main with: - main-branch: main - follower-branch: ananya-do-not-use-tmp + source_branch: main + target_branch: olive diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml deleted file mode 100644 index 4171dd5c6b..0000000000 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -name: Sync a follower branch with Main -on: - workflow_call: - inputs: - main-branch: - required: true - type: string - follower-branch: - required: true - type: string - -jobs: - sync: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - steps: - - name: Checkout main branch - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: - ${{ inputs.main-branch }} - - - name: Checkout, rebase and push to follower branch - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: - ${{ inputs.follower-branch }} - - run: | - # Details about the GH action bot comes from - # https://api.github.com/users/github-actions%5Bbot%5D - git config user.name "github-actions[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - git rebase origin/${{ inputs.main-branch }} - git push origin ${{ inputs.follower-branch }} From 9392e65c6141982a7fdf158c20d8a646a3defde8 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Fri, 21 Mar 2025 12:07:24 +0100 Subject: [PATCH 009/480] Revert "Use zuul clonned repos for upstream in build_openstack_packages role" This is breaking builds with downstream driver in two ways: - Not using --local requires the repos to have proper remote definition. - pre-run scripts as patch_rebaser [1] also require proper remotes. 
For the first, we may handle it via change in dlrn [2], but for the second one I'm not sure about the best way. [1] https://github.com/release-depot/patch_rebaser/ [2] https://softwarefactory-project.io/r/c/DLRN/+/33366 This reverts commit f79ca6f89ae08948f74e34fdd32a691a7cef9f06. --- .../tasks/parse_and_build_pkgs.yml | 1 - .../tasks/run_dlrn.yml | 26 ++++++++++++++----- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml index 34380f1cde..92352610b7 100644 --- a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml +++ b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml @@ -19,7 +19,6 @@ 'project': item.project.name, 'branch': item.branch, 'change': item.change, - 'src_dir': item.project.src_dir, 'refspec': '/'.join(['refs', 'changes', item.change[-2:], item.change, diff --git a/roles/build_openstack_packages/tasks/run_dlrn.yml b/roles/build_openstack_packages/tasks/run_dlrn.yml index 31da6ed4a9..3ef88d9feb 100644 --- a/roles/build_openstack_packages/tasks/run_dlrn.yml +++ b/roles/build_openstack_packages/tasks/run_dlrn.yml @@ -114,15 +114,29 @@ dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' version: '{{ _change.branch }}' - - name: "Symlink {{ project_name_mapped.stdout }} from Zuul clonned repos" # noqa: name[template] + - name: "Clone {{ project_name_mapped.stdout }} from Github" # noqa: name[template] when: - cifmw_bop_openstack_project_path | length == 0 - not repo_status.stat.exists - - "'src_dir' in _change" - ansible.builtin.file: - src: '{{ ansible_user_dir }}/{{ _change.src_dir }}' - path: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' - state: link + - "'host' in _change" + - "'github.com' in _change.host" + ansible.builtin.git: + repo: '{{ _change.host }}/{{ _change.project }}' + dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ 
project_name_mapped.stdout }}' + refspec: "+refs/pull/*:refs/remotes/origin/pr/*" + version: 'origin/pr/{{ _change.change }}/head' + + - name: "Clone Openstack {{ project_name_mapped.stdout }}" # noqa: name[template] + when: + - cifmw_bop_openstack_project_path | length == 0 + - not repo_status.stat.exists + - "'host' in _change" + - "'opendev' in _change.host" + ansible.builtin.git: + repo: '{{ _change.host }}/{{ _change.project }}' + dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' + refspec: "{{ _change.refspec }}" + version: 'FETCH_HEAD' - name: "Update packages.yml to use zuul repo for {{ project_name_mapped.stdout }}" # noqa: name[template], command-instead-of-module vars: From 93b065db81c42d12cf36bd25885d9afc549330a2 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Fri, 21 Mar 2025 13:57:52 +0100 Subject: [PATCH 010/480] List the zuul changes in reverse order when building packages After reverting [1] we still have problems when building multiple in-flight patches in any form: 1. Having multiple depends-on on the same package does not work. 2. Piling multiple reviews and adding the depends-on to the top one is also not working. This patch is reversing the order of the related zuul changes to fix the case (2) as it will find and process the top change for the repo. 
--- roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml index 92352610b7..4f138c76ac 100644 --- a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml +++ b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml @@ -1,6 +1,6 @@ --- - name: Parse Zuul changes - with_items: "{{ zuul['items'] }}" + with_items: "{{ zuul['items'] | reverse | list }}" when: - zuul is defined - "'change_url' in item" From 9fae0d135b18fc2b9ecb8c391ae9928cc9c06a95 Mon Sep 17 00:00:00 2001 From: Lewis Denny Date: Wed, 19 Mar 2025 13:18:31 +1000 Subject: [PATCH 011/480] Move baremetal and Tufu jobs to 4.18 nested --- zuul.d/base.yaml | 2 +- zuul.d/edpm.yaml | 2 +- zuul.d/end-to-end.yaml | 2 +- zuul.d/tofu.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index bb8caa3249..2b0d1e6081 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -251,7 +251,7 @@ # - job: name: cifmw-base-crc - nodeset: centos-9-crc-2-39-0-3xl + nodeset: centos-9-crc-2-48-0-3xl timeout: 10800 abstract: true parent: base-simple-crc diff --git a/zuul.d/edpm.yaml b/zuul.d/edpm.yaml index 6d5ec73a83..2e0a2871db 100644 --- a/zuul.d/edpm.yaml +++ b/zuul.d/edpm.yaml @@ -11,7 +11,7 @@ # Virtual Baremetal job with CRC and single compute node. 
- job: name: cifmw-crc-podified-edpm-baremetal - nodeset: centos-9-crc-2-39-0-6xlarge + nodeset: centos-9-crc-2-48-0-6xlarge parent: cifmw-base-crc-openstack run: ci/playbooks/edpm_baremetal_deployment/run.yml vars: diff --git a/zuul.d/end-to-end.yaml b/zuul.d/end-to-end.yaml index ab74f09a7e..a92c576f85 100644 --- a/zuul.d/end-to-end.yaml +++ b/zuul.d/end-to-end.yaml @@ -2,7 +2,7 @@ # cifmw base job - job: name: cifmw-end-to-end-base - nodeset: centos-9-crc-2-39-0-3xl + nodeset: centos-9-crc-2-48-0-3xl parent: base-simple-crc vars: crc_parameters: "--memory 24000 --disk-size 120 --cpus 8" diff --git a/zuul.d/tofu.yaml b/zuul.d/tofu.yaml index 0ce68588ba..a96151fab3 100644 --- a/zuul.d/tofu.yaml +++ b/zuul.d/tofu.yaml @@ -6,7 +6,7 @@ - ^ci/playbooks/molecule.* - ^ci_framework/playbooks/run_tofu.yml name: cifmw-molecule-tofu - nodeset: centos-9-crc-2-39-0-xl + nodeset: centos-9-crc-2-48-0-xl parent: cifmw-molecule-base vars: TEST_RUN: tofu From 33e77699206e457064b1a01863f5e702b95efc2e Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Fri, 14 Mar 2025 09:25:28 -0400 Subject: [PATCH 012/480] Set extra mounts for test-operator This PR exposes the extraMounts parameter for all test-operator related CRs (Tempest, Tobiko, AnsibleTest, HorizonTest). This parameter can be used to specify additional volume mounts for the test pods spawned by test-operator. --- roles/test_operator/README.md | 4 ++++ roles/test_operator/defaults/main.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 1e87dd1639..ab1222f96b 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -65,6 +65,7 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_network_attachments`: (List) List of network attachment definitions to attach to the tempest pods spawned by test-operator. Default value: `[]`. * `cifmw_test_operator_tempest_extra_rpms`: (List) . 
A list of URLs that point to RPMs that should be installed before the execution of tempest. Note that this parameter has no effect when `cifmw_test_operator_tempest_external_plugin` is used. Default value: `[]` * `cifmw_test_operator_tempest_extra_configmaps_mounts`: (List) A list of configmaps that should be mounted into the tempest test pods. Default value: `[]` +* `cifmw_test_operator_tempest_extra_mounts`: (List) A list of additional volume mounts for the tempest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_tempest_debug`: (Bool) Run Tempest in debug mode, it keeps the operator pod sleeping infinity (it must only set to `true`only for debugging purposes). Default value: `false` * `cifmw_test_operator_tempest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When untouched it clears the default values set on the test-operator side. This means that the tempest test pods run with unspecified resource limits. Default value: `{requests: {}, limits: {}}` * `cifmw_tempest_tempestconf_config`: Deprecated, please use `cifmw_test_operator_tempest_tempestconf_config` instead @@ -122,6 +123,7 @@ Default value: {} * `cifmw_test_operator_tobiko_network_attachments`: (List) List of network attachment definitions to attach to the tobiko pods spawned by test-operator. Default value: `[]`. * `cifmw_test_operator_tobiko_workflow`: (List) Definition of a Tobiko workflow that consists of multiple steps. Each step can contain all values from Spec section of [Tobiko CR](https://openstack-k8s-operators.github.io/test-operator/crds.html#tobiko-custom-resource). * `cifmw_test_operator_tobiko_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When kept untouched it defaults to the resource limits specified on the test-operator side. 
Default value: `{}` +* `cifmw_test_operator_tobiko_extra_mounts`: (List) A list of additional volume mounts for the tobiko test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_tobiko_config`: (Dict) Definition of Tobiko CRD instance that is passed to the test-operator (see [the test-operator documentation](https://openstack-k8s-operators.github.io/test-operator/crds.html#tobiko-custom-resource)). Default value: ``` apiVersion: test.openstack.org/v1beta1 @@ -168,6 +170,7 @@ Default value: {} * `cifmw_test_operator_ansibletest_debug`: (Bool) Run ansible playbook with -vvvv. Default value: `false` * `cifmw_test_operator_ansibletest_workflow`: (List) A parameter that contains a workflow definition. Default value: `[]` * `cifmw_test_operator_ansibletest_extra_configmaps_mounts`: (List) Extra configmaps for mounting in the pod. Default value: `[]` +* `cifmw_test_operator_ansibletest_extra_mounts`: (List) A list of additional volume mounts for the ansibletest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_ansibletest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When kept untouched it defaults to the resource limits specified on the test-operator side. Default value: `{}` * `cifmw_test_operator_ansibletest_config`: Definition of AnsibleTest CRD instance that is passed to the test-operator (see [the test-operator documentation](https://openstack-k8s-operators.github.io/test-operator/crds.html)). Default value: ``` @@ -215,6 +218,7 @@ Default value: {} * `cifmw_test_operator_horizontest_logs_directory_name`: (String) The name of the directory to store test logs. Default value: `horizon` * `cifmw_test_operator_horizontest_horizon_test_dir`: (String) The directory path for Horizon tests. 
Default value: `/var/lib/horizontest` * `cifmw_test_operator_horizontest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When kept untouched it defaults to the resource limits specified on the test-operator side. Default value: `{}` +* `cifmw_test_operator_horizontest_extra_mounts`: (List) A list of additional volume mounts for the horizontest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_horizontest_debug`: (Bool) Run HorizonTest in debug mode, it keeps the operator pod sleeping infinitely (it must only set to `true` only for debugging purposes). Default value: `false` * `cifmw_test_operator_horizontest_extra_flag`: (String) The extra flag to modify pytest command to include/exclude tests. Default value: `not pagination` * `cifmw_test_operator_horizontest_project_name_xpath`: (String) The xpath to select project name based on dashboard theme. Default value: `//span[@class='rcueicon rcueicon-folder-open']/ancestor::li` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index c453324516..d1953c9c02 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -131,6 +131,7 @@ cifmw_test_operator_tempest_config: tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" nodeSelector: "{{ cifmw_test_operator_node_selector | default(omit) }}" extraConfigmapsMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_configmaps_mounts | default(omit) }}" + extraMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_mounts | default(omit) }}" resources: "{{ stage_vars_dict.cifmw_test_operator_tempest_resources }}" tempestRun: includeList: | @@ -188,6 +189,7 @@ cifmw_test_operator_tobiko_config: nodeSelector: "{{ cifmw_test_operator_node_selector | default(omit) }}" debug: "{{ stage_vars_dict.cifmw_test_operator_tobiko_debug }}" networkAttachments: "{{ 
stage_vars_dict.cifmw_test_operator_tobiko_network_attachments }}" + extraMounts: "{{ stage_vars_dict.cifmw_test_operator_tobiko_extra_mounts | default(omit) }}" resources: "{{ stage_vars_dict.cifmw_test_operator_tobiko_resources }}" # preventCreate: preventCreate is generated by the test_operator role based on the value of stage_vars_dict.cifmw_test_operator_tobiko_prevent_create # numProcesses: numProcesses is generated by the test_operator role based on the value of stage_vars_dict.cifmw_test_operator_tobiko_num_processes @@ -227,6 +229,7 @@ cifmw_test_operator_ansibletest_config: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" containerImage: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_image }}:{{ stage_vars_dict.cifmw_test_operator_ansibletest_image_tag }}" extraConfigmapsMounts: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_extra_configmaps_mounts }}" + extraMounts: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_extra_mounts | default(omit) }}" storageClass: "{{ cifmw_test_operator_storage_class }}" privileged: "{{ cifmw_test_operator_privileged }}" computeSSHKeySecretName: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_compute_ssh_key_secret_name }}" @@ -294,4 +297,5 @@ cifmw_test_operator_horizontest_config: extraFlag: "{{ stage_vars_dict.cifmw_test_operator_horizontest_extra_flag }}" projectNameXpath: "{{ stage_vars_dict.cifmw_test_operator_horizontest_project_name_xpath }}" horizonTestDir: "{{ stage_vars_dict.cifmw_test_operator_horizontest_horizon_test_dir }}" + extraMounts: "{{ stage_vars_dict.cifmw_test_operator_horizontest_extra_mounts | default(omit) }}" resources: "{{ stage_vars_dict.cifmw_test_operator_horizontest_resources }}" From 3c44d02f7a179f2e9561fc26f5c520fba61787c4 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Tue, 25 Mar 2025 15:04:59 +0530 Subject: [PATCH 013/480] Use zuul clonned repos for upstream in build_openstack_packages role In build_openstack_packages role for building rpms from 
gerrit and github, we are cloning the repo and checking out a
specific change. With these tasks, we are not able to build dlrn rpms
if we have multiple prs/crs from the same project. But we want to
build rpms from all changes.

In order to fix this, Zuul knows how to checkout proper repos with
changes under test and from Depends-on. DLRN always looks for cloned
repos in the DLRN data directory. In order to use zuul cloned sources
with DLRN, we are creating a symlink to the dlrn data project
directory and letting DLRN do the job for upstream repos only.

Note: we reverted the similar pr[1] here as the original change was
lacking a proper condition[2], leading to breaking the downstream
build openstack packages role. By adding a proper conditional for
upstream, it fixes the issue.

Links:
[1]. https://github.com/openstack-k8s-operators/ci-framework/pull/2818
[2]. https://github.com/openstack-k8s-operators/ci-framework/commit/f79ca6f89ae08948f74e34fdd32a691a7cef9f06#diff-fedeaff036de20345e170c4f65374926975f17139c058369f2e909565054e1adR118-L134

Signed-off-by: Chandan Kumar (raukadah)
---
 .../tasks/parse_and_build_pkgs.yml |  1 +
 .../tasks/run_dlrn.yml | 26 +++++--------------
 2 files changed, 7 insertions(+), 20 deletions(-)

diff --git a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml
index 4f138c76ac..6560a3826a 100644
--- a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml
+++ b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml
@@ -19,6 +19,7 @@
             'project': item.project.name,
             'branch': item.branch,
             'change': item.change,
+            'src_dir': item.project.src_dir,
             'refspec': '/'.join(['refs', 'changes',
                                  item.change[-2:],
                                  item.change,
diff --git a/roles/build_openstack_packages/tasks/run_dlrn.yml b/roles/build_openstack_packages/tasks/run_dlrn.yml
index 3ef88d9feb..6ba082dec2 100644
--- a/roles/build_openstack_packages/tasks/run_dlrn.yml
+++ b/roles/build_openstack_packages/tasks/run_dlrn.yml
@@ -114,29 +114,15 @@
dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' version: '{{ _change.branch }}' - - name: "Clone {{ project_name_mapped.stdout }} from Github" # noqa: name[template] + - name: "Symlink {{ project_name_mapped.stdout }} from Zuul clonned repos for upstream" # noqa: name[template] when: - cifmw_bop_openstack_project_path | length == 0 - not repo_status.stat.exists - - "'host' in _change" - - "'github.com' in _change.host" - ansible.builtin.git: - repo: '{{ _change.host }}/{{ _change.project }}' - dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' - refspec: "+refs/pull/*:refs/remotes/origin/pr/*" - version: 'origin/pr/{{ _change.change }}/head' - - - name: "Clone Openstack {{ project_name_mapped.stdout }}" # noqa: name[template] - when: - - cifmw_bop_openstack_project_path | length == 0 - - not repo_status.stat.exists - - "'host' in _change" - - "'opendev' in _change.host" - ansible.builtin.git: - repo: '{{ _change.host }}/{{ _change.project }}' - dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' - refspec: "{{ _change.refspec }}" - version: 'FETCH_HEAD' + - cifmw_bop_osp_release is not defined + ansible.builtin.file: + src: '{{ ansible_user_dir }}/{{ _change.src_dir }}' + path: '{{ cifmw_bop_build_repo_dir }}/DLRN/data/{{ project_name_mapped.stdout }}' + state: link - name: "Update packages.yml to use zuul repo for {{ project_name_mapped.stdout }}" # noqa: name[template], command-instead-of-module vars: From c5309b2f49c340ec3e775fd8114f499b8a1404c4 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Tue, 25 Mar 2025 15:58:06 +0100 Subject: [PATCH 014/480] Adds gh actions to sync branches --- .../workflows/sync_branches_periodically.yml | 4 +-- .../sync_branches_reusable_workflow.yml | 33 +++++++++++++++++++ .../sync_branches_with_ext_trigger.yml | 18 ++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 
.github/workflows/sync_branches_reusable_workflow.yml
 create mode 100644 .github/workflows/sync_branches_with_ext_trigger.yml

diff --git a/.github/workflows/sync_branches_periodically.yml b/.github/workflows/sync_branches_periodically.yml
index dbe46e6864..5f06fff94e 100644
--- a/.github/workflows/sync_branches_periodically.yml
+++ b/.github/workflows/sync_branches_periodically.yml
@@ -3,11 +3,11 @@ name: Olive Branch sync
 
 on:
   schedule:
-    - cron: '0 21 * * 1'
+    - cron: '0 19 * * *'
 
 jobs:
   trigger-sync:
     uses: openstack-k8s-operators/openstack-k8s-operators-ci/.github/workflows/release-branch-sync.yaml@main
     with:
       source_branch: main
-      target_branch: olive
+      target_branch: ananya-do-not-use-tmp  # Hardcoded till testing finishes
diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml
new file mode 100644
index 0000000000..483c04f852
--- /dev/null
+++ b/.github/workflows/sync_branches_reusable_workflow.yml
@@ -0,0 +1,33 @@
+---
+name: Sync a follower branch with Main
+on:
+  workflow_call:
+    inputs:
+      main-branch:
+        required: true
+        type: string
+      follower-branch:
+        required: true
+        type: string
+
+jobs:
+  sync:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+    steps:
+      - name: Checkout, rebase and push to target branch
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref:
+            ${{ inputs.follower-branch }}
+      - run: |
+          # Details about the GH action bot comes from
+          # https://api.github.com/users/openshift-merge-robot
+          git config user.name "openshift-merge-robot"
+          git config user.email "30189218+openshift-merge-robot@users.noreply.github.com"
+          git fetch
+          git rebase origin/${{ inputs.main-branch }}
+          git push origin ${{ inputs.follower-branch }}
diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml
new file mode 100644
index 0000000000..ce4e4bf942
--- /dev/null
+++ 
b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -0,0 +1,18 @@ +name: Sync branches with external trigger + +on: + workflow_dispatch: + inputs: + source-branch: + required: false + default: 'main' + target-branch: + required: false + default: 'ananya-do-not-use-tmp' + +jobs: + trigger-sync: + uses: openstack-k8s-operators/openstack-k8s-operators-ci/.github/workflows/release-branch-sync.yaml@main + with: + source_branch: ${{ inputs.source-branch }} + target_branch: ananya-do-not-use-tmp # Hardcoded till testing finishes From c50dea3577d8fcc871861a8957c7c04dd8506ef4 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 21 Mar 2025 11:36:59 +0530 Subject: [PATCH 015/480] Enable risky-file-permissions linter --- .ansible-lint | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ansible-lint b/.ansible-lint index eca5d61350..3234711638 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -41,10 +41,10 @@ enable_list: - no-log-password - no-same-owner - name[play] + - risky-file-permissions skip_list: - jinja[spacing] # We don't really want to get that one. Too picky - no-changed-when # once we get the oc module we can re-enable it - - risky-file-permissions # Seems to fail on 0644 on files ?! - schema[meta] # Apparently "CentOS 9" isn't known... ?! - schema[vars] # weird issue with some "vars" in playbooks - yaml[line-length] # We have long lines, yes. From 63d1635ed4433a532ec3954127a63b66a8f79f18 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 21 Mar 2025 11:38:15 +0530 Subject: [PATCH 016/480] Add mode for files and dirs The aim is to enable risky-file-permissions linter. 
--- deploy-osp-adoption.yml | 2 ++ docs/source/files/bootstrap-hypervisor.yml | 2 +- hooks/playbooks/adoption_ironic_post_oc.yml | 5 +++++ hooks/playbooks/barbican-enable-luna.yml | 1 + hooks/playbooks/control_plane_ceph_backends.yml | 1 + hooks/playbooks/control_plane_hci_pre_deploy.yml | 1 + hooks/playbooks/control_plane_horizon.yml | 1 + hooks/playbooks/control_plane_ironic.yml | 1 + hooks/playbooks/federation-controlplane-config.yml | 1 + hooks/playbooks/fetch_compute_facts.yml | 4 ++++ hooks/playbooks/ironic_enroll_nodes.yml | 1 + hooks/playbooks/kustomize_cr.yml | 3 +++ hooks/playbooks/kuttl_openstack_prep.yml | 1 + hooks/playbooks/link2file.yml | 1 + playbooks/dcn.yml | 1 + playbooks/nfs.yml | 3 +++ playbooks/unique-id.yml | 1 + playbooks/update.yml | 3 +++ roles/adoption_osp_deploy/tasks/deploy_overcloud.yml | 1 + roles/adoption_osp_deploy/tasks/prepare_undercloud.yml | 2 ++ roles/artifacts/tasks/ansible_logs.yml | 1 + roles/artifacts/tasks/main.yml | 1 + roles/build_openstack_packages/tasks/create_repo.yml | 2 ++ roles/build_openstack_packages/tasks/downstream.yml | 2 ++ roles/build_openstack_packages/tasks/install_dlrn.yml | 2 ++ roles/cert_manager/tasks/olm_manifest.yml | 1 + roles/ci_dcn_site/tasks/ceph.yml | 1 + roles/ci_dcn_site/tasks/scaledown_site.yml | 2 ++ roles/ci_local_storage/tasks/main.yml | 1 + roles/ci_lvms_storage/tasks/main.yml | 1 + roles/ci_multus/molecule/resources/clean.yml | 1 + roles/ci_multus/tasks/main.yml | 2 ++ roles/ci_network/tasks/main.yml | 1 + roles/ci_nmstate/tasks/nmstate_k8s_install.yml | 1 + .../tasks/nmstate_unmanaged_provision_node.yml | 1 + roles/cifmw_cephadm/tasks/dashboard/validation.yml | 2 ++ roles/cifmw_external_dns/tasks/requirements.yml | 1 + roles/compliance/tasks/create_scap_report.yml | 1 + roles/copy_container/molecule/default/converge.yml | 1 + roles/copy_container/tasks/main.yml | 1 + .../molecule/check_cluster_status/tasks/test.yml | 1 + roles/devscripts/tasks/139_configs.yml | 1 + 
roles/devscripts/tasks/main.yml | 1 + roles/dlrn_promote/tasks/get_hash_from_commit.yaml | 1 + roles/edpm_build_images/tasks/main.yml | 1 + roles/edpm_kustomize/tasks/kustomize.yml | 1 + roles/edpm_kustomize/tasks/main.yml | 1 + roles/env_op_images/tasks/main.yml | 1 + roles/federation/tasks/run_keycloak_setup.yml | 4 ++++ roles/federation/tasks/run_openstack_auth_test.yml | 2 ++ roles/federation/tasks/run_openstack_setup.yml | 2 ++ roles/hive/tasks/main.yml | 1 + roles/install_ca/tasks/main.yml | 1 + roles/install_yamls/tasks/main.yml | 2 ++ roles/kustomize_deploy/tasks/install_operators.yml | 1 + .../molecule/generate_network_data/tasks/test.yml | 10 +++++++--- roles/libvirt_manager/tasks/clean_layout.yml | 2 ++ roles/libvirt_manager/tasks/deploy_layout.yml | 3 +++ .../libvirt_manager/tasks/generate_networking_data.yml | 1 + roles/libvirt_manager/tasks/get_image.yml | 1 + roles/mirror_registry/tasks/main.yml | 1 + roles/nat64_appliance/molecule/default/converge.yml | 6 ++++++ roles/networking_mapper/tasks/_gather_facts.yml | 1 + roles/openshift_login/tasks/main.yml | 3 ++- roles/pkg_build/tasks/main.yml | 3 +++ roles/reproducer/tasks/generate_bm_info.yml | 1 + roles/update/tasks/reboot_hypervisor_using_cr.yml | 1 + 67 files changed, 111 insertions(+), 5 deletions(-) diff --git a/deploy-osp-adoption.yml b/deploy-osp-adoption.yml index 45b6aae89a..1dee12e853 100644 --- a/deploy-osp-adoption.yml +++ b/deploy-osp-adoption.yml @@ -85,6 +85,7 @@ ansible.builtin.file: path: "{{ cifmw_basedir }}/artifacts/parameters" state: "directory" + mode: "0755" - name: Save variables for use with hooks vars: @@ -96,6 +97,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/adoption_osp.yml" content: "{{ _content | to_nice_yaml }}" + mode: "0644" - name: Set inventory_file for localhost to use with hooks ansible.builtin.set_fact: inventory_file: "{{ hostvars[_target_host]['inventory_file'] }}" diff --git a/docs/source/files/bootstrap-hypervisor.yml 
b/docs/source/files/bootstrap-hypervisor.yml index 337c9eea2a..96cc0bb90b 100644 --- a/docs/source/files/bootstrap-hypervisor.yml +++ b/docs/source/files/bootstrap-hypervisor.yml @@ -56,7 +56,7 @@ dest: "/etc/sudoers.d/{{ _user }}" owner: root group: root - mode: 0640 + mode: "0640" - name: Install basic packages become: true diff --git a/hooks/playbooks/adoption_ironic_post_oc.yml b/hooks/playbooks/adoption_ironic_post_oc.yml index 198ee8fd51..a97d4164a9 100644 --- a/hooks/playbooks/adoption_ironic_post_oc.yml +++ b/hooks/playbooks/adoption_ironic_post_oc.yml @@ -55,6 +55,7 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ironic-python-agent" + mode: "0755" loop: - osp-undercloud-0 - osp-controller-0 @@ -82,6 +83,7 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.kernel" dest: /var/lib/ironic/httpboot/agent.kernel remote_src: true + mode: "0644" loop: - osp-controller-0 - osp-controller-1 @@ -93,6 +95,7 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.initramfs" dest: /var/lib/ironic/httpboot/agent.ramdisk remote_src: true + mode: "0644" loop: - osp-controller-0 - osp-controller-1 @@ -166,11 +169,13 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ci-framework-data/parameters" + mode: "0755" - name: Write ironic_nodes.yaml on osp-unercloud-o ansible.builtin.copy: content: "{{ _ironic_nodes_slurp.content | b64decode }}" dest: "{{ ansible_user_dir }}/ci-framework-data/parameters/ironic_nodes.yaml" + mode: "0644" - name: Run baremetal create command to enroll the nodes in the Ironic service environment: diff --git a/hooks/playbooks/barbican-enable-luna.yml b/hooks/playbooks/barbican-enable-luna.yml index c3a6a2b8f5..d319e25c52 100644 --- a/hooks/playbooks/barbican-enable-luna.yml +++ b/hooks/playbooks/barbican-enable-luna.yml @@ -46,6 +46,7 @@ login_secret: "{{ cifmw_hsm_login_secret | default('barbican-luna-login', true) }}" ansible.builtin.copy: dest: "{{ cifmw_basedir 
}}/artifacts/manifests/kustomizations/controlplane/93-barbican-luna.yaml" + mode: "0644" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization diff --git a/hooks/playbooks/control_plane_ceph_backends.yml b/hooks/playbooks/control_plane_ceph_backends.yml index 49324a05c2..9d04193788 100644 --- a/hooks/playbooks/control_plane_ceph_backends.yml +++ b/hooks/playbooks/control_plane_ceph_backends.yml @@ -25,3 +25,4 @@ ansible.builtin.template: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/90-ceph-backends-kustomization.yaml" src: "config_ceph_backends.yaml.j2" + mode: "0644" diff --git a/hooks/playbooks/control_plane_hci_pre_deploy.yml b/hooks/playbooks/control_plane_hci_pre_deploy.yml index 97d04349e3..7bf373686e 100644 --- a/hooks/playbooks/control_plane_hci_pre_deploy.yml +++ b/hooks/playbooks/control_plane_hci_pre_deploy.yml @@ -32,3 +32,4 @@ - op: add path: /spec/swift/enabled value: {{ cifmw_services_swift_enabled | default('false') }} + mode: "0644" diff --git a/hooks/playbooks/control_plane_horizon.yml b/hooks/playbooks/control_plane_horizon.yml index 010e1eace7..852298c741 100644 --- a/hooks/playbooks/control_plane_horizon.yml +++ b/hooks/playbooks/control_plane_horizon.yml @@ -26,3 +26,4 @@ - op: add path: /spec/horizon/template/memcachedInstance value: memcached + mode: "0644" diff --git a/hooks/playbooks/control_plane_ironic.yml b/hooks/playbooks/control_plane_ironic.yml index 7f278107d2..b0faee9fb1 100644 --- a/hooks/playbooks/control_plane_ironic.yml +++ b/hooks/playbooks/control_plane_ironic.yml @@ -24,3 +24,4 @@ - op: add path: /spec/ironic/enabled value: {{ cifmw_services_ironic_enabled | default('false') }} + mode: "0644" diff --git a/hooks/playbooks/federation-controlplane-config.yml b/hooks/playbooks/federation-controlplane-config.yml index bd9b9b76f9..845d3958de 100644 --- a/hooks/playbooks/federation-controlplane-config.yml +++ b/hooks/playbooks/federation-controlplane-config.yml @@ -37,6 +37,7 @@ 
remote_id_attribute=HTTP_OIDC_ISS [auth] methods = password,token,oauth1,mapped,application_credential,openid + mode: "0644" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 96c3c183f8..283d7030fa 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -17,6 +17,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" + mode: "0644" - name: Build dataset hook hosts: localhost @@ -106,6 +107,7 @@ "values": [] } ] + mode: "0644" - name: Prepare EDPM deploy related facts and keys when: @@ -135,6 +137,7 @@ vars: dns_servers: "{{ ((['192.168.122.10'] + ansible_facts['dns']['nameservers']) | unique)[0:2] }}" ansible.builtin.copy: + mode: "0644" dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/dataplane/99-kustomization.yaml" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 @@ -270,3 +273,4 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/{{ step }}_{{ hook_name }}.yml" content: "{{ file_content | to_nice_yaml }}" + mode: "0644" diff --git a/hooks/playbooks/ironic_enroll_nodes.yml b/hooks/playbooks/ironic_enroll_nodes.yml index b27f333a6f..e4edb57799 100644 --- a/hooks/playbooks/ironic_enroll_nodes.yml +++ b/hooks/playbooks/ironic_enroll_nodes.yml @@ -61,6 +61,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/parameters/ironic_nodes.yaml" content: "{{ ironic_nodes | to_yaml }}" + mode: "0644" - name: Enroll ironic nodes ansible.builtin.shell: | diff --git a/hooks/playbooks/kustomize_cr.yml b/hooks/playbooks/kustomize_cr.yml index 4ee5ad7eac..752b71d5ce 100644 --- a/hooks/playbooks/kustomize_cr.yml +++ b/hooks/playbooks/kustomize_cr.yml @@ -27,6 +27,7 @@ ansible.builtin.copy: src: "{{ cifmw_kustomize_cr_file_path }}/{{ cifmw_kustomize_cr_file_name }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/{{ cifmw_kustomize_cr_file_name }}" + 
mode: "0644" remote_src: true - name: Generate kustomization file @@ -34,6 +35,7 @@ ansible.builtin.template: src: "{{ playbook_dir }}/{{ cifmw_kustomize_cr_template }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomization.yaml" + mode: "0644" - name: Run oc kustomize environment: @@ -47,3 +49,4 @@ ansible.builtin.copy: dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomized_{{ cifmw_kustomize_cr_file_name }}" content: "{{ kustomized_cr.stdout }}" + mode: "0644" diff --git a/hooks/playbooks/kuttl_openstack_prep.yml b/hooks/playbooks/kuttl_openstack_prep.yml index 6d9ab067d5..21a6797046 100644 --- a/hooks/playbooks/kuttl_openstack_prep.yml +++ b/hooks/playbooks/kuttl_openstack_prep.yml @@ -42,3 +42,4 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/{{ step }}_{{ hook_name }}.yml" content: "{{ file_content | to_nice_yaml }}" + mode: "0644" diff --git a/hooks/playbooks/link2file.yml b/hooks/playbooks/link2file.yml index 97142dcbae..0e613c47d5 100644 --- a/hooks/playbooks/link2file.yml +++ b/hooks/playbooks/link2file.yml @@ -58,6 +58,7 @@ ansible.builtin.copy: src: "{{ item.stat.lnk_source }}" dest: "{{ _file_path }}" + mode: "0644" loop: "{{ _file_info.results }}" loop_control: label: "{{ item.item }}" diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index ff700f4a3d..bf5a956f96 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -106,5 +106,6 @@ ansible.builtin.copy: src: "{{ item.path }}" dest: "/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr" + mode: "0644" loop: "{{ dcn_crs.files }}" when: dcn_crs.matched > 0 diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 9dd4ee4065..5d20b62b6a 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -48,6 +48,7 @@ option: vers3 value: n backup: true + mode: "0644" - name: Disable NFSv3-related services ansible.builtin.systemd_service: @@ -89,6 +90,7 @@ 'cifmw_nfs_network_range': cifmw_nfs_network_out.stdout | from_json | json_query('cidr') } | to_nice_yaml }} + mode: 
"0644" # NOTE: This represents a workaround because there's an edpm-nftables role # in edpm-ansible already. That role should contain the implementation @@ -125,6 +127,7 @@ option: host value: "{{ cifmw_nfs_network_out.stdout | from_json | json_query('address') }}" backup: true + mode: "0644" - name: Enable and restart nfs-server service ansible.builtin.systemd: diff --git a/playbooks/unique-id.yml b/playbooks/unique-id.yml index c3f1c7d390..9b9709534c 100644 --- a/playbooks/unique-id.yml +++ b/playbooks/unique-id.yml @@ -38,6 +38,7 @@ ansible.builtin.copy: dest: "{{ _unique_id_file }}" content: "{{ cifmw_run_id | default(_unique_id) | lower }}" + mode: "0644" # Since the user might pass their own run ID, we can just consume it. # If, for a subsequent run, the user doesn't pass the run ID, we will diff --git a/playbooks/update.yml b/playbooks/update.yml index 5a35158e04..2a6895f76c 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -24,6 +24,7 @@ remote_src: true src: "{{ cifmw_basedir }}/artifacts/repositories/" dest: "{{ cifmw_basedir }}/artifacts/before_update_repos/" + mode: "0644" - name: Run repo_setup ansible.builtin.include_role: @@ -48,6 +49,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" + mode: "0644" - name: Run Ceph update if part of the deployment hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" @@ -73,6 +75,7 @@ ansible.builtin.copy: content: "{{ cephconf['content'] | b64decode }}" dest: "/tmp/ceph.conf" + mode: "0644" - name: Extract the CephFSID from ceph.conf ansible.builtin.set_fact: diff --git a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml index 3bc1e7558d..1ee13ba8b1 100644 --- a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml @@ -52,6 +52,7 @@ ansible.builtin.copy: src: "{{ _roles_file }}" dest: "{{ _roles_file_dest 
}}" + mode: "0644" - name: Run overcloud deploy delegate_to: "osp-undercloud-0" diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index aa0c1cdbd7..74b411aa08 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -66,6 +66,7 @@ ansible.builtin.copy: src: "{{ _container_prapare_path }}" dest: "{{ ansible_user_dir }}/containers-prepare-parameters.yaml" + mode: "0644" when: cifmw_adoption_osp_deploy_scenario.container_prepare_params is defined # Adoption requires Ceph 7 (Reef) as a requirement. Instead of performing a Ceph @@ -239,4 +240,5 @@ option: "{{ item.option }}" value: "{{ item.value }}" state: "present" + mode: "0644" loop: "{{ _undercloud_conf.config }}" diff --git a/roles/artifacts/tasks/ansible_logs.yml b/roles/artifacts/tasks/ansible_logs.yml index 169a550f6e..1355019bb8 100644 --- a/roles/artifacts/tasks/ansible_logs.yml +++ b/roles/artifacts/tasks/ansible_logs.yml @@ -10,4 +10,5 @@ src: "{{ item.path }}" dest: "{{ cifmw_artifacts_basedir }}/logs/" remote_src: true + mode: "0644" loop: "{{ files_to_copy.files }}" diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index a0ecb5cc50..7519b41063 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -33,6 +33,7 @@ ansible.builtin.file: path: "{{ cifmw_artifacts_basedir }}/{{ item }}" state: directory + mode: "0755" loop: - artifacts - logs diff --git a/roles/build_openstack_packages/tasks/create_repo.yml b/roles/build_openstack_packages/tasks/create_repo.yml index 016fc3586d..0dc056c8f1 100644 --- a/roles/build_openstack_packages/tasks/create_repo.yml +++ b/roles/build_openstack_packages/tasks/create_repo.yml @@ -39,6 +39,7 @@ remote_src: true src: "{{ _repodir.path }}/" dest: "{{ cifmw_bop_gating_repo_dest }}" + mode: "0644" - name: Add gating.repo file to install the required built packages 
ansible.builtin.copy: @@ -50,6 +51,7 @@ gpgcheck=0 priority=1 dest: "{{ cifmw_bop_gating_repo_dest }}/gating.repo" + mode: "0644" - name: Serve gating repo ansible.builtin.import_tasks: serve_gating_repo.yml diff --git a/roles/build_openstack_packages/tasks/downstream.yml b/roles/build_openstack_packages/tasks/downstream.yml index 260a0b5ef0..751126be43 100644 --- a/roles/build_openstack_packages/tasks/downstream.yml +++ b/roles/build_openstack_packages/tasks/downstream.yml @@ -26,12 +26,14 @@ remote_src: true src: "{{ ansible_user_dir }}/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}.cfg" + mode: "0644" - name: Copy patch_rebaser.ini to patch_rebaser repo ansible.builtin.copy: remote_src: true src: "{{ ansible_user_dir }}/patch_rebaser.ini" dest: "{{ cifmw_bop_build_repo_dir }}/patch_rebaser/patch_rebaser/patch_rebaser.ini" + mode: "0644" - name: Copy Downstream scripts to DLRN repo ansible.builtin.copy: diff --git a/roles/build_openstack_packages/tasks/install_dlrn.yml b/roles/build_openstack_packages/tasks/install_dlrn.yml index 1a08a1729a..11cd72ed26 100644 --- a/roles/build_openstack_packages/tasks/install_dlrn.yml +++ b/roles/build_openstack_packages/tasks/install_dlrn.yml @@ -126,6 +126,7 @@ ansible.builtin.template: src: projects.ini.j2 dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/projects.ini' + mode: "0644" - name: Copy the DLRN scripts in the virtualenv to the scripts dir ansible.posix.synchronize: @@ -159,6 +160,7 @@ remote_src: true src: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}-local.cfg" + mode: "0644" - name: Remove last """ from local mock config # noqa: command-instead-of-module ansible.builtin.command: diff --git a/roles/cert_manager/tasks/olm_manifest.yml b/roles/cert_manager/tasks/olm_manifest.yml index 90ba2331ca..48e8ad2645 100644 --- 
a/roles/cert_manager/tasks/olm_manifest.yml +++ b/roles/cert_manager/tasks/olm_manifest.yml @@ -3,6 +3,7 @@ ansible.builtin.copy: dest: "{{ cifmw_cert_manager_manifests_dir }}/cert-manager-{{ item.kind | lower }}-olm.yaml" content: "{{ item | to_nice_yaml }}" + mode: "0644" loop: - "{{ cifmw_cert_manager_olm_operator_group }}" - "{{ cifmw_cert_manager_olm_subscription }}" diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index c27815d741..b44d837a5c 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -36,6 +36,7 @@ create: true backup: true insertbefore: EOF + mode: "0644" - name: Ensure Ceph bootstrap host can ping itself register: _cmd_result diff --git a/roles/ci_dcn_site/tasks/scaledown_site.yml b/roles/ci_dcn_site/tasks/scaledown_site.yml index 407b9a188d..23ba5da09b 100644 --- a/roles/ci_dcn_site/tasks/scaledown_site.yml +++ b/roles/ci_dcn_site/tasks/scaledown_site.yml @@ -200,11 +200,13 @@ ansible.builtin.file: path: "/tmp/ceph_conf_files" state: directory + mode: "0750" - name: Save secret data to files ansible.builtin.copy: content: "{{ secret_info.resources[0].data[key] | b64decode | regex_replace('(?m)^\\s*\\n', '') }}" dest: "/tmp/ceph_conf_files/{{ key }}" + mode: "0640" loop: "{{ secret_info.resources[0].data.keys() }}" loop_control: loop_var: key diff --git a/roles/ci_local_storage/tasks/main.yml b/roles/ci_local_storage/tasks/main.yml index 6daf1e0061..169f581619 100644 --- a/roles/ci_local_storage/tasks/main.yml +++ b/roles/ci_local_storage/tasks/main.yml @@ -33,6 +33,7 @@ ansible.builtin.copy: dest: "{{ cifmw_cls_manifests_dir }}/storage-class.yaml" content: "{{ cifmw_cls_storage_manifest | to_nice_yaml }}" + mode: "0644" - name: Get k8s nodes ansible.builtin.import_tasks: fetch_names.yml diff --git a/roles/ci_lvms_storage/tasks/main.yml b/roles/ci_lvms_storage/tasks/main.yml index 362a200d6f..e3699aba4a 100644 --- a/roles/ci_lvms_storage/tasks/main.yml +++ 
b/roles/ci_lvms_storage/tasks/main.yml @@ -26,6 +26,7 @@ ansible.builtin.file: path: "{{ cifmw_lvms_manifests_dir }}" state: directory + mode: "0755" - name: Put the manifest files in place ansible.builtin.template: diff --git a/roles/ci_multus/molecule/resources/clean.yml b/roles/ci_multus/molecule/resources/clean.yml index 2f9abfbd4b..e88c90ee19 100644 --- a/roles/ci_multus/molecule/resources/clean.yml +++ b/roles/ci_multus/molecule/resources/clean.yml @@ -23,6 +23,7 @@ src: "{{ cifmw_ci_multus_manifests_dir }}" dest: "{{ cifmw_ci_multus_manifests_dir }}.backup" remote_src: true + mode: "0755" - name: Call cleanup ansible.builtin.import_role: diff --git a/roles/ci_multus/tasks/main.yml b/roles/ci_multus/tasks/main.yml index 5edcdfb30f..84d8a8c572 100644 --- a/roles/ci_multus/tasks/main.yml +++ b/roles/ci_multus/tasks/main.yml @@ -18,6 +18,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_multus_manifests_dir }}" state: directory + mode: "0755" - name: Build list of networks from cifmw_networking_env_definition block: @@ -117,6 +118,7 @@ ansible.builtin.template: src: "nad.yml.j2" dest: "{{ cifmw_ci_multus_manifests_dir }}/ci_multus_nads.yml" + mode: "0644" - name: Create resources in OCP when: not cifmw_ci_multus_dryrun diff --git a/roles/ci_network/tasks/main.yml b/roles/ci_network/tasks/main.yml index 179d4b8b47..27d1be8494 100644 --- a/roles/ci_network/tasks/main.yml +++ b/roles/ci_network/tasks/main.yml @@ -42,6 +42,7 @@ section: "{{ nm_conf.section }}" option: "{{ nm_conf.option }}" value: "{{ nm_conf.value }}" + mode: "0644" loop: "{{ cifmw_network_nm_config }}" loop_control: loop_var: nm_conf diff --git a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml index ed707c6763..8ce164cbb3 100644 --- a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml +++ b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml @@ -3,6 +3,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_manifests_dir }}" state: directory + mode: "0755" - name: 
Create the nmstate namespace kubernetes.core.k8s: diff --git a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml index 470811463b..6cc009fad3 100644 --- a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml +++ b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml @@ -25,6 +25,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_configs_dir }}" state: directory + mode: "0755" - name: "Save nmstate state for {{ cifmw_ci_nmstate_unmanaged_host }}" ansible.builtin.copy: diff --git a/roles/cifmw_cephadm/tasks/dashboard/validation.yml b/roles/cifmw_cephadm/tasks/dashboard/validation.yml index b8e6569b89..1559ba30a9 100644 --- a/roles/cifmw_cephadm/tasks/dashboard/validation.yml +++ b/roles/cifmw_cephadm/tasks/dashboard/validation.yml @@ -25,6 +25,7 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_response" + mode: "0644" validate_certs: false register: dashboard_response failed_when: dashboard_response.failed == true @@ -37,6 +38,7 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_http_response" + mode: "0644" validate_certs: false username: admin password: admin diff --git a/roles/cifmw_external_dns/tasks/requirements.yml b/roles/cifmw_external_dns/tasks/requirements.yml index af123b118d..21799f008a 100644 --- a/roles/cifmw_external_dns/tasks/requirements.yml +++ b/roles/cifmw_external_dns/tasks/requirements.yml @@ -56,6 +56,7 @@ ansible.builtin.file: path: "{{ cifmw_external_dns_manifests_dir }}" state: directory + mode: "0755" - name: Stat cifmw_external_dns_certificate on target hosts ansible.builtin.stat: diff --git a/roles/compliance/tasks/create_scap_report.yml b/roles/compliance/tasks/create_scap_report.yml index 
5cb8a1e9eb..74bf07f531 100644 --- a/roles/compliance/tasks/create_scap_report.yml +++ b/roles/compliance/tasks/create_scap_report.yml @@ -31,6 +31,7 @@ ansible.builtin.copy: src: "{{ bzip_file.path }}" dest: "{{ base_name }}.xml.bz2" + mode: "0644" - name: Unzip the file ansible.builtin.command: "bunzip2 {{ base_name }}.xml.bz2" diff --git a/roles/copy_container/molecule/default/converge.yml b/roles/copy_container/molecule/default/converge.yml index c17b388b1d..a80508c0bd 100644 --- a/roles/copy_container/molecule/default/converge.yml +++ b/roles/copy_container/molecule/default/converge.yml @@ -43,6 +43,7 @@ ansible.builtin.copy: dest: "/tmp/copy-quay-config.yaml" content: "{{ _data }}" + mode: "0644" - name: Copy containers from RDO quay to local registry ansible.builtin.command: diff --git a/roles/copy_container/tasks/main.yml b/roles/copy_container/tasks/main.yml index 53947623e1..fb95f13034 100644 --- a/roles/copy_container/tasks/main.yml +++ b/roles/copy_container/tasks/main.yml @@ -42,6 +42,7 @@ ansible.builtin.copy: src: copy-quay/ dest: "{{ temporary_copy_container_dir.path }}" + mode: "0755" - name: Build the copy-container register: go_build diff --git a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml index 99866dbe2d..b764da7f13 100644 --- a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml +++ b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml @@ -95,6 +95,7 @@ ansible.builtin.copy: dest: "/home/dev-scripts/.ocp_cert_not_after" content: "{{ _date }}" + mode: "0644" - name: Ensure freshly built config ansible.builtin.include_role: diff --git a/roles/devscripts/tasks/139_configs.yml b/roles/devscripts/tasks/139_configs.yml index a6e7aeba67..e899e7673b 100644 --- a/roles/devscripts/tasks/139_configs.yml +++ b/roles/devscripts/tasks/139_configs.yml @@ -38,3 +38,4 @@ src: templates/conf_ciuser.j2 dest: >- {{ cifmw_devscripts_repo_dir }}/config_{{ 
cifmw_devscripts_user }}.sh + mode: "0644" diff --git a/roles/devscripts/tasks/main.yml b/roles/devscripts/tasks/main.yml index 61407b0e68..6a87bf0237 100644 --- a/roles/devscripts/tasks/main.yml +++ b/roles/devscripts/tasks/main.yml @@ -64,6 +64,7 @@ dest: "{{ cifmw_devscripts_logs_dir }}/{{ item.path | basename }}" remote_src: true src: "{{ item.path }}" + mode: "0644" loop: "{{ _deploy_logs.files }}" loop_control: label: "{{ item.path }}" diff --git a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml index 86ab6b4582..185fac9df1 100644 --- a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml +++ b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml @@ -3,6 +3,7 @@ ansible.builtin.get_url: url: "{{ commit_url }}/commit.yaml" dest: "{{ cifmw_dlrn_promote_workspace }}/commit.yaml" + mode: "0644" force: true register: result until: diff --git a/roles/edpm_build_images/tasks/main.yml b/roles/edpm_build_images/tasks/main.yml index 2c4f1d821b..02309ad893 100644 --- a/roles/edpm_build_images/tasks/main.yml +++ b/roles/edpm_build_images/tasks/main.yml @@ -31,6 +31,7 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_edpm_build_images_basedir }}" timeout: 20 + mode: "0644" register: result until: result is success retries: 60 diff --git a/roles/edpm_kustomize/tasks/kustomize.yml b/roles/edpm_kustomize/tasks/kustomize.yml index da1fda1060..5c5ecd2fd8 100644 --- a/roles/edpm_kustomize/tasks/kustomize.yml +++ b/roles/edpm_kustomize/tasks/kustomize.yml @@ -33,6 +33,7 @@ } ) | to_nice_yaml }} + mode: "0644" - name: Apply the already existing kustomization if present environment: diff --git a/roles/edpm_kustomize/tasks/main.yml b/roles/edpm_kustomize/tasks/main.yml index 0243bbe17d..1065381ed5 100644 --- a/roles/edpm_kustomize/tasks/main.yml +++ b/roles/edpm_kustomize/tasks/main.yml @@ -55,6 +55,7 @@ remote_src: true src: "{{ cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.yaml" dest: "{{ 
cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.initial.yaml" + mode: "0644" - name: Prepare and load the ci-framework kustomize template file vars: diff --git a/roles/env_op_images/tasks/main.yml b/roles/env_op_images/tasks/main.yml index 7822587d95..39a6e55d55 100644 --- a/roles/env_op_images/tasks/main.yml +++ b/roles/env_op_images/tasks/main.yml @@ -139,3 +139,4 @@ ansible.builtin.copy: dest: "{{ cifmw_env_op_images_dir }}/artifacts/{{ cifmw_env_op_images_file }}" content: "{{ _content | to_nice_yaml }}" + mode: "0644" diff --git a/roles/federation/tasks/run_keycloak_setup.yml b/roles/federation/tasks/run_keycloak_setup.yml index 769fd46526..24ecdff734 100644 --- a/roles/federation/tasks/run_keycloak_setup.yml +++ b/roles/federation/tasks/run_keycloak_setup.yml @@ -25,6 +25,7 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, '.crc', 'machines', 'src', 'kubeconfig' ] | path_join }}" dest: "{{ [ ansible_user_dir, '.kube', 'config' ] | path_join }}" + mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Create namespace @@ -38,6 +39,7 @@ ansible.builtin.template: src: rhsso-operator-olm.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'rhsso-operator-olm.yaml' ] | path_join }}" + mode: "0644" - name: Install federation rhsso operator environment: @@ -89,6 +91,7 @@ ansible.builtin.template: src: sso.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'sso.yaml' ] | path_join }}" + mode: "0644" - name: Install federation sso pod environment: @@ -130,3 +133,4 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'tls.crt'] | path_join }}" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + mode: "0644" diff --git a/roles/federation/tasks/run_openstack_auth_test.yml b/roles/federation/tasks/run_openstack_auth_test.yml index ffbbda6e1e..f87b2d9a53 100644 --- a/roles/federation/tasks/run_openstack_auth_test.yml +++ 
b/roles/federation/tasks/run_openstack_auth_test.yml @@ -31,6 +31,7 @@ ansible.builtin.template: src: kctestuser1.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" + mode: "0644" - name: Copy federation test user1 cloudrc file into pod kubernetes.core.k8s_cp: @@ -43,6 +44,7 @@ ansible.builtin.copy: src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" + mode: "0444" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index 593177a24d..a4abd325c4 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -18,6 +18,7 @@ ansible.builtin.copy: src: /home/zuul/.crc/machines/crc/kubeconfig dest: /home/zuul/.kube/config + mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Run federation create domain @@ -37,6 +38,7 @@ ansible.builtin.template: src: rules.json.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_rules_file ] | path_join }}" + mode: "0644" - name: Copy federation rules json file into pod kubernetes.core.k8s_cp: diff --git a/roles/hive/tasks/main.yml b/roles/hive/tasks/main.yml index 457a649d82..867b18908d 100644 --- a/roles/hive/tasks/main.yml +++ b/roles/hive/tasks/main.yml @@ -30,6 +30,7 @@ ansible.builtin.file: path: "{{ cifmw_hive_artifacts_dir }}" state: directory + mode: "0755" - name: "Performing {{ cifmw_hive_platform }} {{cifmw_hive_action }}" # noqa: name[template] ansible.builtin.include_tasks: "{{ cifmw_hive_platform }}_{{ cifmw_hive_action }}.yml" diff --git a/roles/install_ca/tasks/main.yml b/roles/install_ca/tasks/main.yml index aac0b232a1..9c5c0cbab6 100644 --- a/roles/install_ca/tasks/main.yml +++ b/roles/install_ca/tasks/main.yml @@ -29,6 +29,7 @@ url: "{{ 
cifmw_install_ca_url }}" dest: "{{ cifmw_install_ca_trust_dir }}" validate_certs: "{{ cifmw_install_ca_url_validate_certs | default(omit) }}" + mode: "0644" - name: Install custom CA bundle from inline register: ca_inline diff --git a/roles/install_yamls/tasks/main.yml b/roles/install_yamls/tasks/main.yml index 0f3ed9536b..0a70460e67 100644 --- a/roles/install_yamls/tasks/main.yml +++ b/roles/install_yamls/tasks/main.yml @@ -120,6 +120,7 @@ {% for k,v in cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %} + mode: "0644" - name: Set install_yamls default values tags: @@ -166,6 +167,7 @@ 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }} + mode: "0644" - name: Create empty cifmw_install_yamls_environment if needed tags: diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 9d8e459e4a..37d9c6405c 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -51,6 +51,7 @@ 'values.yaml' ) | path_join }} + mode: "0644" - name: Generate the OLM kustomization file ansible.builtin.copy: diff --git a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml index 3c3300e205..04f360b08f 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml @@ -151,6 +151,7 @@ remote_src: true src: "{{ cifmw_basedir }}/{{ item }}" dest: "{{ _dest }}/" + mode: "0755" loop: - artifacts - logs @@ -160,11 +161,14 @@ failed_when: false ansible.builtin.copy: remote_src: true - src: "{{ item }}" + src: "{{ item.src }}" dest: "{{ _dest }}/" + mode: "{{ item.mode }}" loop: - - /etc/cifmw-dnsmasq.conf - - /etc/cifmw-dnsmasq.d + - { src: "/etc/cifmw-dnsmasq.conf", mode: "0644" } + - { src: "/etc/cifmw-dnsmasq.d", mode: "0755" } + 
loop_control: + label: "{{ item.src }}" - name: Clean environment vars: diff --git a/roles/libvirt_manager/tasks/clean_layout.yml b/roles/libvirt_manager/tasks/clean_layout.yml index e56816b35e..d7467c2dd4 100644 --- a/roles/libvirt_manager/tasks/clean_layout.yml +++ b/roles/libvirt_manager/tasks/clean_layout.yml @@ -82,6 +82,7 @@ marker: "## {mark} {{ vm }} {{ inventory_hostname }}" state: absent create: true + mode: "0600" loop: "{{ cleanup_vms }}" # KEEP this for now to ensure smoother migration @@ -93,6 +94,7 @@ marker: "## {mark} {{ vm }}" state: absent create: true + mode: "0600" loop: "{{ cleanup_vms }}" - name: Get network list diff --git a/roles/libvirt_manager/tasks/deploy_layout.yml b/roles/libvirt_manager/tasks/deploy_layout.yml index fc590981e8..9705c4e116 100644 --- a/roles/libvirt_manager/tasks/deploy_layout.yml +++ b/roles/libvirt_manager/tasks/deploy_layout.yml @@ -95,6 +95,7 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/{{ item }}-group.yml" src: inventory.yml.j2 + mode: "0644" loop: "{{ _cifmw_libvirt_manager_layout.vms.keys() }}" loop_control: label: "{{ item }}" @@ -103,6 +104,7 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/all-group.yml" src: "all-inventory.yml.j2" + mode: "0644" - name: Ensure storage pool is present. 
when: @@ -316,6 +318,7 @@ dest: >- {{ cifmw_libvirt_manager_basedir }}/artifacts/virtual-nodes.yml content: "{{ content | to_nice_yaml }}" + mode: "0644" - name: Ensure we get proper access to CRC when: diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 77fb0dc5ea..5d614d8ad2 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -79,6 +79,7 @@ ansible.builtin.copy: dest: "{{ _nic_info }}" content: "{{ cifmw_libvirt_manager_mac_map | to_nice_yaml }}" + mode: "0644" # END MAC pre-generation management # # START generate all IPs using networking_mapper role/module diff --git a/roles/libvirt_manager/tasks/get_image.yml b/roles/libvirt_manager/tasks/get_image.yml index d8eb33b05d..9b1f13f58f 100644 --- a/roles/libvirt_manager/tasks/get_image.yml +++ b/roles/libvirt_manager/tasks/get_image.yml @@ -25,6 +25,7 @@ ansible.builtin.get_url: url: "{{ image_data.image_url }}" dest: "{{ image_data.image_local_dir }}/{{ image_data.disk_file_name }}" + mode: "0644" checksum: >- {% if image_data.sha256_image_name -%} sha256:{{ image_data.sha256_image_name }} diff --git a/roles/mirror_registry/tasks/main.yml b/roles/mirror_registry/tasks/main.yml index 6f2ac78bde..2adceaaed9 100644 --- a/roles/mirror_registry/tasks/main.yml +++ b/roles/mirror_registry/tasks/main.yml @@ -28,6 +28,7 @@ owner: "{{ ansible_user_id }}" group: "{{ ansible_user_id }}" state: directory + mode: "0755" - name: Download mirror-registry tools ansible.builtin.unarchive: diff --git a/roles/nat64_appliance/molecule/default/converge.yml b/roles/nat64_appliance/molecule/default/converge.yml index 014a76bf83..c321d6d1c2 100644 --- a/roles/nat64_appliance/molecule/default/converge.yml +++ b/roles/nat64_appliance/molecule/default/converge.yml @@ -50,6 +50,7 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_basedir }}" timeout: 20 + mode: "0644" 
register: result until: result is success retries: 60 @@ -423,26 +424,31 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/test_node_info.log" content: "{{ _test_node_debug_info.stdout }}" + mode: "0644" - name: Write nat64-appliance info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_node_info.log" content: "{{ _nat64_appliance_debug_info.stdout }}" + mode: "0644" - name: Write nat64-appliance journal to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_journal.log" content: "{{ _nat64_appliance_journal.stdout }}" + mode: "0644" - name: Write nat64-appliance DNS64 debug to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_dns64_debug.log" content: "{{ _nat64_appliance_dns64_debug.stdout }}" + mode: "0644" - name: Write hypervisor info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/hypervisor_info.log" content: "{{ _hypervisor_info.stdout }}" + mode: "0644" - name: Ping example.com (delegate to test-node) delegate_to: test-node diff --git a/roles/networking_mapper/tasks/_gather_facts.yml b/roles/networking_mapper/tasks/_gather_facts.yml index 25564e6058..d16438b336 100644 --- a/roles/networking_mapper/tasks/_gather_facts.yml +++ b/roles/networking_mapper/tasks/_gather_facts.yml @@ -77,3 +77,4 @@ items2dict | to_nice_yaml }} + mode: "0644" diff --git a/roles/openshift_login/tasks/main.yml b/roles/openshift_login/tasks/main.yml index 1c2cf634ef..f2a9f9d1a8 100644 --- a/roles/openshift_login/tasks/main.yml +++ b/roles/openshift_login/tasks/main.yml @@ -98,7 +98,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml" content: "{{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}" - + mode: "0600" - name: Update the install-yamls-params with KUBECONFIG when: cifmw_install_yamls_environment is defined block: @@ -120,3 +120,4 @@ }, recursive=true) | 
to_nice_yaml }} dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml" + mode: "0600" diff --git a/roles/pkg_build/tasks/main.yml b/roles/pkg_build/tasks/main.yml index ba20c937fe..727c6e7f34 100644 --- a/roles/pkg_build/tasks/main.yml +++ b/roles/pkg_build/tasks/main.yml @@ -20,6 +20,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/{{ item }}" state: directory + mode: "0755" loop: - volumes/packages/gating_repo - artifacts @@ -35,6 +36,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/volumes/packages/{{ pkg.name }}" state: directory + mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' @@ -44,6 +46,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/logs/build_{{ pkg.name }}" state: directory + mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' diff --git a/roles/reproducer/tasks/generate_bm_info.yml b/roles/reproducer/tasks/generate_bm_info.yml index 7405442fc1..585758b5e7 100644 --- a/roles/reproducer/tasks/generate_bm_info.yml +++ b/roles/reproducer/tasks/generate_bm_info.yml @@ -151,3 +151,4 @@ ) %} {% endfor %} {{ {'nodes': _ironic_nodes } | to_nice_yaml(indent=2) }} + mode: "0644" diff --git a/roles/update/tasks/reboot_hypervisor_using_cr.yml b/roles/update/tasks/reboot_hypervisor_using_cr.yml index b091cdedd2..3d753930a6 100644 --- a/roles/update/tasks/reboot_hypervisor_using_cr.yml +++ b/roles/update/tasks/reboot_hypervisor_using_cr.yml @@ -23,6 +23,7 @@ ansible.builtin.copy: dest: "{{ cifmw_update_artifacts_basedir }}/{{ cifmw_reboot_dep_name }}.yaml" content: "{{ _content | to_nice_yaml }}" + mode: "0644" vars: _content: apiVersion: dataplane.openstack.org/v1beta1 From 0a1a1f837303b19e6340c0592946d31715b30565 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Fri, 28 Feb 2025 16:14:59 +0100 Subject: [PATCH 017/480] Tempest config cleanup must be bool According to object 
patching failure spec.cleanup in body must be of type boolean. Cast var to bool type. --- roles/test_operator/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index d1953c9c02..d7b533aad4 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -145,7 +145,7 @@ cifmw_test_operator_tempest_config: extraRPMs: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" tempestconfRun: "{{ cifmw_tempest_tempestconf_config_defaults | combine(stage_vars_dict.cifmw_test_operator_tempest_tempestconf_config | default({})) }}" - cleanup: "{{ stage_vars_dict.cifmw_test_operator_tempest_cleanup }}" + cleanup: "{{ stage_vars_dict.cifmw_test_operator_tempest_cleanup | bool }}" workflow: "{{ stage_vars_dict.cifmw_test_operator_tempest_workflow }}" debug: "{{ stage_vars_dict.cifmw_test_operator_tempest_debug }}" From c42a29986180c579626d10883c9371ca1c3637b5 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Thu, 27 Mar 2025 07:48:40 +0530 Subject: [PATCH 018/480] Revert "Add mode for files and dirs" This reverts commit 63d1635ed4433a532ec3954127a63b66a8f79f18. Revert "Enable risky-file-permissions linter" This reverts commit c50dea3577d8fcc871861a8957c7c04dd8506ef4. 
These commits broke the package builds in content provider jobs with:- Failed to get information on remote file (/home/zuul/ci-framework-data/artifacts/repositories/gating.repo): Permission denied Related-Issue: OSPRH-15047 --- .ansible-lint | 2 +- deploy-osp-adoption.yml | 2 -- docs/source/files/bootstrap-hypervisor.yml | 2 +- hooks/playbooks/adoption_ironic_post_oc.yml | 5 ----- hooks/playbooks/barbican-enable-luna.yml | 1 - hooks/playbooks/control_plane_ceph_backends.yml | 1 - hooks/playbooks/control_plane_hci_pre_deploy.yml | 1 - hooks/playbooks/control_plane_horizon.yml | 1 - hooks/playbooks/control_plane_ironic.yml | 1 - hooks/playbooks/federation-controlplane-config.yml | 1 - hooks/playbooks/fetch_compute_facts.yml | 4 ---- hooks/playbooks/ironic_enroll_nodes.yml | 1 - hooks/playbooks/kustomize_cr.yml | 3 --- hooks/playbooks/kuttl_openstack_prep.yml | 1 - hooks/playbooks/link2file.yml | 1 - playbooks/dcn.yml | 1 - playbooks/nfs.yml | 3 --- playbooks/unique-id.yml | 1 - playbooks/update.yml | 3 --- roles/adoption_osp_deploy/tasks/deploy_overcloud.yml | 1 - roles/adoption_osp_deploy/tasks/prepare_undercloud.yml | 2 -- roles/artifacts/tasks/ansible_logs.yml | 1 - roles/artifacts/tasks/main.yml | 1 - roles/build_openstack_packages/tasks/create_repo.yml | 2 -- roles/build_openstack_packages/tasks/downstream.yml | 2 -- roles/build_openstack_packages/tasks/install_dlrn.yml | 2 -- roles/cert_manager/tasks/olm_manifest.yml | 1 - roles/ci_dcn_site/tasks/ceph.yml | 1 - roles/ci_dcn_site/tasks/scaledown_site.yml | 2 -- roles/ci_local_storage/tasks/main.yml | 1 - roles/ci_lvms_storage/tasks/main.yml | 1 - roles/ci_multus/molecule/resources/clean.yml | 1 - roles/ci_multus/tasks/main.yml | 2 -- roles/ci_network/tasks/main.yml | 1 - roles/ci_nmstate/tasks/nmstate_k8s_install.yml | 1 - .../tasks/nmstate_unmanaged_provision_node.yml | 1 - roles/cifmw_cephadm/tasks/dashboard/validation.yml | 2 -- roles/cifmw_external_dns/tasks/requirements.yml | 1 - 
roles/compliance/tasks/create_scap_report.yml | 1 - roles/copy_container/molecule/default/converge.yml | 1 - roles/copy_container/tasks/main.yml | 1 - .../molecule/check_cluster_status/tasks/test.yml | 1 - roles/devscripts/tasks/139_configs.yml | 1 - roles/devscripts/tasks/main.yml | 1 - roles/dlrn_promote/tasks/get_hash_from_commit.yaml | 1 - roles/edpm_build_images/tasks/main.yml | 1 - roles/edpm_kustomize/tasks/kustomize.yml | 1 - roles/edpm_kustomize/tasks/main.yml | 1 - roles/env_op_images/tasks/main.yml | 1 - roles/federation/tasks/run_keycloak_setup.yml | 4 ---- roles/federation/tasks/run_openstack_auth_test.yml | 2 -- roles/federation/tasks/run_openstack_setup.yml | 2 -- roles/hive/tasks/main.yml | 1 - roles/install_ca/tasks/main.yml | 1 - roles/install_yamls/tasks/main.yml | 2 -- roles/kustomize_deploy/tasks/install_operators.yml | 1 - .../molecule/generate_network_data/tasks/test.yml | 10 +++------- roles/libvirt_manager/tasks/clean_layout.yml | 2 -- roles/libvirt_manager/tasks/deploy_layout.yml | 3 --- .../libvirt_manager/tasks/generate_networking_data.yml | 1 - roles/libvirt_manager/tasks/get_image.yml | 1 - roles/mirror_registry/tasks/main.yml | 1 - roles/nat64_appliance/molecule/default/converge.yml | 6 ------ roles/networking_mapper/tasks/_gather_facts.yml | 1 - roles/openshift_login/tasks/main.yml | 3 +-- roles/pkg_build/tasks/main.yml | 3 --- roles/reproducer/tasks/generate_bm_info.yml | 1 - roles/update/tasks/reboot_hypervisor_using_cr.yml | 1 - 68 files changed, 6 insertions(+), 112 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 3234711638..eca5d61350 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -41,10 +41,10 @@ enable_list: - no-log-password - no-same-owner - name[play] - - risky-file-permissions skip_list: - jinja[spacing] # We don't really want to get that one. Too picky - no-changed-when # once we get the oc module we can re-enable it + - risky-file-permissions # Seems to fail on 0644 on files ?! 
- schema[meta] # Apparently "CentOS 9" isn't known... ?! - schema[vars] # weird issue with some "vars" in playbooks - yaml[line-length] # We have long lines, yes. diff --git a/deploy-osp-adoption.yml b/deploy-osp-adoption.yml index 1dee12e853..45b6aae89a 100644 --- a/deploy-osp-adoption.yml +++ b/deploy-osp-adoption.yml @@ -85,7 +85,6 @@ ansible.builtin.file: path: "{{ cifmw_basedir }}/artifacts/parameters" state: "directory" - mode: "0755" - name: Save variables for use with hooks vars: @@ -97,7 +96,6 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/adoption_osp.yml" content: "{{ _content | to_nice_yaml }}" - mode: "0644" - name: Set inventory_file for localhost to use with hooks ansible.builtin.set_fact: inventory_file: "{{ hostvars[_target_host]['inventory_file'] }}" diff --git a/docs/source/files/bootstrap-hypervisor.yml b/docs/source/files/bootstrap-hypervisor.yml index 96cc0bb90b..337c9eea2a 100644 --- a/docs/source/files/bootstrap-hypervisor.yml +++ b/docs/source/files/bootstrap-hypervisor.yml @@ -56,7 +56,7 @@ dest: "/etc/sudoers.d/{{ _user }}" owner: root group: root - mode: "0640" + mode: 0640 - name: Install basic packages become: true diff --git a/hooks/playbooks/adoption_ironic_post_oc.yml b/hooks/playbooks/adoption_ironic_post_oc.yml index a97d4164a9..198ee8fd51 100644 --- a/hooks/playbooks/adoption_ironic_post_oc.yml +++ b/hooks/playbooks/adoption_ironic_post_oc.yml @@ -55,7 +55,6 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ironic-python-agent" - mode: "0755" loop: - osp-undercloud-0 - osp-controller-0 @@ -83,7 +82,6 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.kernel" dest: /var/lib/ironic/httpboot/agent.kernel remote_src: true - mode: "0644" loop: - osp-controller-0 - osp-controller-1 @@ -95,7 +93,6 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.initramfs" dest: /var/lib/ironic/httpboot/agent.ramdisk remote_src: true - mode: "0644" loop: - 
osp-controller-0 - osp-controller-1 @@ -169,13 +166,11 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ci-framework-data/parameters" - mode: "0755" - name: Write ironic_nodes.yaml on osp-unercloud-o ansible.builtin.copy: content: "{{ _ironic_nodes_slurp.content | b64decode }}" dest: "{{ ansible_user_dir }}/ci-framework-data/parameters/ironic_nodes.yaml" - mode: "0644" - name: Run baremetal create command to enroll the nodes in the Ironic service environment: diff --git a/hooks/playbooks/barbican-enable-luna.yml b/hooks/playbooks/barbican-enable-luna.yml index d319e25c52..c3a6a2b8f5 100644 --- a/hooks/playbooks/barbican-enable-luna.yml +++ b/hooks/playbooks/barbican-enable-luna.yml @@ -46,7 +46,6 @@ login_secret: "{{ cifmw_hsm_login_secret | default('barbican-luna-login', true) }}" ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/93-barbican-luna.yaml" - mode: "0644" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization diff --git a/hooks/playbooks/control_plane_ceph_backends.yml b/hooks/playbooks/control_plane_ceph_backends.yml index 9d04193788..49324a05c2 100644 --- a/hooks/playbooks/control_plane_ceph_backends.yml +++ b/hooks/playbooks/control_plane_ceph_backends.yml @@ -25,4 +25,3 @@ ansible.builtin.template: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/90-ceph-backends-kustomization.yaml" src: "config_ceph_backends.yaml.j2" - mode: "0644" diff --git a/hooks/playbooks/control_plane_hci_pre_deploy.yml b/hooks/playbooks/control_plane_hci_pre_deploy.yml index 7bf373686e..97d04349e3 100644 --- a/hooks/playbooks/control_plane_hci_pre_deploy.yml +++ b/hooks/playbooks/control_plane_hci_pre_deploy.yml @@ -32,4 +32,3 @@ - op: add path: /spec/swift/enabled value: {{ cifmw_services_swift_enabled | default('false') }} - mode: "0644" diff --git a/hooks/playbooks/control_plane_horizon.yml b/hooks/playbooks/control_plane_horizon.yml index 
852298c741..010e1eace7 100644 --- a/hooks/playbooks/control_plane_horizon.yml +++ b/hooks/playbooks/control_plane_horizon.yml @@ -26,4 +26,3 @@ - op: add path: /spec/horizon/template/memcachedInstance value: memcached - mode: "0644" diff --git a/hooks/playbooks/control_plane_ironic.yml b/hooks/playbooks/control_plane_ironic.yml index b0faee9fb1..7f278107d2 100644 --- a/hooks/playbooks/control_plane_ironic.yml +++ b/hooks/playbooks/control_plane_ironic.yml @@ -24,4 +24,3 @@ - op: add path: /spec/ironic/enabled value: {{ cifmw_services_ironic_enabled | default('false') }} - mode: "0644" diff --git a/hooks/playbooks/federation-controlplane-config.yml b/hooks/playbooks/federation-controlplane-config.yml index 845d3958de..bd9b9b76f9 100644 --- a/hooks/playbooks/federation-controlplane-config.yml +++ b/hooks/playbooks/federation-controlplane-config.yml @@ -37,7 +37,6 @@ remote_id_attribute=HTTP_OIDC_ISS [auth] methods = password,token,oauth1,mapped,application_credential,openid - mode: "0644" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 283d7030fa..96c3c183f8 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -17,7 +17,6 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" - mode: "0644" - name: Build dataset hook hosts: localhost @@ -107,7 +106,6 @@ "values": [] } ] - mode: "0644" - name: Prepare EDPM deploy related facts and keys when: @@ -137,7 +135,6 @@ vars: dns_servers: "{{ ((['192.168.122.10'] + ansible_facts['dns']['nameservers']) | unique)[0:2] }}" ansible.builtin.copy: - mode: "0644" dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/dataplane/99-kustomization.yaml" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 @@ -273,4 +270,3 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/{{ step }}_{{ hook_name }}.yml" content: 
"{{ file_content | to_nice_yaml }}" - mode: "0644" diff --git a/hooks/playbooks/ironic_enroll_nodes.yml b/hooks/playbooks/ironic_enroll_nodes.yml index e4edb57799..b27f333a6f 100644 --- a/hooks/playbooks/ironic_enroll_nodes.yml +++ b/hooks/playbooks/ironic_enroll_nodes.yml @@ -61,7 +61,6 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/parameters/ironic_nodes.yaml" content: "{{ ironic_nodes | to_yaml }}" - mode: "0644" - name: Enroll ironic nodes ansible.builtin.shell: | diff --git a/hooks/playbooks/kustomize_cr.yml b/hooks/playbooks/kustomize_cr.yml index 752b71d5ce..4ee5ad7eac 100644 --- a/hooks/playbooks/kustomize_cr.yml +++ b/hooks/playbooks/kustomize_cr.yml @@ -27,7 +27,6 @@ ansible.builtin.copy: src: "{{ cifmw_kustomize_cr_file_path }}/{{ cifmw_kustomize_cr_file_name }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/{{ cifmw_kustomize_cr_file_name }}" - mode: "0644" remote_src: true - name: Generate kustomization file @@ -35,7 +34,6 @@ ansible.builtin.template: src: "{{ playbook_dir }}/{{ cifmw_kustomize_cr_template }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomization.yaml" - mode: "0644" - name: Run oc kustomize environment: @@ -49,4 +47,3 @@ ansible.builtin.copy: dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomized_{{ cifmw_kustomize_cr_file_name }}" content: "{{ kustomized_cr.stdout }}" - mode: "0644" diff --git a/hooks/playbooks/kuttl_openstack_prep.yml b/hooks/playbooks/kuttl_openstack_prep.yml index 21a6797046..6d9ab067d5 100644 --- a/hooks/playbooks/kuttl_openstack_prep.yml +++ b/hooks/playbooks/kuttl_openstack_prep.yml @@ -42,4 +42,3 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/{{ step }}_{{ hook_name }}.yml" content: "{{ file_content | to_nice_yaml }}" - mode: "0644" diff --git a/hooks/playbooks/link2file.yml b/hooks/playbooks/link2file.yml index 0e613c47d5..97142dcbae 100644 --- a/hooks/playbooks/link2file.yml +++ b/hooks/playbooks/link2file.yml @@ -58,7 +58,6 @@ ansible.builtin.copy: src: "{{ 
item.stat.lnk_source }}" dest: "{{ _file_path }}" - mode: "0644" loop: "{{ _file_info.results }}" loop_control: label: "{{ item.item }}" diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index bf5a956f96..ff700f4a3d 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -106,6 +106,5 @@ ansible.builtin.copy: src: "{{ item.path }}" dest: "/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr" - mode: "0644" loop: "{{ dcn_crs.files }}" when: dcn_crs.matched > 0 diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 5d20b62b6a..9dd4ee4065 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -48,7 +48,6 @@ option: vers3 value: n backup: true - mode: "0644" - name: Disable NFSv3-related services ansible.builtin.systemd_service: @@ -90,7 +89,6 @@ 'cifmw_nfs_network_range': cifmw_nfs_network_out.stdout | from_json | json_query('cidr') } | to_nice_yaml }} - mode: "0644" # NOTE: This represents a workaround because there's an edpm-nftables role # in edpm-ansible already. That role should contain the implementation @@ -127,7 +125,6 @@ option: host value: "{{ cifmw_nfs_network_out.stdout | from_json | json_query('address') }}" backup: true - mode: "0644" - name: Enable and restart nfs-server service ansible.builtin.systemd: diff --git a/playbooks/unique-id.yml b/playbooks/unique-id.yml index 9b9709534c..c3f1c7d390 100644 --- a/playbooks/unique-id.yml +++ b/playbooks/unique-id.yml @@ -38,7 +38,6 @@ ansible.builtin.copy: dest: "{{ _unique_id_file }}" content: "{{ cifmw_run_id | default(_unique_id) | lower }}" - mode: "0644" # Since the user might pass their own run ID, we can just consume it. 
# If, for a subsequent run, the user doesn't pass the run ID, we will diff --git a/playbooks/update.yml b/playbooks/update.yml index 2a6895f76c..5a35158e04 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -24,7 +24,6 @@ remote_src: true src: "{{ cifmw_basedir }}/artifacts/repositories/" dest: "{{ cifmw_basedir }}/artifacts/before_update_repos/" - mode: "0644" - name: Run repo_setup ansible.builtin.include_role: @@ -49,7 +48,6 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" - mode: "0644" - name: Run Ceph update if part of the deployment hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" @@ -75,7 +73,6 @@ ansible.builtin.copy: content: "{{ cephconf['content'] | b64decode }}" dest: "/tmp/ceph.conf" - mode: "0644" - name: Extract the CephFSID from ceph.conf ansible.builtin.set_fact: diff --git a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml index 1ee13ba8b1..3bc1e7558d 100644 --- a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml @@ -52,7 +52,6 @@ ansible.builtin.copy: src: "{{ _roles_file }}" dest: "{{ _roles_file_dest }}" - mode: "0644" - name: Run overcloud deploy delegate_to: "osp-undercloud-0" diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index 74b411aa08..aa0c1cdbd7 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -66,7 +66,6 @@ ansible.builtin.copy: src: "{{ _container_prapare_path }}" dest: "{{ ansible_user_dir }}/containers-prepare-parameters.yaml" - mode: "0644" when: cifmw_adoption_osp_deploy_scenario.container_prepare_params is defined # Adoption requires Ceph 7 (Reef) as a requirement. 
Instead of performing a Ceph @@ -240,5 +239,4 @@ option: "{{ item.option }}" value: "{{ item.value }}" state: "present" - mode: "0644" loop: "{{ _undercloud_conf.config }}" diff --git a/roles/artifacts/tasks/ansible_logs.yml b/roles/artifacts/tasks/ansible_logs.yml index 1355019bb8..169a550f6e 100644 --- a/roles/artifacts/tasks/ansible_logs.yml +++ b/roles/artifacts/tasks/ansible_logs.yml @@ -10,5 +10,4 @@ src: "{{ item.path }}" dest: "{{ cifmw_artifacts_basedir }}/logs/" remote_src: true - mode: "0644" loop: "{{ files_to_copy.files }}" diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index 7519b41063..a0ecb5cc50 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -33,7 +33,6 @@ ansible.builtin.file: path: "{{ cifmw_artifacts_basedir }}/{{ item }}" state: directory - mode: "0755" loop: - artifacts - logs diff --git a/roles/build_openstack_packages/tasks/create_repo.yml b/roles/build_openstack_packages/tasks/create_repo.yml index 0dc056c8f1..016fc3586d 100644 --- a/roles/build_openstack_packages/tasks/create_repo.yml +++ b/roles/build_openstack_packages/tasks/create_repo.yml @@ -39,7 +39,6 @@ remote_src: true src: "{{ _repodir.path }}/" dest: "{{ cifmw_bop_gating_repo_dest }}" - mode: "0644" - name: Add gating.repo file to install the required built packages ansible.builtin.copy: @@ -51,7 +50,6 @@ gpgcheck=0 priority=1 dest: "{{ cifmw_bop_gating_repo_dest }}/gating.repo" - mode: "0644" - name: Serve gating repo ansible.builtin.import_tasks: serve_gating_repo.yml diff --git a/roles/build_openstack_packages/tasks/downstream.yml b/roles/build_openstack_packages/tasks/downstream.yml index 751126be43..260a0b5ef0 100644 --- a/roles/build_openstack_packages/tasks/downstream.yml +++ b/roles/build_openstack_packages/tasks/downstream.yml @@ -26,14 +26,12 @@ remote_src: true src: "{{ ansible_user_dir }}/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ 
cifmw_bop_initial_dlrn_config }}.cfg" - mode: "0644" - name: Copy patch_rebaser.ini to patch_rebaser repo ansible.builtin.copy: remote_src: true src: "{{ ansible_user_dir }}/patch_rebaser.ini" dest: "{{ cifmw_bop_build_repo_dir }}/patch_rebaser/patch_rebaser/patch_rebaser.ini" - mode: "0644" - name: Copy Downstream scripts to DLRN repo ansible.builtin.copy: diff --git a/roles/build_openstack_packages/tasks/install_dlrn.yml b/roles/build_openstack_packages/tasks/install_dlrn.yml index 11cd72ed26..1a08a1729a 100644 --- a/roles/build_openstack_packages/tasks/install_dlrn.yml +++ b/roles/build_openstack_packages/tasks/install_dlrn.yml @@ -126,7 +126,6 @@ ansible.builtin.template: src: projects.ini.j2 dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/projects.ini' - mode: "0644" - name: Copy the DLRN scripts in the virtualenv to the scripts dir ansible.posix.synchronize: @@ -160,7 +159,6 @@ remote_src: true src: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}-local.cfg" - mode: "0644" - name: Remove last """ from local mock config # noqa: command-instead-of-module ansible.builtin.command: diff --git a/roles/cert_manager/tasks/olm_manifest.yml b/roles/cert_manager/tasks/olm_manifest.yml index 48e8ad2645..90ba2331ca 100644 --- a/roles/cert_manager/tasks/olm_manifest.yml +++ b/roles/cert_manager/tasks/olm_manifest.yml @@ -3,7 +3,6 @@ ansible.builtin.copy: dest: "{{ cifmw_cert_manager_manifests_dir }}/cert-manager-{{ item.kind | lower }}-olm.yaml" content: "{{ item | to_nice_yaml }}" - mode: "0644" loop: - "{{ cifmw_cert_manager_olm_operator_group }}" - "{{ cifmw_cert_manager_olm_subscription }}" diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index b44d837a5c..c27815d741 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -36,7 +36,6 @@ create: true backup: true insertbefore: EOF - mode: 
"0644" - name: Ensure Ceph bootstrap host can ping itself register: _cmd_result diff --git a/roles/ci_dcn_site/tasks/scaledown_site.yml b/roles/ci_dcn_site/tasks/scaledown_site.yml index 23ba5da09b..407b9a188d 100644 --- a/roles/ci_dcn_site/tasks/scaledown_site.yml +++ b/roles/ci_dcn_site/tasks/scaledown_site.yml @@ -200,13 +200,11 @@ ansible.builtin.file: path: "/tmp/ceph_conf_files" state: directory - mode: "0750" - name: Save secret data to files ansible.builtin.copy: content: "{{ secret_info.resources[0].data[key] | b64decode | regex_replace('(?m)^\\s*\\n', '') }}" dest: "/tmp/ceph_conf_files/{{ key }}" - mode: "0640" loop: "{{ secret_info.resources[0].data.keys() }}" loop_control: loop_var: key diff --git a/roles/ci_local_storage/tasks/main.yml b/roles/ci_local_storage/tasks/main.yml index 169f581619..6daf1e0061 100644 --- a/roles/ci_local_storage/tasks/main.yml +++ b/roles/ci_local_storage/tasks/main.yml @@ -33,7 +33,6 @@ ansible.builtin.copy: dest: "{{ cifmw_cls_manifests_dir }}/storage-class.yaml" content: "{{ cifmw_cls_storage_manifest | to_nice_yaml }}" - mode: "0644" - name: Get k8s nodes ansible.builtin.import_tasks: fetch_names.yml diff --git a/roles/ci_lvms_storage/tasks/main.yml b/roles/ci_lvms_storage/tasks/main.yml index e3699aba4a..362a200d6f 100644 --- a/roles/ci_lvms_storage/tasks/main.yml +++ b/roles/ci_lvms_storage/tasks/main.yml @@ -26,7 +26,6 @@ ansible.builtin.file: path: "{{ cifmw_lvms_manifests_dir }}" state: directory - mode: "0755" - name: Put the manifest files in place ansible.builtin.template: diff --git a/roles/ci_multus/molecule/resources/clean.yml b/roles/ci_multus/molecule/resources/clean.yml index e88c90ee19..2f9abfbd4b 100644 --- a/roles/ci_multus/molecule/resources/clean.yml +++ b/roles/ci_multus/molecule/resources/clean.yml @@ -23,7 +23,6 @@ src: "{{ cifmw_ci_multus_manifests_dir }}" dest: "{{ cifmw_ci_multus_manifests_dir }}.backup" remote_src: true - mode: "0755" - name: Call cleanup ansible.builtin.import_role: diff --git 
a/roles/ci_multus/tasks/main.yml b/roles/ci_multus/tasks/main.yml index 84d8a8c572..5edcdfb30f 100644 --- a/roles/ci_multus/tasks/main.yml +++ b/roles/ci_multus/tasks/main.yml @@ -18,7 +18,6 @@ ansible.builtin.file: path: "{{ cifmw_ci_multus_manifests_dir }}" state: directory - mode: "0755" - name: Build list of networks from cifmw_networking_env_definition block: @@ -118,7 +117,6 @@ ansible.builtin.template: src: "nad.yml.j2" dest: "{{ cifmw_ci_multus_manifests_dir }}/ci_multus_nads.yml" - mode: "0644" - name: Create resources in OCP when: not cifmw_ci_multus_dryrun diff --git a/roles/ci_network/tasks/main.yml b/roles/ci_network/tasks/main.yml index 27d1be8494..179d4b8b47 100644 --- a/roles/ci_network/tasks/main.yml +++ b/roles/ci_network/tasks/main.yml @@ -42,7 +42,6 @@ section: "{{ nm_conf.section }}" option: "{{ nm_conf.option }}" value: "{{ nm_conf.value }}" - mode: "0644" loop: "{{ cifmw_network_nm_config }}" loop_control: loop_var: nm_conf diff --git a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml index 8ce164cbb3..ed707c6763 100644 --- a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml +++ b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml @@ -3,7 +3,6 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_manifests_dir }}" state: directory - mode: "0755" - name: Create the nmstate namespace kubernetes.core.k8s: diff --git a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml index 6cc009fad3..470811463b 100644 --- a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml +++ b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml @@ -25,7 +25,6 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_configs_dir }}" state: directory - mode: "0755" - name: "Save nmstate state for {{ cifmw_ci_nmstate_unmanaged_host }}" ansible.builtin.copy: diff --git a/roles/cifmw_cephadm/tasks/dashboard/validation.yml 
b/roles/cifmw_cephadm/tasks/dashboard/validation.yml index 1559ba30a9..b8e6569b89 100644 --- a/roles/cifmw_cephadm/tasks/dashboard/validation.yml +++ b/roles/cifmw_cephadm/tasks/dashboard/validation.yml @@ -25,7 +25,6 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_response" - mode: "0644" validate_certs: false register: dashboard_response failed_when: dashboard_response.failed == true @@ -38,7 +37,6 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_http_response" - mode: "0644" validate_certs: false username: admin password: admin diff --git a/roles/cifmw_external_dns/tasks/requirements.yml b/roles/cifmw_external_dns/tasks/requirements.yml index 21799f008a..af123b118d 100644 --- a/roles/cifmw_external_dns/tasks/requirements.yml +++ b/roles/cifmw_external_dns/tasks/requirements.yml @@ -56,7 +56,6 @@ ansible.builtin.file: path: "{{ cifmw_external_dns_manifests_dir }}" state: directory - mode: "0755" - name: Stat cifmw_external_dns_certificate on target hosts ansible.builtin.stat: diff --git a/roles/compliance/tasks/create_scap_report.yml b/roles/compliance/tasks/create_scap_report.yml index 74bf07f531..5cb8a1e9eb 100644 --- a/roles/compliance/tasks/create_scap_report.yml +++ b/roles/compliance/tasks/create_scap_report.yml @@ -31,7 +31,6 @@ ansible.builtin.copy: src: "{{ bzip_file.path }}" dest: "{{ base_name }}.xml.bz2" - mode: "0644" - name: Unzip the file ansible.builtin.command: "bunzip2 {{ base_name }}.xml.bz2" diff --git a/roles/copy_container/molecule/default/converge.yml b/roles/copy_container/molecule/default/converge.yml index a80508c0bd..c17b388b1d 100644 --- a/roles/copy_container/molecule/default/converge.yml +++ b/roles/copy_container/molecule/default/converge.yml @@ -43,7 +43,6 @@ ansible.builtin.copy: dest: 
"/tmp/copy-quay-config.yaml" content: "{{ _data }}" - mode: "0644" - name: Copy containers from RDO quay to local registry ansible.builtin.command: diff --git a/roles/copy_container/tasks/main.yml b/roles/copy_container/tasks/main.yml index fb95f13034..53947623e1 100644 --- a/roles/copy_container/tasks/main.yml +++ b/roles/copy_container/tasks/main.yml @@ -42,7 +42,6 @@ ansible.builtin.copy: src: copy-quay/ dest: "{{ temporary_copy_container_dir.path }}" - mode: "0755" - name: Build the copy-container register: go_build diff --git a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml index b764da7f13..99866dbe2d 100644 --- a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml +++ b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml @@ -95,7 +95,6 @@ ansible.builtin.copy: dest: "/home/dev-scripts/.ocp_cert_not_after" content: "{{ _date }}" - mode: "0644" - name: Ensure freshly built config ansible.builtin.include_role: diff --git a/roles/devscripts/tasks/139_configs.yml b/roles/devscripts/tasks/139_configs.yml index e899e7673b..a6e7aeba67 100644 --- a/roles/devscripts/tasks/139_configs.yml +++ b/roles/devscripts/tasks/139_configs.yml @@ -38,4 +38,3 @@ src: templates/conf_ciuser.j2 dest: >- {{ cifmw_devscripts_repo_dir }}/config_{{ cifmw_devscripts_user }}.sh - mode: "0644" diff --git a/roles/devscripts/tasks/main.yml b/roles/devscripts/tasks/main.yml index 6a87bf0237..61407b0e68 100644 --- a/roles/devscripts/tasks/main.yml +++ b/roles/devscripts/tasks/main.yml @@ -64,7 +64,6 @@ dest: "{{ cifmw_devscripts_logs_dir }}/{{ item.path | basename }}" remote_src: true src: "{{ item.path }}" - mode: "0644" loop: "{{ _deploy_logs.files }}" loop_control: label: "{{ item.path }}" diff --git a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml index 185fac9df1..86ab6b4582 100644 --- 
a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml +++ b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml @@ -3,7 +3,6 @@ ansible.builtin.get_url: url: "{{ commit_url }}/commit.yaml" dest: "{{ cifmw_dlrn_promote_workspace }}/commit.yaml" - mode: "0644" force: true register: result until: diff --git a/roles/edpm_build_images/tasks/main.yml b/roles/edpm_build_images/tasks/main.yml index 02309ad893..2c4f1d821b 100644 --- a/roles/edpm_build_images/tasks/main.yml +++ b/roles/edpm_build_images/tasks/main.yml @@ -31,7 +31,6 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_edpm_build_images_basedir }}" timeout: 20 - mode: "0644" register: result until: result is success retries: 60 diff --git a/roles/edpm_kustomize/tasks/kustomize.yml b/roles/edpm_kustomize/tasks/kustomize.yml index 5c5ecd2fd8..da1fda1060 100644 --- a/roles/edpm_kustomize/tasks/kustomize.yml +++ b/roles/edpm_kustomize/tasks/kustomize.yml @@ -33,7 +33,6 @@ } ) | to_nice_yaml }} - mode: "0644" - name: Apply the already existing kustomization if present environment: diff --git a/roles/edpm_kustomize/tasks/main.yml b/roles/edpm_kustomize/tasks/main.yml index 1065381ed5..0243bbe17d 100644 --- a/roles/edpm_kustomize/tasks/main.yml +++ b/roles/edpm_kustomize/tasks/main.yml @@ -55,7 +55,6 @@ remote_src: true src: "{{ cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.yaml" dest: "{{ cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.initial.yaml" - mode: "0644" - name: Prepare and load the ci-framework kustomize template file vars: diff --git a/roles/env_op_images/tasks/main.yml b/roles/env_op_images/tasks/main.yml index 39a6e55d55..7822587d95 100644 --- a/roles/env_op_images/tasks/main.yml +++ b/roles/env_op_images/tasks/main.yml @@ -139,4 +139,3 @@ ansible.builtin.copy: dest: "{{ cifmw_env_op_images_dir }}/artifacts/{{ cifmw_env_op_images_file }}" content: "{{ _content | to_nice_yaml }}" - mode: "0644" diff --git a/roles/federation/tasks/run_keycloak_setup.yml 
b/roles/federation/tasks/run_keycloak_setup.yml index 24ecdff734..769fd46526 100644 --- a/roles/federation/tasks/run_keycloak_setup.yml +++ b/roles/federation/tasks/run_keycloak_setup.yml @@ -25,7 +25,6 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, '.crc', 'machines', 'src', 'kubeconfig' ] | path_join }}" dest: "{{ [ ansible_user_dir, '.kube', 'config' ] | path_join }}" - mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Create namespace @@ -39,7 +38,6 @@ ansible.builtin.template: src: rhsso-operator-olm.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'rhsso-operator-olm.yaml' ] | path_join }}" - mode: "0644" - name: Install federation rhsso operator environment: @@ -91,7 +89,6 @@ ansible.builtin.template: src: sso.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'sso.yaml' ] | path_join }}" - mode: "0644" - name: Install federation sso pod environment: @@ -133,4 +130,3 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'tls.crt'] | path_join }}" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" - mode: "0644" diff --git a/roles/federation/tasks/run_openstack_auth_test.yml b/roles/federation/tasks/run_openstack_auth_test.yml index f87b2d9a53..ffbbda6e1e 100644 --- a/roles/federation/tasks/run_openstack_auth_test.yml +++ b/roles/federation/tasks/run_openstack_auth_test.yml @@ -31,7 +31,6 @@ ansible.builtin.template: src: kctestuser1.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" - mode: "0644" - name: Copy federation test user1 cloudrc file into pod kubernetes.core.k8s_cp: @@ -44,7 +43,6 @@ ansible.builtin.copy: src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" - mode: "0444" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git 
a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index a4abd325c4..593177a24d 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -18,7 +18,6 @@ ansible.builtin.copy: src: /home/zuul/.crc/machines/crc/kubeconfig dest: /home/zuul/.kube/config - mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Run federation create domain @@ -38,7 +37,6 @@ ansible.builtin.template: src: rules.json.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_rules_file ] | path_join }}" - mode: "0644" - name: Copy federation rules json file into pod kubernetes.core.k8s_cp: diff --git a/roles/hive/tasks/main.yml b/roles/hive/tasks/main.yml index 867b18908d..457a649d82 100644 --- a/roles/hive/tasks/main.yml +++ b/roles/hive/tasks/main.yml @@ -30,7 +30,6 @@ ansible.builtin.file: path: "{{ cifmw_hive_artifacts_dir }}" state: directory - mode: "0755" - name: "Performing {{ cifmw_hive_platform }} {{cifmw_hive_action }}" # noqa: name[template] ansible.builtin.include_tasks: "{{ cifmw_hive_platform }}_{{ cifmw_hive_action }}.yml" diff --git a/roles/install_ca/tasks/main.yml b/roles/install_ca/tasks/main.yml index 9c5c0cbab6..aac0b232a1 100644 --- a/roles/install_ca/tasks/main.yml +++ b/roles/install_ca/tasks/main.yml @@ -29,7 +29,6 @@ url: "{{ cifmw_install_ca_url }}" dest: "{{ cifmw_install_ca_trust_dir }}" validate_certs: "{{ cifmw_install_ca_url_validate_certs | default(omit) }}" - mode: "0644" - name: Install custom CA bundle from inline register: ca_inline diff --git a/roles/install_yamls/tasks/main.yml b/roles/install_yamls/tasks/main.yml index 0a70460e67..0f3ed9536b 100644 --- a/roles/install_yamls/tasks/main.yml +++ b/roles/install_yamls/tasks/main.yml @@ -120,7 +120,6 @@ {% for k,v in cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %} - mode: "0644" - name: Set install_yamls default values tags: @@ -167,7 +166,6 
@@ 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }} - mode: "0644" - name: Create empty cifmw_install_yamls_environment if needed tags: diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 37d9c6405c..9d8e459e4a 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -51,7 +51,6 @@ 'values.yaml' ) | path_join }} - mode: "0644" - name: Generate the OLM kustomization file ansible.builtin.copy: diff --git a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml index 04f360b08f..3c3300e205 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml @@ -151,7 +151,6 @@ remote_src: true src: "{{ cifmw_basedir }}/{{ item }}" dest: "{{ _dest }}/" - mode: "0755" loop: - artifacts - logs @@ -161,14 +160,11 @@ failed_when: false ansible.builtin.copy: remote_src: true - src: "{{ item.src }}" + src: "{{ item }}" dest: "{{ _dest }}/" - mode: "{{ item.mode }}" loop: - - { src: "/etc/cifmw-dnsmasq.conf", mode: "0644" } - - { src: "/etc/cifmw-dnsmasq.d", mode: "0755" } - loop_control: - label: "{{ item.src }}" + - /etc/cifmw-dnsmasq.conf + - /etc/cifmw-dnsmasq.d - name: Clean environment vars: diff --git a/roles/libvirt_manager/tasks/clean_layout.yml b/roles/libvirt_manager/tasks/clean_layout.yml index d7467c2dd4..e56816b35e 100644 --- a/roles/libvirt_manager/tasks/clean_layout.yml +++ b/roles/libvirt_manager/tasks/clean_layout.yml @@ -82,7 +82,6 @@ marker: "## {mark} {{ vm }} {{ inventory_hostname }}" state: absent create: true - mode: "0600" loop: "{{ cleanup_vms }}" # KEEP this for now to ensure smoother migration @@ -94,7 +93,6 @@ marker: "## {mark} {{ vm }}" state: absent create: true - mode: "0600" loop: "{{ cleanup_vms }}" - name: 
Get network list diff --git a/roles/libvirt_manager/tasks/deploy_layout.yml b/roles/libvirt_manager/tasks/deploy_layout.yml index 9705c4e116..fc590981e8 100644 --- a/roles/libvirt_manager/tasks/deploy_layout.yml +++ b/roles/libvirt_manager/tasks/deploy_layout.yml @@ -95,7 +95,6 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/{{ item }}-group.yml" src: inventory.yml.j2 - mode: "0644" loop: "{{ _cifmw_libvirt_manager_layout.vms.keys() }}" loop_control: label: "{{ item }}" @@ -104,7 +103,6 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/all-group.yml" src: "all-inventory.yml.j2" - mode: "0644" - name: Ensure storage pool is present. when: @@ -318,7 +316,6 @@ dest: >- {{ cifmw_libvirt_manager_basedir }}/artifacts/virtual-nodes.yml content: "{{ content | to_nice_yaml }}" - mode: "0644" - name: Ensure we get proper access to CRC when: diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 5d614d8ad2..77fb0dc5ea 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -79,7 +79,6 @@ ansible.builtin.copy: dest: "{{ _nic_info }}" content: "{{ cifmw_libvirt_manager_mac_map | to_nice_yaml }}" - mode: "0644" # END MAC pre-generation management # # START generate all IPs using networking_mapper role/module diff --git a/roles/libvirt_manager/tasks/get_image.yml b/roles/libvirt_manager/tasks/get_image.yml index 9b1f13f58f..d8eb33b05d 100644 --- a/roles/libvirt_manager/tasks/get_image.yml +++ b/roles/libvirt_manager/tasks/get_image.yml @@ -25,7 +25,6 @@ ansible.builtin.get_url: url: "{{ image_data.image_url }}" dest: "{{ image_data.image_local_dir }}/{{ image_data.disk_file_name }}" - mode: "0644" checksum: >- {% if image_data.sha256_image_name -%} sha256:{{ image_data.sha256_image_name }} diff --git 
a/roles/mirror_registry/tasks/main.yml b/roles/mirror_registry/tasks/main.yml index 2adceaaed9..6f2ac78bde 100644 --- a/roles/mirror_registry/tasks/main.yml +++ b/roles/mirror_registry/tasks/main.yml @@ -28,7 +28,6 @@ owner: "{{ ansible_user_id }}" group: "{{ ansible_user_id }}" state: directory - mode: "0755" - name: Download mirror-registry tools ansible.builtin.unarchive: diff --git a/roles/nat64_appliance/molecule/default/converge.yml b/roles/nat64_appliance/molecule/default/converge.yml index c321d6d1c2..014a76bf83 100644 --- a/roles/nat64_appliance/molecule/default/converge.yml +++ b/roles/nat64_appliance/molecule/default/converge.yml @@ -50,7 +50,6 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_basedir }}" timeout: 20 - mode: "0644" register: result until: result is success retries: 60 @@ -424,31 +423,26 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/test_node_info.log" content: "{{ _test_node_debug_info.stdout }}" - mode: "0644" - name: Write nat64-appliance info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_node_info.log" content: "{{ _nat64_appliance_debug_info.stdout }}" - mode: "0644" - name: Write nat64-appliance journal to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_journal.log" content: "{{ _nat64_appliance_journal.stdout }}" - mode: "0644" - name: Write nat64-appliance DNS64 debug to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_dns64_debug.log" content: "{{ _nat64_appliance_dns64_debug.stdout }}" - mode: "0644" - name: Write hypervisor info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/hypervisor_info.log" content: "{{ _hypervisor_info.stdout }}" - mode: "0644" - name: Ping example.com (delegate to test-node) delegate_to: test-node diff --git a/roles/networking_mapper/tasks/_gather_facts.yml b/roles/networking_mapper/tasks/_gather_facts.yml index d16438b336..25564e6058 100644 --- 
a/roles/networking_mapper/tasks/_gather_facts.yml +++ b/roles/networking_mapper/tasks/_gather_facts.yml @@ -77,4 +77,3 @@ items2dict | to_nice_yaml }} - mode: "0644" diff --git a/roles/openshift_login/tasks/main.yml b/roles/openshift_login/tasks/main.yml index f2a9f9d1a8..1c2cf634ef 100644 --- a/roles/openshift_login/tasks/main.yml +++ b/roles/openshift_login/tasks/main.yml @@ -98,7 +98,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml" content: "{{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}" - mode: "0600" + - name: Update the install-yamls-params with KUBECONFIG when: cifmw_install_yamls_environment is defined block: @@ -120,4 +120,3 @@ }, recursive=true) | to_nice_yaml }} dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml" - mode: "0600" diff --git a/roles/pkg_build/tasks/main.yml b/roles/pkg_build/tasks/main.yml index 727c6e7f34..ba20c937fe 100644 --- a/roles/pkg_build/tasks/main.yml +++ b/roles/pkg_build/tasks/main.yml @@ -20,7 +20,6 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/{{ item }}" state: directory - mode: "0755" loop: - volumes/packages/gating_repo - artifacts @@ -36,7 +35,6 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/volumes/packages/{{ pkg.name }}" state: directory - mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' @@ -46,7 +44,6 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/logs/build_{{ pkg.name }}" state: directory - mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' diff --git a/roles/reproducer/tasks/generate_bm_info.yml b/roles/reproducer/tasks/generate_bm_info.yml index 585758b5e7..7405442fc1 100644 --- a/roles/reproducer/tasks/generate_bm_info.yml +++ b/roles/reproducer/tasks/generate_bm_info.yml @@ -151,4 +151,3 @@ ) %} {% endfor %} {{ 
{'nodes': _ironic_nodes } | to_nice_yaml(indent=2) }} - mode: "0644" diff --git a/roles/update/tasks/reboot_hypervisor_using_cr.yml b/roles/update/tasks/reboot_hypervisor_using_cr.yml index 3d753930a6..b091cdedd2 100644 --- a/roles/update/tasks/reboot_hypervisor_using_cr.yml +++ b/roles/update/tasks/reboot_hypervisor_using_cr.yml @@ -23,7 +23,6 @@ ansible.builtin.copy: dest: "{{ cifmw_update_artifacts_basedir }}/{{ cifmw_reboot_dep_name }}.yaml" content: "{{ _content | to_nice_yaml }}" - mode: "0644" vars: _content: apiVersion: dataplane.openstack.org/v1beta1 From 76533edced0a4f4a6f2aa5485d286e4d0b6cd834 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Wed, 26 Mar 2025 19:12:08 +0100 Subject: [PATCH 019/480] Update gh actions to sync branches --- .../sync_branches_reusable_workflow.yml | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 483c04f852..261b370d86 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -1,5 +1,5 @@ --- -name: Sync a follower branch with Main +name: Sync a target branch with source branch on: workflow_call: inputs: @@ -11,23 +11,24 @@ on: type: string jobs: - sync: + sync-branches: runs-on: ubuntu-latest permissions: contents: write - pull-requests: write steps: - - name: Checkout, rebase and push to target branch + - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - ref: - ${{ inputs.target-branch }} - - run: | - # Details about the GH action bot comes from - # https://api.github.com/users/openshift-merge-robot - git config user.name "openshift-merge-robot" - git config user.email "30189218+openshift-merge-robot@users.noreply.github.com" - git fetch - git rebase origin/${{ inputs.source-branch }} + + - name: Git config + run: | + git config --global user.name "openstack-k8s-ci-robot" + git 
config --global user.email "70776706+openstack-k8s-ci-robot@users.noreply.github.com" + + - name: Rebase and Push + run: | + git fetch origin ${{ inputs.source-branch }} + git checkout ${{ inputs.target-branch }} + git rebase ${{ inputs.source-branch }} git push origin ${{ inputs.target-branch }} From 5f56142c595bac225828baa7bdde4ad35d23bd21 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 27 Mar 2025 11:32:38 +0100 Subject: [PATCH 020/480] Update the trigger job to use cifms's workflow --- .github/workflows/sync_branches_with_ext_trigger.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml index ce4e4bf942..dd46c1287d 100644 --- a/.github/workflows/sync_branches_with_ext_trigger.yml +++ b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -12,7 +12,7 @@ on: jobs: trigger-sync: - uses: openstack-k8s-operators/openstack-k8s-operators-ci/.github/workflows/release-branch-sync.yaml@main + uses: openstack-k8s-operators/ci-framework/.github/workflows/sync_branches_reusable_workflow.yml@main with: source_branch: ${{ inputs.source-branch }} target_branch: ananya-do-not-use-tmp # Hardcoded till testing finishes From 37a49ca27e2efc73bb0e1ded210f37e01a1ccca6 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 27 Mar 2025 12:08:04 +0100 Subject: [PATCH 021/480] Fix wrong var name in the reusable sync workflow --- .github/workflows/sync_branches_reusable_workflow.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 261b370d86..4d0970aca1 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -3,10 +3,10 @@ name: Sync a target branch with source branch on: workflow_call: inputs: - main-branch: + source-branch: required: true type: 
string - follower-branch: + target-branch: required: true type: string From ee461ca271312b20554619424ef6fe91afdc80d5 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 27 Mar 2025 12:23:06 +0100 Subject: [PATCH 022/480] Fix wrong var name in the reusable sync workflow --- .github/workflows/sync_branches_with_ext_trigger.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml index dd46c1287d..f8f5c247f6 100644 --- a/.github/workflows/sync_branches_with_ext_trigger.yml +++ b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -14,5 +14,5 @@ jobs: trigger-sync: uses: openstack-k8s-operators/ci-framework/.github/workflows/sync_branches_reusable_workflow.yml@main with: - source_branch: ${{ inputs.source-branch }} - target_branch: ananya-do-not-use-tmp # Hardcoded till testing finishes + source-branch: ${{ inputs.source-branch }} + target-branch: ananya-do-not-use-tmp # Hardcoded till testing finishes From 00a79e7dc11fb00529c90f2ebc3f357e21f64021 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 27 Mar 2025 16:11:32 +0100 Subject: [PATCH 023/480] Adds pat token in gh actions workflow --- .github/workflows/sync_branches_reusable_workflow.yml | 2 ++ .github/workflows/sync_branches_with_ext_trigger.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 4d0970aca1..a3e518bd51 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -20,6 +20,8 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + token: ${{ secrets.PAT_OPENSTACK_K8S_OPERATORS_CI_CIFMW }} + persist-credentials: true - name: Git config run: | diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml index 
f8f5c247f6..56aba5e8ba 100644 --- a/.github/workflows/sync_branches_with_ext_trigger.yml +++ b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -16,3 +16,5 @@ jobs: with: source-branch: ${{ inputs.source-branch }} target-branch: ananya-do-not-use-tmp # Hardcoded till testing finishes + secrets: + token: ${{ secrets.PAT_OPENSTACK_K8S_OPERATORS_CI_CIFMW }} From 51da1206c02a285c4a9e7756535bb01e8bd19b52 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 27 Mar 2025 16:50:32 +0100 Subject: [PATCH 024/480] Update missing token declaration in gh actions workflow --- .github/workflows/sync_branches_reusable_workflow.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index a3e518bd51..c9fe86bb1b 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -2,6 +2,10 @@ name: Sync a target branch with source branch on: workflow_call: + secrets: + token: + description: 'PAT with write access' + required: true inputs: source-branch: required: true From ef5d89548d012f5ba467370b29f37f96a9883179 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Fri, 28 Mar 2025 10:27:27 +0100 Subject: [PATCH 025/480] Removes PAT as that should not be required in a workflow --- .github/workflows/sync_branches_periodically.yml | 13 ------------- .../workflows/sync_branches_reusable_workflow.yml | 8 +------- .../workflows/sync_branches_with_ext_trigger.yml | 2 -- 3 files changed, 1 insertion(+), 22 deletions(-) delete mode 100644 .github/workflows/sync_branches_periodically.yml diff --git a/.github/workflows/sync_branches_periodically.yml b/.github/workflows/sync_branches_periodically.yml deleted file mode 100644 index 5f06fff94e..0000000000 --- a/.github/workflows/sync_branches_periodically.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -name: Olive Branch sync - -on: - schedule: - - cron: '0 19 * * *' - -jobs: 
- trigger-sync: - uses: openstack-k8s-operators/openstack-k8s-operators-ci/.github/workflows/release-branch-sync.yaml@main - with: - source_branch: main - target_branch: ananya-do-not-use-tmp # Hardcoded till testing finishes diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index c9fe86bb1b..35731aff3f 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -2,10 +2,6 @@ name: Sync a target branch with source branch on: workflow_call: - secrets: - token: - description: 'PAT with write access' - required: true inputs: source-branch: required: true @@ -24,13 +20,11 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - token: ${{ secrets.PAT_OPENSTACK_K8S_OPERATORS_CI_CIFMW }} - persist-credentials: true - name: Git config run: | git config --global user.name "openstack-k8s-ci-robot" - git config --global user.email "70776706+openstack-k8s-ci-robot@users.noreply.github.com" + git config --global user.email "openstack-k8s@redhat.com" - name: Rebase and Push run: | diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml index 56aba5e8ba..f8f5c247f6 100644 --- a/.github/workflows/sync_branches_with_ext_trigger.yml +++ b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -16,5 +16,3 @@ jobs: with: source-branch: ${{ inputs.source-branch }} target-branch: ananya-do-not-use-tmp # Hardcoded till testing finishes - secrets: - token: ${{ secrets.PAT_OPENSTACK_K8S_OPERATORS_CI_CIFMW }} From 935d495f507bb469fec4de58669f389a23590053 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 25 Mar 2025 18:38:26 +0100 Subject: [PATCH 026/480] Set owner:group for directories on controller-0 Set the owner and group to `zuul` on the controller-0 nodes /home/zuul/ci-framework-data folder. 
Closes: OSPRH-15047 --- create-infra.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/create-infra.yml b/create-infra.yml index fc2e377597..6d7e8b4149 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -137,3 +137,12 @@ ansible.builtin.include_role: name: sushy_emulator tasks_from: verify.yml + + - name: Set permissions on /home/zuul/ci-framework-data on controller-0 + ansible.builtin.file: + path: "{{ cifmw_basedir | default('/home/zuul/ci-framework-data') }}" + state: directory + recurse: true + owner: zuul + group: zuul + mode: "0755" From 602e6ca601d227b3efb222514df47de0916483b3 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 1 Apr 2025 09:55:52 +0200 Subject: [PATCH 027/480] Parse in better way default IP address It was wrong that we assumed that the CIDR for the network would be '/24'. Signed-off-by: Daniel Pawlik --- hooks/playbooks/fetch_compute_facts.yml | 2 +- hooks/playbooks/kuttl_openstack_prep.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 96c3c183f8..e65af3e2c2 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -69,7 +69,7 @@ {{ crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | - replace('/24', '') + split('/') | first }} NETWORK_MTU: "{{ crc_ci_bootstrap_networks_out[_crc_hostname].default.mtu }}" diff --git a/hooks/playbooks/kuttl_openstack_prep.yml b/hooks/playbooks/kuttl_openstack_prep.yml index 6d9ab067d5..5d8563f765 100644 --- a/hooks/playbooks/kuttl_openstack_prep.yml +++ b/hooks/playbooks/kuttl_openstack_prep.yml @@ -31,7 +31,7 @@ {{ crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | - replace('/24', '') + split('/') | first }} NETWORK_MTU: "{{ crc_ci_bootstrap_networks_out[_crc_hostname].default.mtu }}" 
From 5473fc04d618098f6744f4b231a8404b7e6187b5 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Thu, 16 Jan 2025 11:23:30 +0100 Subject: [PATCH 028/480] Adds kustomization that adds edpm_bootstrap_command to the compute nodes --- hooks/playbooks/fetch_compute_facts.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index e65af3e2c2..d05787e07c 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -257,6 +257,13 @@ path: /spec/nodeTemplate/ansible/ansibleVars/edpm_sshd_allowed_ranges value: ["0.0.0.0/0"] + {% if cifmw_hook_fetch_compute_facts_edpm_cmd is defined %} + - op: add + path: /spec/nodeTemplate/ansible/ansibleVars/edpm_bootstrap_command + value: |- + {{ cifmw_hook_fetch_compute_facts_edpm_cmd | indent( width=8) }} + {% endif %} + - name: Ensure we know about the private host keys ansible.builtin.shell: cmd: | From af2f86ab74c70d1f4b9860112be6ebe5904e7e26 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 27 Mar 2025 09:20:46 +0100 Subject: [PATCH 029/480] Change base molecule nodeset to use just IBM providers The other jobs require multi node deployment that mostly will work on Vexxhost. To unlock resources for other job, let's move molecule job to IBM. Signed-off-by: Daniel Pawlik --- zuul.d/molecule-base.yaml | 2 +- zuul.d/nodeset.yaml | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index dc9141a6d9..22919e9cab 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -2,7 +2,7 @@ # one, and be listed in the "molecule.yaml" file. 
- job: name: cifmw-molecule-base - nodeset: centos-stream-9 + nodeset: centos-stream-9-ibm parent: base-ci-framework provides: - cifmw-molecule diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index 22a6ae37c8..52e20d003e 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -27,6 +27,18 @@ - name: peers nodes: [] +- nodeset: + name: centos-stream-9-ibm + nodes: + - name: controller + label: cloud-centos-9-stream-tripleo-ibm + groups: + - name: switch + nodes: + - controller + - name: peers + nodes: [] + - nodeset: name: 4x-centos-9-medium nodes: From 5c2a29cedb22f0536ae8bf8d13183be28fbd9921 Mon Sep 17 00:00:00 2001 From: Arx Cruz Date: Tue, 1 Apr 2025 11:02:39 +0200 Subject: [PATCH 030/480] Add retries in connectivity test Adding retries to gave computes more time to test ping instead of failing and waste 12 hours of deployment. --- roles/reproducer/tasks/configure_computes.yml | 27 ++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/roles/reproducer/tasks/configure_computes.yml b/roles/reproducer/tasks/configure_computes.yml index 119d468f9e..95c81347d2 100644 --- a/roles/reproducer/tasks/configure_computes.yml +++ b/roles/reproducer/tasks/configure_computes.yml @@ -2,15 +2,24 @@ - name: Configure networking on computes delegate_to: "{{ _host }}" block: - - name: Ensure we can ping controller-0 from ctlplane - when: - # do not check connectivity between computes/networkers and - # controller-0 in BGP environments via ctlplane until BGP is configured - - _host is not match('^r[0-9]-compute-.*') - - _host is not match('^r[0-9]-networker-.*') - ansible.builtin.command: - cmd: | - ping -c2 {{ cifmw_reproducer_validate_network_host }} + - name: Check connectivity + block: + - name: Ensure we can ping controller-0 from ctlplane + when: + # do not check connectivity between computes/networkers and + # controller-0 in BGP environments via ctlplane until BGP is configured + - _host is not match('^r[0-9]-compute-.*') + - _host is 
not match('^r[0-9]-networker-.*') + ansible.builtin.command: + cmd: | + ping -c2 {{ cifmw_reproducer_validate_network_host }} + retries: 30 + delay: 10 + register: ping_output + rescue: + - name: Show ping output for debug reasons + ansible.builtin.fail: + msg: "{{ ping_output }}" - name: Tweak dnf configuration become: true From 93df0e7f1a8ea87be3a02fa5db2c3737ca76d3e3 Mon Sep 17 00:00:00 2001 From: Jeremy Agee Date: Mon, 31 Mar 2025 15:58:36 -0400 Subject: [PATCH 031/480] Fix path error in federation role --- roles/federation/tasks/run_keycloak_setup.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/federation/tasks/run_keycloak_setup.yml b/roles/federation/tasks/run_keycloak_setup.yml index 769fd46526..b8023bbee0 100644 --- a/roles/federation/tasks/run_keycloak_setup.yml +++ b/roles/federation/tasks/run_keycloak_setup.yml @@ -23,7 +23,7 @@ - name: Link kubeconfg for comparability ansible.builtin.copy: - src: "{{ [ ansible_user_dir, '.crc', 'machines', 'src', 'kubeconfig' ] | path_join }}" + src: "{{ [ ansible_user_dir, '.crc', 'machines', 'crc', 'kubeconfig' ] | path_join }}" dest: "{{ [ ansible_user_dir, '.kube', 'config' ] | path_join }}" when: cifmw_federation_deploy_type == "crc" From 9061a5eec7ab082eaec1a4fd8efe82b555dea8f5 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Tue, 1 Apr 2025 18:42:27 +0530 Subject: [PATCH 032/480] Pull dlrn hash md5 while populating component repo Currently in component repo, we pull current-podified dlrn md5 hash. But sometimes promotion lags, Specific DFG wants to test with podified-ci-testing content in component pipeline. By passing cifmw_repo_setup_promotion instead of hardcoded current-podified hash allows us to test podified-ci-testing content with component repo. 
Signed-off-by: Chandan Kumar (raukadah) --- roles/repo_setup/tasks/artifacts.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/roles/repo_setup/tasks/artifacts.yml b/roles/repo_setup/tasks/artifacts.yml index 96ac16ed22..0af799b9d4 100644 --- a/roles/repo_setup/tasks/artifacts.yml +++ b/roles/repo_setup/tasks/artifacts.yml @@ -31,12 +31,15 @@ dest: "{{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5" mode: "0644" - - name: Dump current-podified hash when using with component repo + - name: Dump dlrn hash when using with component repo when: cifmw_repo_setup_component_name | length > 0 block: - - name: Dump current-podified hash + # Note(Chandan Kumar): It should be either podified-ci-testing and current-podified. + # or specific dlrn hash to test component content with current-podified and + # podified-ci-testing. + - name: Dump dlrn hash hash ansible.builtin.get_url: - url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5" + url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/{{ cifmw_repo_setup_promotion }}/delorean.repo.md5" dest: "{{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5" mode: "0644" From b76d803e79f26251364ff1e9de3fbf1d351a3ae4 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Tue, 1 Apr 2025 21:32:17 +0200 Subject: [PATCH 033/480] Adds crc-storage namespace to os mustgather --- roles/os_must_gather/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/os_must_gather/defaults/main.yml b/roles/os_must_gather/defaults/main.yml index 0506c4e39d..36ca557fd1 100644 --- a/roles/os_must_gather/defaults/main.yml +++ b/roles/os_must_gather/defaults/main.yml @@ -33,4 +33,5 @@ cifmw_os_must_gather_namespaces: - openshift-nmstate - openshift-marketplace - 
metallb-system + - crc-storage cifmw_os_must_gather_host_network: false From b01749bba25a0069065ed7ffdc44ae9100a4d716 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 1 Apr 2025 16:28:58 +0200 Subject: [PATCH 034/480] Rename nodesets to include crc-cloud in the name Since OCP 4.18, CI jobs would be using CRC cloud, not CRC extracted, that might be confusing for people outside the project. Signed-off-by: Daniel Pawlik --- zuul.d/nodeset.yaml | 153 ++++++++++++++++++++++---------------------- 1 file changed, 76 insertions(+), 77 deletions(-) diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index 52e20d003e..da991a5fe4 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -339,33 +339,96 @@ - name: controller label: centos-9-stream-crc-2-39-0-xl - # -# CRC-2.48 (OCP4.18) nodesets +# CRC CLOUD (OCP 4.18) (CRC 2.48.0) nodesets # - nodeset: - name: centos-9-medium-crc-extracted-2-48-0-3xl + name: centos-9-crc-2-48-0-xxl + nodes: + - name: controller + label: centos-9-stream-crc-2-48-0-xxl + +- nodeset: + name: centos-9-medium-2x-centos-9-crc-cloud-ocp-4-18-1-xxl nodes: - name: controller label: cloud-centos-9-stream-tripleo-medium + # Note(Chandan Kumar): Switch to xxl nodeset once RHOSZUUL-1940 resolves + - name: compute-0 + label: cloud-centos-9-stream-tripleo + - name: compute-1 + label: cloud-centos-9-stream-tripleo - name: crc - label: crc-cloud-ocp-4-18-1-3xl + label: crc-cloud-ocp-4-18-1-xxl groups: - name: computes - nodes: [] + nodes: + - compute-0 + - compute-1 - name: ocps nodes: - crc - nodeset: - name: centos-9-crc-2-48-0-xxl + name: centos-9-2x-centos-9-xxl-crc-cloud-ocp-4-18-1-xxl nodes: - name: controller - label: centos-9-stream-crc-2-48-0-xxl + label: cloud-centos-9-stream-tripleo + - name: compute-0 + label: cloud-centos-9-stream-tripleo-xxl + - name: compute-1 + label: cloud-centos-9-stream-tripleo-xxl + - name: crc + label: crc-cloud-ocp-4-18-1-xxl + groups: + - name: computes + nodes: + - compute-0 + - compute-1 + - name: ocps + 
nodes: + - crc - nodeset: - name: centos-9-rhel-9-2-crc-extracted-2-48-0-3xl + name: centos-9-medium-3x-centos-9-crc-cloud-ocp-4-18-1-xxl + nodes: + - name: controller + label: cloud-centos-9-stream-tripleo-medium + - name: compute-0 + label: cloud-centos-9-stream-tripleo + - name: compute-1 + label: cloud-centos-9-stream-tripleo + - name: compute-2 + label: cloud-centos-9-stream-tripleo + - name: crc + label: crc-cloud-ocp-4-18-1-xxl + groups: + - name: computes + nodes: + - compute-0 + - compute-1 + - compute-2 + - name: ocps + nodes: + - crc + +- nodeset: + name: centos-9-medium-crc-cloud-ocp-4-18-1-3xl + nodes: + - name: controller + label: cloud-centos-9-stream-tripleo-medium + - name: crc + label: crc-cloud-ocp-4-18-1-3xl + groups: + - name: computes + nodes: [] + - name: ocps + nodes: + - crc + +- nodeset: + name: centos-9-rhel-9-2-crc-cloud-ocp-4-18-1-3xl nodes: - name: controller label: cloud-centos-9-stream-tripleo @@ -384,7 +447,7 @@ - standalone - nodeset: - name: centos-9-multinode-rhel-9-2-crc-extracted-2-48-0-3xl + name: centos-9-multinode-rhel-9-2-crc-cloud-ocp-4-18-1-3xl nodes: - name: controller label: cloud-centos-9-stream-tripleo @@ -431,7 +494,7 @@ - overcloud-novacompute-2 - nodeset: - name: centos-9-multinode-rhel-9-2-crc-extracted-2-48-0-3xl-novacells + name: centos-9-multinode-rhel-9-2-crc-cloud-ocp-4-18-1-3xl-novacells nodes: - name: controller label: cloud-centos-9-stream-tripleo @@ -471,7 +534,7 @@ - cell2-controller-compute-0 - nodeset: - name: centos-9-medium-centos-9-crc-extracted-2-48-0-3xl + name: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl nodes: - name: controller label: cloud-centos-9-stream-tripleo-medium @@ -494,71 +557,7 @@ label: centos-9-stream-crc-2-48-0-3xl - nodeset: - name: centos-9-medium-2x-centos-9-crc-extracted-2-48-0-xxl - nodes: - - name: controller - label: cloud-centos-9-stream-tripleo-medium - # Note(Chandan Kumar): Switch to xxl nodeset once RHOSZUUL-1940 resolves - - name: compute-0 - label: 
cloud-centos-9-stream-tripleo - - name: compute-1 - label: cloud-centos-9-stream-tripleo - - name: crc - label: crc-cloud-ocp-4-18-1-xxl - groups: - - name: computes - nodes: - - compute-0 - - compute-1 - - name: ocps - nodes: - - crc - -- nodeset: - name: centos-9-2x-centos-9-xxl-crc-extracted-2-48-0-xxl - nodes: - - name: controller - label: cloud-centos-9-stream-tripleo - - name: compute-0 - label: cloud-centos-9-stream-tripleo-xxl - - name: compute-1 - label: cloud-centos-9-stream-tripleo-xxl - - name: crc - label: crc-cloud-ocp-4-18-1-xxl - groups: - - name: computes - nodes: - - compute-0 - - compute-1 - - name: ocps - nodes: - - crc - -- nodeset: - name: centos-9-medium-3x-centos-9-crc-extracted-2-48-0-xxl - nodes: - - name: controller - label: cloud-centos-9-stream-tripleo-medium - - name: compute-0 - label: cloud-centos-9-stream-tripleo - - name: compute-1 - label: cloud-centos-9-stream-tripleo - - name: compute-2 - label: cloud-centos-9-stream-tripleo - - name: crc - label: crc-cloud-ocp-4-18-1-xxl - groups: - - name: computes - nodes: - - compute-0 - - compute-1 - - compute-2 - - name: ocps - nodes: - - crc - -- nodeset: - name: centos-9-medium-3x-centos-9-crc-extracted-2-48-0-3xl + name: centos-9-medium-3x-centos-9-crc-cloud-ocp-4-18-1-3xl nodes: - name: controller label: cloud-centos-9-stream-tripleo-medium @@ -583,7 +582,7 @@ # todo: Remove. Temporal. Needed as the credentials used in ci-bootstrap jobs for IBM don't work - nodeset: - name: centos-9-medium-centos-9-crc-extracted-2-48-0-3xl-vexxhost + name: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl-vexxhost nodes: - name: controller label: cloud-centos-9-stream-tripleo-vexxhost-medium From 00e3016e882a08c40ac42dd3b5ec732d41b5be5a Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 27 Mar 2025 08:39:01 +0100 Subject: [PATCH 035/480] Change nodeset name and label related to force spawning on IBM The nodeset name and label that was used got wrong version set. 
Signed-off-by: Daniel Pawlik --- zuul.d/nodeset.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index da991a5fe4..f69d6da14e 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -630,7 +630,7 @@ label: centos-9-stream-crc-2-48-0-3xl-ibm - nodeset: - name: centos-9-crc-2-39-0-6xlarge-ibm + name: centos-9-crc-2-48-0-6xlarge-ibm nodes: - name: controller - label: centos-9-stream-crc-2-39-0-6xlarge-ibm + label: centos-9-stream-crc-2-48-0-6xlarge-ibm From 4471fe31c1644d9957c60fe1a3b53879626d3375 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Wed, 26 Mar 2025 12:11:44 +0530 Subject: [PATCH 036/480] Set NNCP_DNS_SERVER in kuttl deploy-deps.yaml playbook Currently watcher-operator-kuttl tests are failing with nncp_dns error[1]. https://github.com/openstack-k8s-operators/install_yamls/pull/1024/ recently added DNS only NNCP in install_yamls. In order to make it work, we need to set NNCP_DNS_SERVER var in the kuttl tests. Links: [1]. 
https://github.com/openstack-k8s-operators/watcher-operator/pull/119#issuecomment-2753365667 Signed-off-by: Chandan Kumar (raukadah) --- ci/playbooks/kuttl/deploy-deps.yaml | 32 +++++++++++++++++++---------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/ci/playbooks/kuttl/deploy-deps.yaml b/ci/playbooks/kuttl/deploy-deps.yaml index 286b998891..dcf097de5a 100644 --- a/ci/playbooks/kuttl/deploy-deps.yaml +++ b/ci/playbooks/kuttl/deploy-deps.yaml @@ -32,18 +32,28 @@ loop_control: label: "{{ item }}" - - name: Ensure that the isolated net was configured for crc - ansible.builtin.assert: - that: - - crc_ci_bootstrap_networks_out is defined - - "'crc' in crc_ci_bootstrap_networks_out" - - "'default' in crc_ci_bootstrap_networks_out['crc']" + - name: set facts for further usage within the framework + vars: + _crc_hostname: "{{ cifmw_crc_hostname | default('crc') }}" + block: + - name: Ensure that the isolated net was configured for crc + ansible.builtin.assert: + that: + - crc_ci_bootstrap_networks_out is defined + - crc_ci_bootstrap_networks_out[_crc_hostname] is defined + - crc_ci_bootstrap_networks_out[_crc_hostname]['default'] is defined - - name: Set facts for further usage within the framework - ansible.builtin.set_fact: - cifmw_edpm_prepare_extra_vars: - NNCP_INTERFACE: "{{ crc_ci_bootstrap_networks_out.crc.default.iface }}" - NETWORK_MTU: "{{ crc_ci_bootstrap_networks_out.crc.default.mtu }}" + - name: Set facts for further usage within the framework + ansible.builtin.set_fact: + cifmw_edpm_prepare_extra_vars: + NNCP_INTERFACE: "{{ crc_ci_bootstrap_networks_out.crc.default.iface }}" + NETWORK_MTU: "{{ crc_ci_bootstrap_networks_out.crc.default.mtu }}" + NNCP_DNS_SERVER: >- + {{ + crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | + default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | + split('/') | first + }} - hosts: "{{ cifmw_target_host | default('localhost') }}" name: Deploy Openstack Operators From 
d91b8af3f72ab041d7f52718c4c1d5c2f6fffadd Mon Sep 17 00:00:00 2001 From: bshewale Date: Fri, 21 Mar 2025 17:25:34 +0530 Subject: [PATCH 037/480] Switch edpm-multinode github-check jobs to OCP - 4.18 This PR switched the edpm multinode github-check jobs to OCP- 4.18 Jobs: base, edpm_multinode, kuttl_multinode, podified_multinode and tempest_multinode. Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/2859 --- zuul.d/adoption.yaml | 14 +++++++++----- zuul.d/base.yaml | 9 +++++---- zuul.d/edpm_multinode.yaml | 8 ++++---- zuul.d/kuttl_multinode.yaml | 2 +- zuul.d/podified_multinode.yaml | 2 +- zuul.d/tempest_multinode.yaml | 2 +- 6 files changed, 21 insertions(+), 16 deletions(-) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index c435b374b9..607b7008c7 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -3,11 +3,11 @@ # nodeset and an ansible-controller. - job: name: cifmw-adoption-base - parent: base-extracted-crc + parent: base-crc-cloud abstract: true timeout: 14400 attempts: 1 - nodeset: centos-9-rhel-9-2-crc-extracted-2-39-0-3xl + nodeset: centos-9-rhel-9-2-crc-cloud-ocp-4-18-1-3xl roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: @@ -19,6 +19,7 @@ - ci/playbooks/collect-logs.yml - ci/playbooks/multinode-autohold.yml vars: &adoption_vars + enable_ramdisk: true osp_17_repos: - rhel-9-for-x86_64-baseos-eus-rpms - rhel-9-for-x86_64-appstream-eus-rpms @@ -198,7 +199,7 @@ - job: name: cifmw-adoption-base-source-multinode - parent: base-extracted-crc + parent: base-crc-cloud abstract: true timeout: 14400 attempts: 1 @@ -214,6 +215,7 @@ - ci/playbooks/collect-logs.yml - ci/playbooks/multinode-autohold.yml vars: + enable_ramdisk: true <<: *adoption_vars crc_ci_bootstrap_networking: networks: &multinode_networks @@ -406,7 +408,7 @@ - job: name: cifmw-adoption-base-source-multinode-novacells - parent: base-extracted-crc + parent: base-crc-cloud abstract: true voting: false timeout: 14400 @@ -417,6 +419,7 @@ 
pre-run: *multinode-prerun post-run: *multinode-postrun vars: + enable_ramdisk: true <<: *adoption_vars crc_ci_bootstrap_networking: networks: *multinode_networks @@ -491,13 +494,14 @@ - job: name: cifmw-adoption-base-multinode-networker - parent: base-extracted-crc + parent: base-crc-cloud abstract: true attempts: 1 roles: *multinode-roles pre-run: *multinode-prerun post-run: *multinode-postrun vars: + enable_ramdisk: true <<: *adoption_vars crc_ci_bootstrap_networking: networks: *multinode_networks diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 2b0d1e6081..41e2f169a9 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -121,10 +121,10 @@ # crc_ci_bootstrap_networking using *extra-vars*. - job: name: cifmw-podified-multinode-edpm-base-crc - parent: base-extracted-crc + parent: base-crc-cloud timeout: 10800 attempts: 1 - nodeset: centos-9-medium-centos-9-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files required-projects: &multinode_edpm_rp - openstack-k8s-operators/ci-framework @@ -146,6 +146,7 @@ - ci/playbooks/collect-logs.yml - ci/playbooks/multinode-autohold.yml vars: &multinode_edpm_vars + enable_ramdisk: true zuul_log_collection: true registry_login_enabled: true push_registry: quay.rdoproject.org @@ -210,7 +211,7 @@ parent: base-extracted-crc-ci-bootstrap timeout: 10800 attempts: 1 - nodeset: centos-9-medium-centos-9-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles @@ -231,7 +232,7 @@ parent: base-extracted-crc-ci-bootstrap-staging timeout: 10800 attempts: 1 - nodeset: centos-9-medium-centos-9-crc-extracted-2-39-0-3xl-vexxhost + nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl-vexxhost irrelevant-files: *ir_files required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles diff --git a/zuul.d/edpm_multinode.yaml 
b/zuul.d/edpm_multinode.yaml index 5eadecd4b2..055bde916e 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -2,7 +2,7 @@ - job: name: podified-multinode-edpm-deployment-crc-2comp parent: podified-multinode-edpm-deployment-crc - nodeset: centos-9-medium-2x-centos-9-crc-extracted-2-39-0-xxl + nodeset: centos-9-medium-2x-centos-9-crc-cloud-ocp-4-18-1-xxl description: | A multinode EDPM Zuul job which has one controller, one extracted crc and two compute nodes. It is used in whitebox neutron tempest plugin testing. @@ -68,7 +68,7 @@ - job: name: podified-multinode-edpm-deployment-crc-3comp parent: podified-multinode-edpm-deployment-crc - nodeset: centos-9-medium-3x-centos-9-crc-extracted-2-39-0-xxl + nodeset: centos-9-medium-3x-centos-9-crc-cloud-ocp-4-18-1-xxl vars: crc_ci_bootstrap_cloud_name: "{{ nodepool.cloud | replace('-nodepool-tripleo','') }}" crc_ci_bootstrap_networking: @@ -144,7 +144,7 @@ - job: name: podified-multinode-hci-deployment-crc-3comp parent: podified-multinode-edpm-deployment-crc - nodeset: centos-9-medium-3x-centos-9-crc-extracted-2-39-0-xxl + nodeset: centos-9-medium-3x-centos-9-crc-cloud-ocp-4-18-1-xxl vars: cifmw_edpm_deploy_hci: true crc_ci_bootstrap_cloud_name: "{{ nodepool.cloud | replace('-nodepool-tripleo','') }}" @@ -235,7 +235,7 @@ - job: name: podified-multinode-hci-deployment-crc-1comp parent: podified-multinode-edpm-deployment-crc - nodeset: centos-9-medium-centos-9-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl vars: cifmw_edpm_deploy_hci: true cifmw_cephadm_single_host_defaults: true diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index bb9a8f1386..81c244d9c7 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -4,7 +4,7 @@ parent: cifmw-podified-multinode-edpm-base-crc timeout: 7200 abstract: true - nodeset: centos-9-medium-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-crc-cloud-ocp-4-18-1-3xl vars: 
zuul_log_collection: true extra-vars: diff --git a/zuul.d/podified_multinode.yaml b/zuul.d/podified_multinode.yaml index 45d4470419..22458538a5 100644 --- a/zuul.d/podified_multinode.yaml +++ b/zuul.d/podified_multinode.yaml @@ -12,7 +12,7 @@ parent: cifmw-podified-multinode-edpm-base-crc timeout: 5400 abstract: true - nodeset: centos-9-medium-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-crc-cloud-ocp-4-18-1-3xl run: - ci/playbooks/edpm/run.yml extra-vars: diff --git a/zuul.d/tempest_multinode.yaml b/zuul.d/tempest_multinode.yaml index a23237657c..0d76851d4a 100644 --- a/zuul.d/tempest_multinode.yaml +++ b/zuul.d/tempest_multinode.yaml @@ -4,7 +4,7 @@ parent: cifmw-podified-multinode-edpm-base-crc timeout: 5400 abstract: true - nodeset: centos-9-medium-crc-extracted-2-39-0-3xl + nodeset: centos-9-medium-crc-cloud-ocp-4-18-1-3xl description: | Base multinode job definition for running test-operator. vars: From e12cc67ee5b075db1cc64b034f6109b0b35986a7 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 3 Apr 2025 09:29:39 +0200 Subject: [PATCH 038/480] Allow setting BM_INSTANCE_MEMORY var Without this patch, the CI job will fail with error: fatal: [localhost]: FAILED! 
=> changed=false assertion: _cifmw_install_yamls_unmatched_vars | length == 0 evaluated_to: false msg: 'cifmw_install_yamls_vars contains a variable that is not defined in install_yamls Makefile nor cifmw_install_yamls_whitelisted_vars: BM_INSTANCE_MEMORY' Signed-off-by: Daniel Pawlik --- roles/install_yamls/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/install_yamls/defaults/main.yml b/roles/install_yamls/defaults/main.yml index 5515bad9f4..5a2b7e7b66 100644 --- a/roles/install_yamls/defaults/main.yml +++ b/roles/install_yamls/defaults/main.yml @@ -35,6 +35,7 @@ cifmw_install_yamls_whitelisted_vars: - OUTPUT_BASEDIR - OUTPUT_DIR - SSH_KEY_FILE + - BM_INSTANCE_MEMORY # Defines in install_yamls when we should clone and checkout based on # openstack-operator references. cifmw_install_yamls_checkout_openstack_ref: "true" From 7770a77d732a22f611a1fe8fadf73981b83f3985 Mon Sep 17 00:00:00 2001 From: Bhagyashri Shewale Date: Mon, 7 Apr 2025 12:42:23 +0530 Subject: [PATCH 039/480] Revert "Pull dlrn hash md5 while populating component repo" This reverts commit 9061a5eec7ab082eaec1a4fd8efe82b555dea8f5. --- roles/repo_setup/tasks/artifacts.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/roles/repo_setup/tasks/artifacts.yml b/roles/repo_setup/tasks/artifacts.yml index 0af799b9d4..96ac16ed22 100644 --- a/roles/repo_setup/tasks/artifacts.yml +++ b/roles/repo_setup/tasks/artifacts.yml @@ -31,15 +31,12 @@ dest: "{{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5" mode: "0644" - - name: Dump dlrn hash when using with component repo + - name: Dump current-podified hash when using with component repo when: cifmw_repo_setup_component_name | length > 0 block: - # Note(Chandan Kumar): It should be either podified-ci-testing and current-podified. - # or specific dlrn hash to test component content with current-podified and - # podified-ci-testing. 
- - name: Dump dlrn hash hash + - name: Dump current-podified hash ansible.builtin.get_url: - url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/{{ cifmw_repo_setup_promotion }}/delorean.repo.md5" + url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5" dest: "{{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5" mode: "0644" From d86657a30e7c388acbe0cfcfc5b025f9fd82c786 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 4 Apr 2025 14:07:49 +0530 Subject: [PATCH 040/480] [kuttl multinode] Disable router with gateways With limited public IPs on some clouds we hit quota issues from time to time. So disabling router with gateway ports creation in kuttl multinode jobs. Same will be done on other jobs in follow up. Related-Issue: #OSPCIX-771 --- zuul.d/kuttl_multinode.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index 81c244d9c7..fa5fb7f700 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -15,6 +15,7 @@ range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router: false internal-api: vlan: 20 range: 172.17.0.0/24 From 5e540771e6c43fd854ddf8aba935b816afbddd71 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 7 Apr 2025 09:30:59 +0200 Subject: [PATCH 041/480] Switch adoption base job to use OCP 4.18 For crc-cloud base job, it should use OCP 4.18 image, not CRC extracted 2.39. 
Signed-off-by: Daniel Pawlik --- zuul.d/adoption.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index 607b7008c7..773c52be6c 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -203,7 +203,7 @@ abstract: true timeout: 14400 attempts: 1 - nodeset: centos-9-multinode-rhel-9-2-crc-extracted-2-39-0-3xl + nodeset: centos-9-multinode-rhel-9-2-crc-cloud-ocp-4-18-1-3xl roles: &multinode-roles - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: &multinode-prerun @@ -413,7 +413,7 @@ voting: false timeout: 14400 attempts: 1 - nodeset: centos-9-multinode-rhel-9-2-crc-extracted-2-39-0-3xl-novacells + nodeset: centos-9-multinode-rhel-9-2-crc-cloud-ocp-4-18-1-3xl-novacells roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: *multinode-prerun From dba35bd8fff7d33ea735f131b0e5b6efee065598 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 7 Apr 2025 12:21:21 +0200 Subject: [PATCH 042/480] Switch to OCP 4.18 for periodic-whitebox-neutron-tempest-plugin CI job The job is using a parent that is related to crc-cloud, but its nodeset is set to use crc-extracted, which is wrong. Let's switch to CRC cloud (OCP 4.18) also here. 
Signed-off-by: Daniel Pawlik --- zuul.d/whitebox_neutron_tempest_jobs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/whitebox_neutron_tempest_jobs.yaml b/zuul.d/whitebox_neutron_tempest_jobs.yaml index c800e6fad2..f321bbcef1 100644 --- a/zuul.d/whitebox_neutron_tempest_jobs.yaml +++ b/zuul.d/whitebox_neutron_tempest_jobs.yaml @@ -5,7 +5,7 @@ - job: name: whitebox-neutron-tempest-plugin-podified-multinode-edpm-deployment-crc-2comp parent: podified-multinode-edpm-deployment-crc-2comp - nodeset: centos-9-2x-centos-9-xxl-crc-extracted-2-39-0-xxl + nodeset: centos-9-2x-centos-9-xxl-crc-cloud-ocp-4-18-1-xxl timeout: 12600 override-checkout: main description: | From 0798e7642fd9240cc8e8be9c1e79faaf9d1c119a Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 7 Apr 2025 13:10:49 +0530 Subject: [PATCH 043/480] Set timezone to UTC in cloud-init user-data --- roles/libvirt_manager/tasks/create_cloud_init_iso.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml index 2d9d3dcedc..9f300d80d5 100644 --- a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml +++ b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml @@ -39,6 +39,7 @@ ignore_growroot_disabled: true mode: growpart resize_rootfs: noblock + timezone: UTC - name: "Define network config" when: From 6c1ad633171e6b9c43b79be9737ca0b6e60b586e Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Tue, 1 Apr 2025 10:17:14 +0200 Subject: [PATCH 044/480] Add bgp-l3-xl dt This commit adds a "bgp-l3-xl" dt where workers/computes/networkers are deployed across three racks over a virtualized spine/leaf fabric using bgp. The overall footprint consists of 3 masters, 9 workers (3x rack), 6 computes (2x rack), 3 networkers (1x rack) plus 1 router, 2 spines and 6 leaf switches (2x rack). 
~~~ $ virsh list Id Name State ---------------------------------------- 1292 cifmw-controller-0 running 1293 cifmw-r0-compute-0 running 1294 cifmw-r0-compute-1 running 1295 cifmw-r1-compute-0 running 1296 cifmw-r1-compute-1 running 1297 cifmw-r2-compute-0 running 1298 cifmw-r2-compute-1 running 1299 cifmw-r0-networker-0 running 1300 cifmw-r1-networker-0 running 1301 cifmw-r2-networker-0 running 1302 cifmw-ocp-master-0 running 1303 cifmw-ocp-master-1 running 1304 cifmw-ocp-master-2 running 1305 cifmw-ocp-worker-0 running 1306 cifmw-ocp-worker-1 running 1307 cifmw-ocp-worker-2 running 1308 cifmw-ocp-worker-3 running 1309 cifmw-ocp-worker-4 running 1310 cifmw-ocp-worker-5 running 1311 cifmw-ocp-worker-6 running 1312 cifmw-ocp-worker-7 running 1313 cifmw-ocp-worker-8 running 1314 cifmw-ocp-worker-9 running 1315 cifmw-router-0 running 1316 cifmw-spine-0 running 1317 cifmw-spine-1 running 1318 cifmw-leaf-0 running 1319 cifmw-leaf-1 running 1320 cifmw-leaf-2 running 1321 cifmw-leaf-3 running 1322 cifmw-leaf-4 running 1323 cifmw-leaf-5 running ~~~ --- .github/CODEOWNERS | 3 + playbooks/bgp/prepare-bgp-computes.yaml | 71 + ...repare-bgp-hypervisor-from-controller.yaml | 23 + playbooks/bgp/prepare-bgp-spines-leaves.yaml | 521 ++++++++ playbooks/bgp/templates/leaf-frr.conf.j2 | 87 ++ playbooks/bgp/templates/router-frr.conf.j2 | 64 + playbooks/bgp/templates/spine-frr.conf.j2 | 80 ++ .../common-bgp-edpm-values.yaml.j2 | 86 ++ .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../bgp-l3-xl/network-values/values.yaml.j2 | 194 +++ roles/reproducer/tasks/ocp_layout.yml | 4 +- scenarios/reproducers/bgp-l3-xl.yml | 1144 +++++++++++++++++ 17 files changed, 2305 insertions(+), 2 deletions(-) create mode 100644 playbooks/bgp/prepare-bgp-computes.yaml create mode 100644 playbooks/bgp/prepare-bgp-hypervisor-from-controller.yaml create mode 100644 playbooks/bgp/prepare-bgp-spines-leaves.yaml 
create mode 100644 playbooks/bgp/templates/leaf-frr.conf.j2 create mode 100644 playbooks/bgp/templates/router-frr.conf.j2 create mode 100644 playbooks/bgp/templates/spine-frr.conf.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 create mode 100644 scenarios/reproducers/bgp-l3-xl.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 783e6a4c25..3c8d2cbfa2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,7 +6,10 @@ roles/adoption_osp_deploy @openstack-k8s-operators/adoption-core-reviewers # BGP roles/ci_gen_kustomize_values/templates/bgp_dt01 @openstack-k8s-operators/bgp +roles/ci_gen_kustomize_values/templates/bgp-l3-xl @openstack-k8s-operators/bgp playbooks/bgp-l3-computes-ready.yml @openstack-k8s-operators/bgp +playbooks/bgp @openstack-k8s-operators/bgp +scenarios/reproducers/bgp-l3-xl.yml @openstack-k8s-operators/bgp # Compliance roles/compliance @openstack-k8s-operators/security diff --git a/playbooks/bgp/prepare-bgp-computes.yaml b/playbooks/bgp/prepare-bgp-computes.yaml new file mode 100644 index 0000000000..c23c69ab3d --- /dev/null +++ b/playbooks/bgp/prepare-bgp-computes.yaml 
@@ -0,0 +1,71 @@ +--- +- name: Configure computes + hosts: "computes{{ networkers_bool | default(false) | bool | ternary(',networkers', '') }}" + tasks: + - name: Check default route corresponds with BGP + ansible.builtin.command: + cmd: "ip route show default" + register: _initial_default_ip_route_result + changed_when: false + + - name: Early end if default route is already based on BGP + ansible.builtin.meta: end_play + when: "'proto bgp' in _initial_default_ip_route_result.stdout" + + - name: Obtain the device with the DHCP default route + ansible.builtin.shell: + cmd: > + ip r show default | + grep "proto dhcp" | + grep -o "dev \w*" | + cut -d" " -f 2 + ignore_errors: true + register: dhcp_default_route_device + changed_when: false + + - name: Remove DHCP default route if it exists + when: + - dhcp_default_route_device.rc == 0 + - dhcp_default_route_device.stdout | trim | length > 0 + vars: + default_device: "{{ dhcp_default_route_device.stdout | trim }}" + block: + - name: Obtain the connection for the DHCP default route device + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show {{ default_device }} + register: default_connection + changed_when: false + + - name: Ignore dhcp default route from ocpbm interfaces + become: true + community.general.nmcli: + conn_name: "{{ default_connection.stdout | trim }}" + gw4_ignore_auto: true + gw6_ignore_auto: true + never_default4: true + state: present + + - name: Remove default route obtained via DHCP from leafs in order to apply BGP + become: true + ansible.builtin.shell: + cmd: > + set -o pipefail && ip route show default | + grep "proto dhcp" | + xargs -r ip route del + changed_when: false + + - name: Restart NetworkManager + become: true + ansible.builtin.systemd: + name: NetworkManager.service + state: restarted + + - name: Check new default route corresponds with BGP + ansible.builtin.command: + cmd: "ip route show default" + register: default_ip_route_result + retries: 10 + delay: 1 + 
until: "'proto bgp' in default_ip_route_result.stdout" + changed_when: false diff --git a/playbooks/bgp/prepare-bgp-hypervisor-from-controller.yaml b/playbooks/bgp/prepare-bgp-hypervisor-from-controller.yaml new file mode 100644 index 0000000000..da03ec8608 --- /dev/null +++ b/playbooks/bgp/prepare-bgp-hypervisor-from-controller.yaml @@ -0,0 +1,23 @@ +--- +- name: Prepare the BGP hypervisor with needed configuration + hosts: hypervisor + tasks: + - name: Set IPv4 forwarding + become: true + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: '1' + sysctl_set: true + sysctl_file: /etc/sysctl.d/90-network.conf + state: present + reload: true + + - name: Disable reverse path forwarding validation + become: true + ansible.posix.sysctl: + name: net.ipv4.conf.all.rp_filter + value: '0' + sysctl_set: true + sysctl_file: /etc/sysctl.d/90-network.conf + state: present + reload: true diff --git a/playbooks/bgp/prepare-bgp-spines-leaves.yaml b/playbooks/bgp/prepare-bgp-spines-leaves.yaml new file mode 100644 index 0000000000..7b020507fc --- /dev/null +++ b/playbooks/bgp/prepare-bgp-spines-leaves.yaml @@ -0,0 +1,521 @@ +--- +- name: Common spines and leaves configuration + hosts: "spines,leafs{{ router_bool | default(false) | ternary(',routers', '') }}" + tasks: + - name: Workaround router advertisement packets polluting routing tables + become: true + ansible.builtin.shell: + cmd: | + for i in $(ls /proc/sys/net/ipv6/conf/*/forwarding); do echo 1 > $i; done + changed_when: false + + - name: Register interfaces + ansible.builtin.shell: + cmd: "set -o pipefail && ls -1 /proc/sys/net/ipv4/conf/*/rp_filter | cut -d/ -f7" + register: interfaces + changed_when: false + + - name: Disable reverse path forwarding validation + become: true + ansible.posix.sysctl: + name: "net.ipv4.conf.{{ item }}.rp_filter" + value: "0" + sysctl_set: true + sysctl_file: /etc/sysctl.d/sysctl.conf + state: present + reload: true + loop: "{{ interfaces.stdout_lines }}" + register: result + retries: 
3 + timeout: 60 + until: result is not failed + + - name: Disable reverse path forwarding validation + become: true + ansible.posix.sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + sysctl_set: true + sysctl_file: /etc/sysctl.d/sysctl.conf + state: present + reload: true + loop: "{{ sysctls | dict2items }}" + vars: + sysctls: + net.ipv4.conf.all.rp_filter: '0' + net.ipv4.conf.default.rp_filter: '0' + register: result + retries: 3 + timeout: 60 + until: result is not failed + + - name: Set IPv4 forwarding + become: true + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: '1' + sysctl_set: true + sysctl_file: /etc/sysctl.d/90-network.conf + state: present + reload: true + + - name: Set IPv6 forwarding + become: true + ansible.posix.sysctl: + name: net.ipv6.conf.all.forwarding + value: '1' + sysctl_set: true + sysctl_file: /etc/sysctl.d/90-network.conf + state: present + reload: true + + - name: Check installed packages + ansible.builtin.package_facts: + manager: auto + + - name: Install FRR + when: '"frr" not in ansible_facts.packages' + block: + - name: Install RHOS Release tool + become: true + ansible.builtin.package: + name: "{{ cifmw_repo_setup_rhos_release_rpm }}" + state: present + disable_gpg_check: true + + - name: Enable RHOS release repos. 
+ become: true + ansible.builtin.command: + cmd: "rhos-release rhel" + changed_when: false + + - name: Install frr + become: true + ansible.builtin.package: + name: frr + state: present + + - name: Enable FRR BGP daemon + become: true + ansible.builtin.lineinfile: + path: /etc/frr/daemons + regexp: "^bgpd=" + line: "bgpd=yes" + owner: frr + group: frr + mode: '640' + + - name: Enable FRR BFD daemon + become: true + ansible.builtin.lineinfile: + path: /etc/frr/daemons + regexp: "^bfdd=" + line: "bfdd=yes" + owner: frr + group: frr + mode: '640' + + - name: Enable retain option of zebra + become: true + ansible.builtin.lineinfile: + path: /etc/frr/daemons + regexp: "^zebra_options=" + line: "zebra_options=\" -A 127.0.0.1 -s 90000000 -r \"" + owner: frr + group: frr + mode: '640' + +# Router play +- name: Configure router + hosts: "{{ router_bool | default(false) | ternary('routers', 'localhost') }}" + tasks: + - name: Early end if no router defined + ansible.builtin.meta: end_play + when: not (router_bool | default(false)) + + - name: Obtain the connection for the eth0 interface + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show eth0 + register: router_eth0_conn + changed_when: false + + # When eth0 connection name is "Wired connection 1", then the rest of the + # connection names corresponding to the interfaces will follow this pattern: + # eth1 -> "Wired connection 2" + # eth2 -> "Wired connection 3" + # When eth0 connection name is different from "Wired connection 1", then the + # rest of the connection names corresponding to the interfaces will follow + # this pattern: + # eth1 -> "Wired connection 1" + # eth2 -> "Wired connection 2" + - name: Set router_conn_name_offset + ansible.builtin.set_fact: + router_conn_name_offset: >- + {{ + 1 if "Wired connection 1" == (router_eth0_conn.stdout | trim) + else 0 + }} + + - name: Build downlink connection list + vars: + connection_name: "Wired connection {{ (item | int) + 
(router_conn_name_offset | int) }}" + interface_name: "eth{{ item }}" + ansible.builtin.set_fact: + router_downlink_conns: "{{ router_downlink_conns | default([]) + [connection_name] }}" + router_downlink_ifs: "{{ router_downlink_ifs | default([]) + [interface_name] }}" + loop: "{{ range(1, 3) | list }}" # the number of spines is always 2 + + - name: Build uplink connection + vars: + len_router_downlink_conns: "{{ router_downlink_conns | length }}" + ansible.builtin.set_fact: + router_uplink_conn: "Wired connection {{ 1 + (len_router_downlink_conns | int) + (router_conn_name_offset | int) }}" + router_uplink_if: "eth{{ 1 + (len_router_downlink_conns | int) }}" + + - name: Configure downlink router connections with nmcli + become: true + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + type: ethernet + method4: disabled + method6: link-local + state: present + loop: "{{ router_downlink_conns }}" + + - name: Configure uplink router connections with nmcli + become: true + community.general.nmcli: + autoconnect: true + conn_name: "{{ router_uplink_conn }}" + ip4: "{{ router_uplink_ip }}/30" + method4: manual + method6: link-local + state: present + + - name: Add provider network gateway IP to router loopback + become: true + community.general.nmcli: + autoconnect: true + conn_name: lo + ip4: + - 127.0.0.1/8 + - 192.168.133.1/32 + method4: manual + ip6: "::1/128" + method6: manual + state: present + + - name: Configure FRR + become: true + ansible.builtin.template: + src: templates/router-frr.conf.j2 + dest: /etc/frr/frr.conf + owner: frr + group: frr + mode: '640' + + - name: Enable and start FRR + become: true + ansible.builtin.service: + name: frr + enabled: true + state: restarted + + - name: Masquerade mortacci + block: + - name: Install iptables + become: true + ansible.builtin.package: + name: iptables + state: present + + - name: Masquerade outgoing traffic + vars: + router_ext_if: eth0 + become: true + ansible.builtin.shell: + cmd: | + 
iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ router_ext_if }} -j MASQUERADE + iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -o {{ router_ext_if }} -j MASQUERADE + changed_when: false + + - name: Restart NetworkManager + become: true + ansible.builtin.systemd: + name: NetworkManager.service + state: restarted + + +# Spines play +- name: Configure spines + hosts: spines + tasks: + - name: Obtain the connection for the eth0 interface + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show eth0 + register: spine_eth0_conn + changed_when: false + + - name: Set spine_conn_name_offset + ansible.builtin.set_fact: + spine_conn_name_offset: >- + {{ + 1 if "Wired connection 1" == (spine_eth0_conn.stdout | trim) + else 0 + }} + + - name: Build downlink connection list + vars: + num_conns: "{{ (num_racks | default(4) | int) * 2 }}" + connection_name: "Wired connection {{ (item | int) + (spine_conn_name_offset | int) }}" + interface_name: "eth{{ item }}" + ansible.builtin.set_fact: + spine_downlink_conns: "{{ spine_downlink_conns | default([]) + [connection_name] }}" + spine_downlink_ifs: "{{ spine_downlink_ifs | default([]) + [interface_name] }}" + loop: "{{ range(1, 1 + (num_conns | int)) | list }}" + + - name: Build uplink connection + vars: + len_spine_downlink_conns: "{{ spine_downlink_conns | length }}" + ansible.builtin.set_fact: + spine_uplink_conn: "Wired connection {{ 1 + (len_spine_downlink_conns | int) + (spine_conn_name_offset | int) }}" + spine_uplink_if: "eth{{ 1 + (len_spine_downlink_conns | int) }}" + + - name: Configure spine connections with nmcli + become: true + vars: + spine_conns: >- + {{ + router_bool | default(false) | + ternary(spine_downlink_conns + [spine_uplink_conn], + spine_downlink_conns) + }} + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + type: ethernet + method4: disabled + method6: link-local + state: present + loop: "{{ spine_conns }}" + + - name: Configure FRR + become: true + 
ansible.builtin.template: + src: templates/spine-frr.conf.j2 + dest: /etc/frr/frr.conf + owner: frr + group: frr + mode: '640' + + - name: Enable and start FRR + become: true + ansible.builtin.service: + name: frr + enabled: true + state: restarted + + - name: Masquerade mortacci + when: not (router_bool | default(false)) + block: + - name: Install iptables + become: true + ansible.builtin.package: + name: iptables + state: present + + - name: Masquerade outgoing traffic + become: true + ansible.builtin.shell: + cmd: | + iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ spine_uplink_if }} -j MASQUERADE + iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -o {{ spine_uplink_if }} -j MASQUERADE + changed_when: false + +# Leaves play +- name: Configure leaves + hosts: leafs + vars: + leaf_id: "{{ (ansible_hostname.split('-')[-1] | int) % 2 }}" # always 2 leaves per rack + rack_id: "{{ (ansible_hostname.split('-')[-1] | int) // 2 }}" # always 2 leaves per rack + tasks: + - name: Obtain the connection for the eth0 interface + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show eth0 + register: leaf_eth0_conn + changed_when: false + + - name: Set leaf_conn_name_offset + ansible.builtin.set_fact: + leaf_conn_name_offset: >- + {{ + 1 if "Wired connection 1" == (leaf_eth0_conn.stdout | trim) + else 0 + }} + + - name: Build uplink connection list + vars: + connection_name: "Wired connection {{ (item | int) + (leaf_conn_name_offset | int) }}" + interface_name: "eth{{ item }}" + ansible.builtin.set_fact: + uplink_conns: "{{ uplink_conns | default([]) + [connection_name] }}" + uplink_ifs: "{{ uplink_ifs | default([]) + [interface_name] }}" + loop: "{{ range(1, 3) | list }}" # the number of spines is always 2 + + - name: Build downlink connection list + vars: + num_conns: "{{ (edpm_nodes_per_rack | default(1) | int) + (ocp_nodes_per_rack | default(0) | int) }}" + connection_name: "Wired connection {{ (item | int) + (leaf_conn_name_offset | int) }}" + 
interface_name: "eth{{ item }}" + ansible.builtin.set_fact: + leaf_downlink_conns: "{{ leaf_downlink_conns | default([]) + [connection_name] }}" + leaf_downlink_ifs: "{{ leaf_downlink_ifs | default([]) + [interface_name] }}" + loop: "{{ range(3, 3 + (num_conns | int)) | list }}" + + - name: Build downlink connection list for rack3 + vars: + connection_name: "Wired connection {{ (item | int) + (leaf_conn_name_offset | int) }}" + interface_name: "eth{{ item }}" + ansible.builtin.set_fact: + downlink_conns_rack3: "{{ downlink_conns_rack3 | default([]) + [connection_name] }}" + downlink_ifs_rack3: "{{ downlink_ifs_rack3 | default([]) + [interface_name] }}" + loop: "{{ range(3, 6) | list }}" # number of OCP nodes on rack3 is always 3 + + # rack3 is special because only OCP nodes are deployed on it when it exists + - name: Configure downlink leaf connections on rack3 + become: true + vars: + leaf_ds_ip4: >- + 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} + when: (rack_id | int) == 3 + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + ip4: "{{ leaf_ds_ip4 }}/30" + method4: manual + method6: link-local + state: present + loop: "{{ downlink_conns_rack3 }}" + loop_control: + index_var: loop_index + + - name: Configure downlink leaf connections on racks 0, 1 and 2 + become: true + vars: + leaf_ds_ip4: >- + 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} + when: (rack_id | int) != 3 + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + ip4: "{{ leaf_ds_ip4 }}/30" + method4: manual + method6: link-local + state: present + loop: "{{ leaf_downlink_conns }}" + loop_control: + index_var: loop_index + + - name: Configure uplink leaf connections + become: true + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + method4: disabled + method6: link-local + state: present + loop: "{{ uplink_conns }}" + + - name: Configure FRR + become: true + vars: + 
downlink_interfaces: "{{ downlink_ifs_rack3 if (rack_id | int) == 3 else leaf_downlink_ifs }}" + ansible.builtin.template: + src: templates/leaf-frr.conf.j2 + dest: /etc/frr/frr.conf + owner: frr + group: frr + mode: '640' + + - name: Enable FRR Zebra daemon + become: true + ansible.builtin.lineinfile: + path: /etc/frr/daemons + regexp: "^zebra=" + line: "zebra=yes" + owner: frr + group: frr + mode: '640' + + - name: Enable and start FRR + become: true + ansible.builtin.service: + name: frr + enabled: true + state: restarted + +# Final play to remove DHCP default routes +- name: Remove DHCP default routes and use BGP instead + hosts: "leafs{{ router_bool | default(false) | ternary(',spines', '') }}" + tasks: + - name: Obtain the device with the DHCP default route + ansible.builtin.shell: + cmd: > + ip r show default | + grep "proto dhcp" | + grep -o "dev \w*" | + cut -d" " -f 2 + ignore_errors: true + register: dhcp_default_route_device + changed_when: false + + - name: Remove DHCP default route if it exists + when: + - dhcp_default_route_device.rc == 0 + - dhcp_default_route_device.stdout | trim | length > 0 + vars: + default_device: "{{ dhcp_default_route_device.stdout | trim }}" + block: + - name: Obtain the connection for the DHCP default route device + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show {{ default_device }} + register: default_connection + changed_when: false + + - name: Ignore dhcp default route from ocpbm interfaces + become: true + community.general.nmcli: + conn_name: "{{ default_connection.stdout | trim }}" + gw4_ignore_auto: true + gw6_ignore_auto: true + never_default4: true + state: present + + - name: Remove default route obtained via DHCP from leaves in order to apply BGP + become: true + ansible.builtin.shell: + cmd: > + set -o pipefail && ip route show default | + grep "proto dhcp" | + xargs -r ip route del + changed_when: false + + - name: Restart NetworkManager + become: true + ansible.builtin.systemd: + 
name: NetworkManager.service + state: restarted + + - name: Check new default route corresponds with BGP + ansible.builtin.command: + cmd: "ip route show default" + register: default_ip_route_result + retries: 10 + delay: 1 + until: "'proto bgp' in default_ip_route_result.stdout" + changed_when: false diff --git a/playbooks/bgp/templates/leaf-frr.conf.j2 b/playbooks/bgp/templates/leaf-frr.conf.j2 new file mode 100644 index 0000000000..e300f0f8f2 --- /dev/null +++ b/playbooks/bgp/templates/leaf-frr.conf.j2 @@ -0,0 +1,87 @@ +hostname {{ ansible_hostname }} +log file /var/log/frr/frr.log +service integrated-vtysh-config +line vty +frr version 7.0 + +debug bfd peer +debug bfd network +debug bfd zebra + +debug bgp graceful-restart +debug bgp neighbor-events +debug bgp updates +debug bgp update-groups + +router bgp 64999 + bgp log-neighbor-changes + bgp graceful-shutdown + + bgp graceful-restart + bgp graceful-restart notification + bgp graceful-restart restart-time 60 + bgp graceful-restart preserve-fw-state + ! bgp long-lived-graceful-restart stale-time 15 + + neighbor downlink peer-group + neighbor downlink remote-as internal + neighbor downlink bfd + neighbor downlink bfd profile tripleo + neighbor downlink password f00barZ + ! neighbor downlink capability extended-nexthop +{% for iface in downlink_interfaces %} + neighbor {{iface}} interface peer-group downlink +{% endfor %} + + neighbor uplink peer-group + neighbor uplink remote-as external + neighbor uplink bfd + neighbor uplink bfd profile tripleo + ! 
neighbor uplink capability extended-nexthop +{% for iface in uplink_ifs %} + neighbor {{iface}} interface peer-group uplink +{% endfor %} + + address-family ipv4 unicast + redistribute connected + neighbor downlink route-reflector-client + neighbor downlink default-originate + neighbor downlink next-hop-self + neighbor downlink prefix-list only-host-prefixes out + neighbor uplink allowas-in origin + neighbor uplink prefix-list only-default-host-prefixes in + exit-address-family + + address-family ipv6 unicast + redistribute connected + neighbor downlink activate + neighbor downlink route-reflector-client + neighbor downlink default-originate + neighbor downlink next-hop-self + neighbor uplink activate + neighbor uplink allowas-in origin + neighbor uplink prefix-list only-default-host-prefixes in + exit-address-family + + address-family l2vpn evpn + neighbor uplink activate + neighbor uplink allowas-in origin + neighbor downlink activate + neighbor downlink route-reflector-client + exit-address-family + +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 ge 32 +ip prefix-list only-host-prefixes permit 0.0.0.0/0 ge 32 + +ipv6 prefix-list only-default-host-prefixes permit ::/0 +ipv6 prefix-list only-default-host-prefixes permit ::/0 ge 128 +ipv6 prefix-list only-host-prefixes permit ::/0 ge 128 + +ip nht resolve-via-default + +bfd + profile tripleo + detect-multiplier 10 + transmit-interval 500 + receive-interval 500 diff --git a/playbooks/bgp/templates/router-frr.conf.j2 b/playbooks/bgp/templates/router-frr.conf.j2 new file mode 100644 index 0000000000..d3308bd36b --- /dev/null +++ b/playbooks/bgp/templates/router-frr.conf.j2 @@ -0,0 +1,64 @@ +hostname {{ ansible_hostname }} +log file /var/log/frr/frr.log +service integrated-vtysh-config +line vty +frr version 7.0 + +debug bfd peer +debug bfd network +debug bfd zebra + +debug bgp graceful-restart +debug bgp neighbor-events +debug bgp updates +debug 
bgp update-groups + +router bgp 65000 + bgp log-neighbor-changes + bgp graceful-shutdown + + neighbor downlink peer-group + neighbor downlink remote-as internal + neighbor downlink bfd + ! neighbor downlink capability extended-nexthop +{% for iface in router_downlink_ifs %} + neighbor {{iface}} interface peer-group downlink +{% endfor %} + + neighbor uplink peer-group + neighbor uplink remote-as external + neighbor uplink bfd + ! neighbor uplink capability extended-nexthop + neighbor {{router_uplink_if}} interface peer-group uplink + + address-family ipv4 unicast + redistribute connected + neighbor downlink default-originate + neighbor downlink prefix-list only-host-prefixes in + neighbor uplink prefix-list only-default-host-prefixes in + exit-address-family + + address-family ipv6 unicast + redistribute connected + neighbor downlink activate + neighbor downlink default-originate + neighbor downlink prefix-list only-host-prefixes in + neighbor uplink activate + neighbor uplink prefix-list only-default-host-prefixes in + exit-address-family + + address-family l2vpn evpn + neighbor uplink activate + neighbor downlink activate + neighbor downlink route-reflector-client + exit-address-family + +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 ge 32 +ip prefix-list only-host-prefixes permit 0.0.0.0/0 ge 32 + +ipv6 prefix-list only-default-host-prefixes permit ::/0 +ipv6 prefix-list only-default-host-prefixes permit ::/0 ge 128 +ipv6 prefix-list only-host-prefixes permit ::/0 ge 128 + +ip nht resolve-via-default diff --git a/playbooks/bgp/templates/spine-frr.conf.j2 b/playbooks/bgp/templates/spine-frr.conf.j2 new file mode 100644 index 0000000000..c0f03b67ed --- /dev/null +++ b/playbooks/bgp/templates/spine-frr.conf.j2 @@ -0,0 +1,80 @@ +hostname {{ ansible_hostname }} +log file /var/log/frr/frr.log +service integrated-vtysh-config +line vty +frr version 7.0 + +debug bfd peer +debug bfd network +debug 
bfd zebra + +debug bgp graceful-restart +debug bgp neighbor-events +debug bgp updates +debug bgp update-groups + +router bgp 65000 + bgp log-neighbor-changes + bgp graceful-shutdown + + neighbor downlink peer-group + neighbor downlink remote-as external + neighbor downlink bfd + neighbor downlink bfd profile tripleo + ! neighbor downlink capability extended-nexthop +{% for iface in spine_downlink_ifs %} + neighbor {{iface}} interface peer-group downlink +{% endfor %} + +{% if router_bool | default(false) %} + neighbor uplink peer-group + neighbor uplink remote-as internal + neighbor uplink bfd + neighbor uplink bfd profile tripleo + ! neighbor uplink capability extended-nexthop + neighbor {{spine_uplink_if}} interface peer-group uplink +{% endif %} + + address-family ipv4 unicast + redistribute connected + neighbor downlink default-originate + neighbor downlink prefix-list only-host-prefixes in +{% if router_bool | default(false) %} + neighbor uplink prefix-list only-default-host-prefixes in + neighbor uplink next-hop-self +{% endif %} + exit-address-family + + address-family ipv6 unicast + redistribute connected + neighbor downlink activate + neighbor downlink default-originate + neighbor downlink prefix-list only-host-prefixes in +{% if router_bool | default(false) %} + neighbor uplink activate + neighbor uplink prefix-list only-default-host-prefixes in +{% endif %} + exit-address-family + + address-family l2vpn evpn + neighbor downlink activate +{% if router_bool | default(false) %} + neighbor uplink activate +{% endif %} + exit-address-family + +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 +ip prefix-list only-default-host-prefixes permit 0.0.0.0/0 ge 32 +ip prefix-list only-host-prefixes permit 0.0.0.0/0 ge 32 + +ipv6 prefix-list only-default-host-prefixes permit ::/0 +ipv6 prefix-list only-default-host-prefixes permit ::/0 ge 128 +ipv6 prefix-list only-host-prefixes permit ::/0 ge 128 + +ip nht resolve-via-default + +bfd + profile tripleo + 
detect-multiplier 10 + transmit-interval 500 + receive-interval 500 diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 new file mode 100644 index 0000000000..05d4e436de --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 @@ -0,0 +1,86 @@ +# source: bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 +{% set instances_names = [] %} +{% set rack = 'r' ~ rack_number %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith('-'.join([rack, node_type])) %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_sshd_allowed_ranges: +{% set sshd_allowed_range = cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) %} +{% for rack in ['r0', 'r1', 'r2'] %} +{% set _ = sshd_allowed_range.append(cifmw_networking_env_definition.networks['ctlplane' + rack].network_v4) %} +{% endfor %} +{% for range in sshd_allowed_range %} + - "{{ range }}" +{% endfor %} + - 192.168.125.0/24 + - 192.168.111.0/24 + nodes: +{% for instance in instances_names %} + {{ instance }}: + 
ansible: +{% set ctlplane_rack = 'ctlplane' + rack %} + host: {{ cifmw_networking_env_definition.instances[instance].networks[ctlplane_rack].ip_v4 }} +{% if original_content.data.nodeset.nodes['edpm-' ~ instance].ansible.ansibleVars is defined %} + ansibleVars: {{ original_content.data.nodeset.nodes['edpm-' ~ instance].ansible.ansibleVars }} +{% endif %} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} +{% if 'storagemgmt' not in net %} + - name: {{ net if net != ctlplane_rack else 'ctlplane' }} + subnetName: {{ 'subnet1' if net != ctlplane_rack else 'subnet' ~ rack_number }} +{% if 'ctlplane' in net %} + defaultRoute: true + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[ctlplane_rack].ip_v4 }} +{% endif %} +{% endif %} +{% endfor %} +{% if 'compute-0' in instance %} +{% set peer_suffix = 1 %} +{% set main_suffix = 7 %} +{% elif 'compute-1' in instance %} +{% set peer_suffix = 5 %} +{% set main_suffix = 8 %} +{% else %} +{% set peer_suffix = 9 %} +{% set main_suffix = 9 %} +{% endif %} + - name: BgpNet0 + subnetName: subnet{{ rack_number }} + fixedIP: 100.64.{{ rack_number }}.{{ peer_suffix + 1 }} + - name: BgpNet1 + subnetName: subnet{{ rack_number }} + fixedIP: 100.65.{{ rack_number }}.{{ peer_suffix + 1 }} + - name: BgpMainNet + subnetName: subnet{{ rack_number }} + fixedIP: 99.99.{{ rack_number }}.{{ main_suffix }} + - name: BgpMainNetV6 + subnetName: subnet{{ rack_number }} +{% if 'compute-0' in instance %} +{% set suffix = 7 %} +{% elif 'compute-1' in instance %} +{% set suffix = 8 %} +{% else %} +{% set suffix = 9 %} +{% endif %} + fixedIP: f00d:f00d:f00d:f00d:f00d:f00d:f00d:00{{ (rack_number | int) + 1 }}{{ suffix }} +{% endfor %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-compute-nodeset-values/values.yaml.j2 new file mode 100644 
index 0000000000..504c997d1e --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r0-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 0 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..b4291928d5 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r0-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r0-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 0 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..a5d2a1d9a6 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r1-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 1 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..c7b93c435d --- /dev/null +++ 
b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r1-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r1-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 1 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..745044adea --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r2-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 2 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..d5a617959a --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/edpm-r2-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp-l3-xl/edpm-r2-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 2 %} +{% include 'templates/bgp-l3-xl/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 new file mode 100644 index 0000000000..abd5bce805 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 @@ -0,0 +1,194 @@ +--- +# source: bgp-l3-xl/network-values/values.yaml.j2 +{% set ns 
= namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: + name: {{ hostname }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network.ip_v4 }} +{% if 'worker-9' == hostname and 'ctlplane' == network.network_name %} + base_if: {{ network.interface_name }} +{% endif %} +{% endfor %} +{% set node_bgp_orig_content = original_content.data.bgp.bgpdefs['node' ~ ns.ocp_index] %} +{% set node_bgp_net0 = node_bgp_orig_content.bgpnet0 %} +{% if 'worker-9' != hostname %} +{% set node_bgp_net1 = node_bgp_orig_content.bgpnet1 %} +{% endif %} + bgp_peers: + - {{ node_bgp_net0.bgp_peer }} +{% if 'worker-9' != hostname %} + - {{ node_bgp_net1.bgp_peer }} +{% endif %} + bgp_ip: + - {{ node_bgp_net0.bgp_ip }} +{% if 'worker-9' != hostname %} + - {{ node_bgp_net1.bgp_ip }} +{% endif %} + loopback_ip: {{ node_bgp_orig_content.loopback_ip }} + loopback_ipv6: {{ node_bgp_orig_content.loopback_ipv6 }} +{% if node_bgp_orig_content.routes | default(false) %} + routes: {{ node_bgp_orig_content.routes }} +{% endif %} +{% set ns.ocp_index = ns.ocp_index+1 %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} +{% if network.network_name != 'ctlplane' %} + - 
allocationRanges: +{% for range in network.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network.network_v4 }} +{% if network.gw_v4 is defined %} + gateway: {{ network.gw_v4 }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% else %} +{% for rack in ['r0', 'r1', 'r2'] %} +{% set rack_subnet = cifmw_networking_env_definition.networks[network.network_name + rack] %} + - allocationRanges: +{% for range in rack_subnet.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ rack_subnet.network_v4 }} +{% if rack_subnet.gw_v4 is defined %} + gateway: {{ rack_subnet.gw_v4 }} +{% endif %} + name: {{ 'subnet' ~ loop.index0 }} +{% if rack_subnet.vlan_id is defined %} + vlan: {{ rack_subnet.vlan_id }} +{% endif %} +{% endfor %} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool].ipv4_ranges %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network.network_v4 | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + 
net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "bridge", + "isDefaultGateway": true, + "isGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, +{% if network.network_name == "octavia" %} + "bridge": "octbr", +{% elif network.network_name == "ctlplane" %} + "bridge": "ospbr", +{% else %} + "bridge": "{{ network.network_name }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network.network_v4 }}", +{% if network.network_name == "octavia" and network.tools.multus.ipv4_routes | default([]) | length > 0 %} + "routes": [ +{% for route in network.tools.multus.ipv4_routes %} + { + "dst": "{{ route.destination }}", + "gw": "{{ route.gateway }}" + }{% if not loop.last %},{% endif %} +{% endfor %} + ], +{% endif %} + "range_start": "{{ network.tools.multus.ipv4_ranges.0.start }}", + "range_end": "{{ network.tools.multus.ipv4_ranges.0.end }}", +{% if network.network_name == "ctlplane" %} + "gateway": "{{ network.network_v4 |ansible.utils.nthhost(2) }}" +{% else %} + "gateway": "{{ network.network_v4 |ansible.utils.nthhost(1) }}" +{% endif %} + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: +# We set ctlplane = 192.168.125.0/24 and we rely on this definition to create the nad above. +# BGP exposes nad ips by advertising a 192.168.125.X address on the worker, and this would break dns +# because the traffic will not be sent to the right nic if a local ip on the same network is present. 
+# To avoid messing with routes etc we hardcode the 122.1 ip here + - 192.168.122.1 + search: [] + options: + - key: server + values: + - 192.168.122.1 +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v4 | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v4 | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: local-storage diff --git a/roles/reproducer/tasks/ocp_layout.yml b/roles/reproducer/tasks/ocp_layout.yml index b93f0753a4..64fe6eef15 100644 --- a/roles/reproducer/tasks/ocp_layout.yml +++ b/roles/reproducer/tasks/ocp_layout.yml @@ -329,7 +329,7 @@ - name: Allow libvirt zone on the temporary VBMC port become: true ansible.posix.firewalld: - port: "51881-51890/udp" + port: "51881-51899/udp" zone: libvirt state: enabled immediate: true @@ -347,6 +347,6 @@ - name: Remove temporary VBMC port from libvirt zone become: true ansible.posix.firewalld: - port: "51881-51890/udp" + port: "51881-51899/udp" zone: libvirt state: disabled diff --git a/scenarios/reproducers/bgp-l3-xl.yml b/scenarios/reproducers/bgp-l3-xl.yml new file mode 100644 index 0000000000..044bb90a53 --- /dev/null +++ b/scenarios/reproducers/bgp-l3-xl.yml @@ -0,0 +1,1144 @@ +--- + +cifmw_os_net_setup_config: + - name: public + external: true + is_default: true + provider_network_type: flat + provider_physical_network: datacentre + shared: true + subnets: + - name: public_subnet + cidr: 192.168.133.0/24 + 
allocation_pool_start: 192.168.133.190 + allocation_pool_end: 192.168.133.250 + gateway_ip: 192.168.133.1 + enable_dhcp: true + + +cifmw_run_id: '' +cifmw_use_devscripts: true +cifmw_use_libvirt: true +cifmw_virtualbmc_daemon_port: 50881 +cifmw_use_uefi: >- + {{ (cifmw_repo_setup_os_release is defined + and cifmw_repo_setup_os_release == 'rhel') | bool }} +num_racks: 3 +cifmw_libvirt_manager_compute_amount: "{{ num_racks }}" +cifmw_libvirt_manager_networker_amount: 3 +cifmw_libvirt_manager_pub_net: ocpbm +cifmw_libvirt_manager_spineleaf_setup: true +cifmw_libvirt_manager_network_interface_types: + rtr-ocp: network + s0-rtr: network + s1-rtr: network + l00-s0: network + l01-s0: network + l00-s1: network + l01-s1: network + l10-s0: network + l11-s0: network + l10-s1: network + l11-s1: network + l20-s0: network + l21-s0: network + l20-s1: network + l21-s1: network + l00-node0: network + l00-node1: network + l00-node2: network + l00-ocp0: network + l00-ocp1: network + l00-ocp2: network + l00-ocp3: network + l01-node0: network + l01-node1: network + l01-node2: network + l01-ocp0: network + l01-ocp1: network + l01-ocp2: network + l01-ocp3: network + l10-node0: network + l10-node1: network + l10-node2: network + l10-ocp0: network + l10-ocp1: network + l10-ocp2: network + l10-ocp3: network + l11-node0: network + l11-node1: network + l11-node2: network + l11-ocp0: network + l11-ocp1: network + l11-ocp2: network + l11-ocp3: network + l20-node0: network + l20-node1: network + l20-node2: network + l20-ocp0: network + l20-ocp1: network + l20-ocp2: network + l20-ocp3: network + l21-node0: network + l21-node1: network + l21-node2: network + l21-ocp0: network + l21-ocp1: network + l21-ocp2: network + l21-ocp3: network + +cifmw_libvirt_manager_configuration: + networks: + osp_trunk: | + + osp_trunk + + + + + + # router to ocp network + rtr-ocp: | + + rtr-ocp + + + # spines to router networks + s0-rtr: | + + s0-rtr + + + s1-rtr: | + + s1-rtr + + + # leafs to spines networks + ## 
rack0 + l00-s0: | + + l00-s0 + + + l00-s1: | + + l00-s1 + + + l01-s0: | + + l01-s0 + + + l01-s1: | + + l01-s1 + + + ## rack1 + l10-s0: | + + l10-s0 + + + l10-s1: | + + l10-s1 + + + l11-s0: | + + l11-s0 + + + l11-s1: | + + l11-s1 + + + ## rack2 + l20-s0: | + + l20-s0 + + + l20-s1: | + + l20-s1 + + + l21-s0: | + + l21-s0 + + + l21-s1: | + + l21-s1 + + + # leafs to nodes and ocps + ## rack0 + l00-node0: | + + l00-node0 + + + l00-node1: | + + l00-node1 + + + l00-node2: | + + l00-node2 + + + l00-ocp0: | + + l00-ocp0 + + + l00-ocp1: | + + l00-ocp1 + + + l00-ocp2: | + + l00-ocp2 + + + l00-ocp3: | + + l00-ocp3 + + + l01-node0: | + + l01-node0 + + + l01-node1: | + + l01-node1 + + + l01-node2: | + + l01-node2 + + + l01-ocp0: | + + l01-ocp0 + + + l01-ocp1: | + + l01-ocp1 + + + l01-ocp2: | + + l01-ocp2 + + + l01-ocp3: | + + l01-ocp3 + + + ## rack1 + l10-node0: | + + l10-node0 + + + l10-node1: | + + l10-node1 + + + l10-node2: | + + l10-node2 + + + l10-ocp0: | + + l10-ocp0 + + + l10-ocp1: | + + l10-ocp1 + + + l10-ocp2: | + + l10-ocp2 + + + l10-ocp3: | + + l10-ocp3 + + + l11-node0: | + + l11-node0 + + + l11-node1: | + + l11-node1 + + + l11-node2: | + + l11-node2 + + + l11-ocp0: | + + l11-ocp0 + + + l11-ocp1: | + + l11-ocp1 + + + l11-ocp2: | + + l11-ocp2 + + + l11-ocp3: | + + l11-ocp3 + + + ## rack2 + l20-node0: | + + l20-node0 + + + l20-node1: | + + l20-node1 + + + l20-node2: | + + l20-node2 + + + l20-ocp0: | + + l20-ocp0 + + + l20-ocp1: | + + l20-ocp1 + + + l20-ocp2: | + + l20-ocp2 + + + l20-ocp3: | + + l20-ocp3 + + + l21-node0: | + + l21-node0 + + + l21-node1: | + + l21-node1 + + + l21-node2: | + + l21-node2 + + + l21-ocp0: | + + l21-ocp0 + + + l21-ocp1: | + + l21-ocp1 + + + l21-ocp2: | + + l21-ocp2 + + + l21-ocp3: | + + l21-ocp3 + + + ocpbm: | + + ocpbm + + + + + + + ocppr: | + + ocppr + + + + r0_tr: | + + r0_tr + + + + + + r1_tr: | + + r1_tr + + + + + + r2_tr: | + + r2_tr + + + + + + + vms: + controller: + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and 
cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + r0-compute: &r0_compute_def + amount: 2 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - "ocpbm" + - "r0_tr" + spineleafnets: + - # rack0 - compute0 + - "l00-node0" + - "l01-node0" + - # rack0 - compute1 + - "l00-node1" + - "l01-node1" + r1-compute: + amount: 2 + root_part_id: "{{ cifmw_root_partition_id }}" + uefi: "{{ cifmw_use_uefi }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "centos-stream-9.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - r1_tr + spineleafnets: + - # rack1 - compute0 + - "l10-node0" + - "l11-node0" + - # rack1 - compute1 + - "l10-node1" + - "l11-node1" + r2-compute: + amount: 2 + root_part_id: "{{ cifmw_root_partition_id }}" + uefi: "{{ cifmw_use_uefi }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "centos-stream-9.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - r2_tr + spineleafnets: + - # rack2 - compute0 + - "l20-node0" + - "l21-node0" + - # rack2 - compute1 + - "l20-node1" + - "l21-node1" + + r0-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) +
}} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r0_tr" + spineleafnets: + - # rack0 - networker0 + - "l00-node2" + - "l01-node2" + r1-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r1_tr" + spineleafnets: + - # rack1 - networker0 + - "l10-node2" + - "l11-node2" + r2-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r2_tr" + spineleafnets: + - # rack2 - networker0 + - "l20-node2" + - "l21-node2" + ocp: + amount: 3 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "105" + memory: 16 + cpus: 10 + extra_disks_num: 1 + extra_disks_size: "20G" + nets: # nets common to all the ocp nodes + - "ocppr" + - "ocpbm" + - "osp_trunk" + spineleafnets: + - # rack0 - ocp master 0 + - "l00-ocp0" + - "l01-ocp0" + - # rack1 - ocp master 1 + - "l10-ocp0" + - "l11-ocp0" + - # rack2 - ocp master 2 + - "l20-ocp0" + - "l21-ocp0" + ocp_worker: + amount: 10 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ 
cifmw_basedir }}/images/" + disk_file_name: "ocp_worker" + disksize: "105" + memory: 16 + cpus: 10 + extra_disks_num: 1 + extra_disks_size: "20G" + nets: # nets common to all the ocp_worker nodes + - "ocppr" + - "ocpbm" + - "osp_trunk" + spineleafnets: + - # rack0 - ocp worker 0 + - "l00-ocp1" + - "l01-ocp1" + - # rack0 - ocp worker 1 + - "l00-ocp2" + - "l01-ocp2" + - # rack0 - ocp worker 2 + - "l00-ocp3" + - "l01-ocp3" + - # rack1 - ocp worker 3 + - "l10-ocp1" + - "l11-ocp1" + - # rack1 - ocp worker 4 + - "l10-ocp2" + - "l11-ocp2" + - # rack1 - ocp worker 5 + - "l10-ocp3" + - "l11-ocp3" + - # rack2 - ocp worker 6 + - "l20-ocp1" + - "l21-ocp1" + - # rack2 - ocp worker 7 + - "l20-ocp2" + - "l21-ocp2" + - # rack2 - ocp worker 8 + - "l20-ocp3" + - "l21-ocp3" + - # router - ocp_tester (worker 9) + - "rtr-ocp" + router: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the router nodes + - "ocpbm" + spineleafnets: + - # router - ocp_tester + - "s0-rtr" + - "s1-rtr" + - "rtr-ocp" + spine: + amount: 2 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the spine nodes + - "ocpbm" + spineleafnets: + - # spine0 + - "l00-s0" + - "l01-s0" + - "l10-s0" + - "l11-s0" + - "l20-s0" + - "l21-s0" + - "s0-rtr" + - # spine1 + - "l00-s1" + - "l01-s1" + - "l10-s1" + - "l11-s1" + - "l20-s1" + - "l21-s1" + - "s1-rtr" + leaf: + 
amount: 6 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the leaf nodes + - "ocpbm" + spineleafnets: + - # rack0 - leaf00 + - "l00-s0" + - "l00-s1" + - "l00-node0" + - "l00-node1" + - "l00-node2" + - "l00-ocp0" + - "l00-ocp1" + - "l00-ocp2" + - "l00-ocp3" + - # rack0 - leaf01 + - "l01-s0" + - "l01-s1" + - "l01-node0" + - "l01-node1" + - "l01-node2" + - "l01-ocp0" + - "l01-ocp1" + - "l01-ocp2" + - "l01-ocp3" + - # rack1 - leaf10 + - "l10-s0" + - "l10-s1" + - "l10-node0" + - "l10-node1" + - "l10-node2" + - "l10-ocp0" + - "l10-ocp1" + - "l10-ocp2" + - "l10-ocp3" + - # rack1 - leaf11 + - "l11-s0" + - "l11-s1" + - "l11-node0" + - "l11-node1" + - "l11-node2" + - "l11-ocp0" + - "l11-ocp1" + - "l11-ocp2" + - "l11-ocp3" + - # rack2 - leaf20 + - "l20-s0" + - "l20-s1" + - "l20-node0" + - "l20-node1" + - "l20-node2" + - "l20-ocp0" + - "l20-ocp1" + - "l20-ocp2" + - "l20-ocp3" + - # rack2 - leaf21 + - "l21-s0" + - "l21-s1" + - "l21-node0" + - "l21-node1" + - "l21-node2" + - "l21-ocp0" + - "l21-ocp1" + - "l21-ocp2" + - "l21-ocp3" + +## devscript support for OCP deploy +cifmw_devscripts_config_overrides: + fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" + cluster_subnet_v4: "192.172.0.0/16" + network_config_folder: "/home/zuul/netconf" + +# Required for egress traffic from pods to the osp_trunk network +cifmw_devscripts_enable_ocp_nodes_host_routing: true + +# Automation section. Most of those parameters will be passed to the +# controller-0 as-is and be consumed by the `deploy-va.sh` script. +# Please note, all paths are on the controller-0, meaning managed by the +# Framework. Please do not edit them! 
+_arch_repo: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" +cifmw_architecture_scenario: bgp-l3-xl +cifmw_kustomize_deploy_architecture_examples_path: "examples/dt/" +cifmw_arch_automation_file: "bgp-l3-xl.yaml" +cifmw_architecture_automation_file: >- + {{ + (_arch_repo, + 'automation/vars', + cifmw_arch_automation_file) | + path_join + }} + +cifmw_kustomize_deploy_metallb_source_files: >- + {{ + (_arch_repo, + 'examples/dt/bgp-l3-xl/metallb') | + path_join + }} + +# bgp_spines_leaves_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. +# src_dir }}/playbooks/bgp/prepare-bgp-spines-leaves.yaml" +# bgp_computes_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. +# src_dir }}/playbooks/bgp/prepare-bgp-computes.yaml" + + +pre_deploy: + - name: BGP spines and leaves configuration + type: playbook + source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/bgp/prepare-bgp-spines-leaves.yaml" + extra_vars: + num_racks: "{{ num_racks }}" + router_bool: true + edpm_nodes_per_rack: 3 + ocp_nodes_per_rack: 4 + router_uplink_ip: 100.64.10.1 + +# post_deploy: +# - name: BGP computes configuration +# type: playbook +# source: "{{ bgp_computes_playbook }}" +# extra_vars: +# #networkers_bool: true +# networkers_bool: false + +cifmw_libvirt_manager_default_gw_nets: + - ocpbm + - r0_tr + - r1_tr + - r2_tr +cifmw_networking_mapper_interfaces_info_translations: + osp_trunk: + - controlplane + - ctlplane + r0_tr: + - ctlplaner0 + r1_tr: + - ctlplaner1 + r2_tr: + - ctlplaner2 + + +cifmw_networking_definition: + networks: + ctlplane: + network: "192.168.125.0/24" + gateway: "192.168.125.1" + dns: + - "192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 120 + - start: 150 + end: 200 + + ctlplaner0: + network: "192.168.122.0/24" + gateway: "192.168.122.1" + dns: + - 
"192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 200 + + ctlplaner1: + network: "192.168.123.0/24" + gateway: "192.168.123.1" + dns: + - "192.168.123.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + ctlplaner2: + network: "192.168.124.0/24" + gateway: "192.168.124.1" + dns: + - "192.168.124.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + + internalapi: + network: "172.17.0.0/24" + vlan: 20 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + storage: + network: "172.18.0.0/24" + vlan: 21 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + tenant: + network: "172.19.0.0/24" + vlan: 22 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + octavia: + vlan: 23 + mtu: 1500 + network: "172.23.0.0/24" + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 250 + + # Not really used, but required by architecture + # https://github.com/openstack-k8s-operators/architecture/blob/main/lib/networking/netconfig/kustomization.yaml#L28-L36 + external: + network: "192.168.32.0/20" + vlan: 99 + mtu: 1500 + tools: + netconfig: + ranges: + - start: 130 + end: 250 + + group-templates: + r0-computes: + network-template: + range: + start: 100 + length: 5 + networks: + ctlplaner0: {} + internalapi: + 
trunk-parent: ctlplaner0 + tenant: + trunk-parent: ctlplaner0 + storage: + trunk-parent: ctlplaner0 + r1-computes: + network-template: + range: + start: 110 + length: 5 + networks: + ctlplaner1: {} + internalapi: + trunk-parent: ctlplaner1 + tenant: + trunk-parent: ctlplaner1 + storage: + trunk-parent: ctlplaner1 + r2-computes: + network-template: + range: + start: 120 + length: 5 + networks: + ctlplaner2: {} + internalapi: + trunk-parent: ctlplaner2 + tenant: + trunk-parent: ctlplaner2 + storage: + trunk-parent: ctlplaner2 + r0-networkers: + network-template: + range: + start: 200 + length: 5 + networks: + ctlplaner0: {} + internalapi: + trunk-parent: ctlplaner0 + tenant: + trunk-parent: ctlplaner0 + storage: + trunk-parent: ctlplaner0 + r1-networkers: + network-template: + range: + start: 210 + length: 5 + networks: + ctlplaner1: {} + internalapi: + trunk-parent: ctlplaner1 + tenant: + trunk-parent: ctlplaner1 + storage: + trunk-parent: ctlplaner1 + r2-networkers: + network-template: + range: + start: 220 + length: 5 + networks: + ctlplaner2: {} + internalapi: + trunk-parent: ctlplaner2 + tenant: + trunk-parent: ctlplaner2 + storage: + trunk-parent: ctlplaner2 + ocps: + network-template: + range: + start: 10 + length: 10 + networks: {} + ocp_workers: + network-template: + range: + start: 20 + length: 10 + networks: {} + + instances: + controller-0: + networks: + ctlplane: + ip: "192.168.125.9" From e1d5ea931388c0f7dc6e536406bc16fec5904335 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 21 Mar 2025 11:36:59 +0530 Subject: [PATCH 045/480] Enable risky-file-permissions linter --- .ansible-lint | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ansible-lint b/.ansible-lint index eca5d61350..3234711638 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -41,10 +41,10 @@ enable_list: - no-log-password - no-same-owner - name[play] + - risky-file-permissions skip_list: - jinja[spacing] # We don't really want to get that one. 
Too picky - no-changed-when # once we get the oc module we can re-enable it - - risky-file-permissions # Seems to fail on 0644 on files ?! - schema[meta] # Apparently "CentOS 9" isn't known... ?! - schema[vars] # weird issue with some "vars" in playbooks - yaml[line-length] # We have long lines, yes. From 74bcbde3da8ee7fb8d41b5661093db7cc36e3b8d Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 21 Mar 2025 11:38:15 +0530 Subject: [PATCH 046/480] Add mode for files and dirs The aim is to enable risky-file-permissions linter. --- deploy-osp-adoption.yml | 2 ++ docs/source/files/bootstrap-hypervisor.yml | 2 +- hooks/playbooks/adoption_ironic_post_oc.yml | 5 +++++ hooks/playbooks/barbican-enable-luna.yml | 1 + hooks/playbooks/control_plane_ceph_backends.yml | 1 + hooks/playbooks/control_plane_hci_pre_deploy.yml | 1 + hooks/playbooks/control_plane_horizon.yml | 1 + hooks/playbooks/control_plane_ironic.yml | 1 + hooks/playbooks/federation-controlplane-config.yml | 1 + hooks/playbooks/fetch_compute_facts.yml | 4 ++++ hooks/playbooks/ironic_enroll_nodes.yml | 1 + hooks/playbooks/kustomize_cr.yml | 3 +++ hooks/playbooks/kuttl_openstack_prep.yml | 1 + hooks/playbooks/link2file.yml | 1 + playbooks/dcn.yml | 1 + playbooks/nfs.yml | 3 +++ playbooks/unique-id.yml | 1 + playbooks/update.yml | 3 +++ roles/adoption_osp_deploy/tasks/deploy_overcloud.yml | 1 + roles/adoption_osp_deploy/tasks/prepare_undercloud.yml | 2 ++ roles/artifacts/tasks/ansible_logs.yml | 1 + roles/artifacts/tasks/main.yml | 1 + roles/build_openstack_packages/tasks/create_repo.yml | 2 ++ roles/build_openstack_packages/tasks/downstream.yml | 2 ++ roles/build_openstack_packages/tasks/install_dlrn.yml | 2 ++ roles/cert_manager/tasks/olm_manifest.yml | 1 + roles/ci_dcn_site/tasks/ceph.yml | 1 + roles/ci_dcn_site/tasks/scaledown_site.yml | 2 ++ roles/ci_local_storage/tasks/main.yml | 1 + roles/ci_lvms_storage/tasks/main.yml | 1 + roles/ci_multus/molecule/resources/clean.yml | 1 + roles/ci_multus/tasks/main.yml 
| 2 ++ roles/ci_network/tasks/main.yml | 1 + roles/ci_nmstate/tasks/nmstate_k8s_install.yml | 1 + .../tasks/nmstate_unmanaged_provision_node.yml | 1 + roles/cifmw_cephadm/tasks/dashboard/validation.yml | 2 ++ roles/cifmw_external_dns/tasks/requirements.yml | 1 + roles/compliance/tasks/create_scap_report.yml | 1 + roles/copy_container/molecule/default/converge.yml | 1 + roles/copy_container/tasks/main.yml | 1 + .../molecule/check_cluster_status/tasks/test.yml | 1 + roles/devscripts/tasks/139_configs.yml | 1 + roles/devscripts/tasks/main.yml | 1 + roles/dlrn_promote/tasks/get_hash_from_commit.yaml | 1 + roles/edpm_build_images/tasks/main.yml | 1 + roles/edpm_kustomize/tasks/kustomize.yml | 1 + roles/edpm_kustomize/tasks/main.yml | 1 + roles/env_op_images/tasks/main.yml | 1 + roles/federation/tasks/run_keycloak_setup.yml | 4 ++++ roles/federation/tasks/run_openstack_auth_test.yml | 2 ++ roles/federation/tasks/run_openstack_setup.yml | 2 ++ roles/hive/tasks/main.yml | 1 + roles/install_ca/tasks/main.yml | 1 + roles/install_yamls/tasks/main.yml | 2 ++ roles/kustomize_deploy/tasks/install_operators.yml | 1 + .../molecule/generate_network_data/tasks/test.yml | 10 +++++++--- roles/libvirt_manager/tasks/clean_layout.yml | 2 ++ roles/libvirt_manager/tasks/deploy_layout.yml | 3 +++ .../libvirt_manager/tasks/generate_networking_data.yml | 1 + roles/libvirt_manager/tasks/get_image.yml | 1 + roles/mirror_registry/tasks/main.yml | 1 + roles/nat64_appliance/molecule/default/converge.yml | 6 ++++++ roles/networking_mapper/tasks/_gather_facts.yml | 1 + roles/openshift_login/tasks/main.yml | 3 ++- roles/pkg_build/tasks/main.yml | 3 +++ roles/reproducer/tasks/generate_bm_info.yml | 1 + roles/update/tasks/reboot_hypervisor_using_cr.yml | 1 + 67 files changed, 111 insertions(+), 5 deletions(-) diff --git a/deploy-osp-adoption.yml b/deploy-osp-adoption.yml index 45b6aae89a..1dee12e853 100644 --- a/deploy-osp-adoption.yml +++ b/deploy-osp-adoption.yml @@ -85,6 +85,7 @@ 
ansible.builtin.file: path: "{{ cifmw_basedir }}/artifacts/parameters" state: "directory" + mode: "0755" - name: Save variables for use with hooks vars: @@ -96,6 +97,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/adoption_osp.yml" content: "{{ _content | to_nice_yaml }}" + mode: "0644" - name: Set inventory_file for localhost to use with hooks ansible.builtin.set_fact: inventory_file: "{{ hostvars[_target_host]['inventory_file'] }}" diff --git a/docs/source/files/bootstrap-hypervisor.yml b/docs/source/files/bootstrap-hypervisor.yml index 337c9eea2a..96cc0bb90b 100644 --- a/docs/source/files/bootstrap-hypervisor.yml +++ b/docs/source/files/bootstrap-hypervisor.yml @@ -56,7 +56,7 @@ dest: "/etc/sudoers.d/{{ _user }}" owner: root group: root - mode: 0640 + mode: "0640" - name: Install basic packages become: true diff --git a/hooks/playbooks/adoption_ironic_post_oc.yml b/hooks/playbooks/adoption_ironic_post_oc.yml index 198ee8fd51..a97d4164a9 100644 --- a/hooks/playbooks/adoption_ironic_post_oc.yml +++ b/hooks/playbooks/adoption_ironic_post_oc.yml @@ -55,6 +55,7 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ironic-python-agent" + mode: "0755" loop: - osp-undercloud-0 - osp-controller-0 @@ -82,6 +83,7 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.kernel" dest: /var/lib/ironic/httpboot/agent.kernel remote_src: true + mode: "0644" loop: - osp-controller-0 - osp-controller-1 @@ -93,6 +95,7 @@ src: "{{ ansible_user_dir }}/ironic-python-agent/ironic-python-agent.initramfs" dest: /var/lib/ironic/httpboot/agent.ramdisk remote_src: true + mode: "0644" loop: - osp-controller-0 - osp-controller-1 @@ -166,11 +169,13 @@ ansible.builtin.file: state: directory path: "{{ ansible_user_dir }}/ci-framework-data/parameters" + mode: "0755" - name: Write ironic_nodes.yaml on osp-unercloud-o ansible.builtin.copy: content: "{{ _ironic_nodes_slurp.content | b64decode }}" dest: "{{ ansible_user_dir 
}}/ci-framework-data/parameters/ironic_nodes.yaml" + mode: "0644" - name: Run baremetal create command to enroll the nodes in the Ironic service environment: diff --git a/hooks/playbooks/barbican-enable-luna.yml b/hooks/playbooks/barbican-enable-luna.yml index c3a6a2b8f5..d319e25c52 100644 --- a/hooks/playbooks/barbican-enable-luna.yml +++ b/hooks/playbooks/barbican-enable-luna.yml @@ -46,6 +46,7 @@ login_secret: "{{ cifmw_hsm_login_secret | default('barbican-luna-login', true) }}" ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/93-barbican-luna.yaml" + mode: "0644" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization diff --git a/hooks/playbooks/control_plane_ceph_backends.yml b/hooks/playbooks/control_plane_ceph_backends.yml index 49324a05c2..9d04193788 100644 --- a/hooks/playbooks/control_plane_ceph_backends.yml +++ b/hooks/playbooks/control_plane_ceph_backends.yml @@ -25,3 +25,4 @@ ansible.builtin.template: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/90-ceph-backends-kustomization.yaml" src: "config_ceph_backends.yaml.j2" + mode: "0644" diff --git a/hooks/playbooks/control_plane_hci_pre_deploy.yml b/hooks/playbooks/control_plane_hci_pre_deploy.yml index 97d04349e3..7bf373686e 100644 --- a/hooks/playbooks/control_plane_hci_pre_deploy.yml +++ b/hooks/playbooks/control_plane_hci_pre_deploy.yml @@ -32,3 +32,4 @@ - op: add path: /spec/swift/enabled value: {{ cifmw_services_swift_enabled | default('false') }} + mode: "0644" diff --git a/hooks/playbooks/control_plane_horizon.yml b/hooks/playbooks/control_plane_horizon.yml index 010e1eace7..852298c741 100644 --- a/hooks/playbooks/control_plane_horizon.yml +++ b/hooks/playbooks/control_plane_horizon.yml @@ -26,3 +26,4 @@ - op: add path: /spec/horizon/template/memcachedInstance value: memcached + mode: "0644" diff --git a/hooks/playbooks/control_plane_ironic.yml b/hooks/playbooks/control_plane_ironic.yml index 
7f278107d2..b0faee9fb1 100644 --- a/hooks/playbooks/control_plane_ironic.yml +++ b/hooks/playbooks/control_plane_ironic.yml @@ -24,3 +24,4 @@ - op: add path: /spec/ironic/enabled value: {{ cifmw_services_ironic_enabled | default('false') }} + mode: "0644" diff --git a/hooks/playbooks/federation-controlplane-config.yml b/hooks/playbooks/federation-controlplane-config.yml index bd9b9b76f9..845d3958de 100644 --- a/hooks/playbooks/federation-controlplane-config.yml +++ b/hooks/playbooks/federation-controlplane-config.yml @@ -37,6 +37,7 @@ remote_id_attribute=HTTP_OIDC_ISS [auth] methods = password,token,oauth1,mapped,application_credential,openid + mode: "0644" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index d05787e07c..1089cc8e9b 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -17,6 +17,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" + mode: "0644" - name: Build dataset hook hosts: localhost @@ -106,6 +107,7 @@ "values": [] } ] + mode: "0644" - name: Prepare EDPM deploy related facts and keys when: @@ -135,6 +137,7 @@ vars: dns_servers: "{{ ((['192.168.122.10'] + ansible_facts['dns']['nameservers']) | unique)[0:2] }}" ansible.builtin.copy: + mode: "0644" dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/dataplane/99-kustomization.yaml" content: |- apiVersion: kustomize.config.k8s.io/v1beta1 @@ -277,3 +280,4 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/{{ step }}_{{ hook_name }}.yml" content: "{{ file_content | to_nice_yaml }}" + mode: "0644" diff --git a/hooks/playbooks/ironic_enroll_nodes.yml b/hooks/playbooks/ironic_enroll_nodes.yml index b27f333a6f..e4edb57799 100644 --- a/hooks/playbooks/ironic_enroll_nodes.yml +++ b/hooks/playbooks/ironic_enroll_nodes.yml @@ -61,6 +61,7 @@ ansible.builtin.copy: dest: "{{ 
cifmw_basedir }}/parameters/ironic_nodes.yaml" content: "{{ ironic_nodes | to_yaml }}" + mode: "0644" - name: Enroll ironic nodes ansible.builtin.shell: | diff --git a/hooks/playbooks/kustomize_cr.yml b/hooks/playbooks/kustomize_cr.yml index 4ee5ad7eac..752b71d5ce 100644 --- a/hooks/playbooks/kustomize_cr.yml +++ b/hooks/playbooks/kustomize_cr.yml @@ -27,6 +27,7 @@ ansible.builtin.copy: src: "{{ cifmw_kustomize_cr_file_path }}/{{ cifmw_kustomize_cr_file_name }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/{{ cifmw_kustomize_cr_file_name }}" + mode: "0644" remote_src: true - name: Generate kustomization file @@ -34,6 +35,7 @@ ansible.builtin.template: src: "{{ playbook_dir }}/{{ cifmw_kustomize_cr_template }}" dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomization.yaml" + mode: "0644" - name: Run oc kustomize environment: @@ -47,3 +49,4 @@ ansible.builtin.copy: dest: "{{ cifmw_kustomize_cr_artifact_dir }}/kustomized_{{ cifmw_kustomize_cr_file_name }}" content: "{{ kustomized_cr.stdout }}" + mode: "0644" diff --git a/hooks/playbooks/kuttl_openstack_prep.yml b/hooks/playbooks/kuttl_openstack_prep.yml index 5d8563f765..4488225ace 100644 --- a/hooks/playbooks/kuttl_openstack_prep.yml +++ b/hooks/playbooks/kuttl_openstack_prep.yml @@ -42,3 +42,4 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/parameters/{{ step }}_{{ hook_name }}.yml" content: "{{ file_content | to_nice_yaml }}" + mode: "0644" diff --git a/hooks/playbooks/link2file.yml b/hooks/playbooks/link2file.yml index 97142dcbae..0e613c47d5 100644 --- a/hooks/playbooks/link2file.yml +++ b/hooks/playbooks/link2file.yml @@ -58,6 +58,7 @@ ansible.builtin.copy: src: "{{ item.stat.lnk_source }}" dest: "{{ _file_path }}" + mode: "0644" loop: "{{ _file_info.results }}" loop_control: label: "{{ item.item }}" diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index ff700f4a3d..bf5a956f96 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -106,5 +106,6 @@ ansible.builtin.copy: src: "{{ item.path }}" 
dest: "/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr" + mode: "0644" loop: "{{ dcn_crs.files }}" when: dcn_crs.matched > 0 diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 9dd4ee4065..5d20b62b6a 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -48,6 +48,7 @@ option: vers3 value: n backup: true + mode: "0644" - name: Disable NFSv3-related services ansible.builtin.systemd_service: @@ -89,6 +90,7 @@ 'cifmw_nfs_network_range': cifmw_nfs_network_out.stdout | from_json | json_query('cidr') } | to_nice_yaml }} + mode: "0644" # NOTE: This represents a workaround because there's an edpm-nftables role # in edpm-ansible already. That role should contain the implementation @@ -125,6 +127,7 @@ option: host value: "{{ cifmw_nfs_network_out.stdout | from_json | json_query('address') }}" backup: true + mode: "0644" - name: Enable and restart nfs-server service ansible.builtin.systemd: diff --git a/playbooks/unique-id.yml b/playbooks/unique-id.yml index c3f1c7d390..9b9709534c 100644 --- a/playbooks/unique-id.yml +++ b/playbooks/unique-id.yml @@ -38,6 +38,7 @@ ansible.builtin.copy: dest: "{{ _unique_id_file }}" content: "{{ cifmw_run_id | default(_unique_id) | lower }}" + mode: "0644" # Since the user might pass their own run ID, we can just consume it. 
# If, for a subsequent run, the user doesn't pass the run ID, we will diff --git a/playbooks/update.yml b/playbooks/update.yml index 5a35158e04..2a6895f76c 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -24,6 +24,7 @@ remote_src: true src: "{{ cifmw_basedir }}/artifacts/repositories/" dest: "{{ cifmw_basedir }}/artifacts/before_update_repos/" + mode: "0644" - name: Run repo_setup ansible.builtin.include_role: @@ -48,6 +49,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" + mode: "0644" - name: Run Ceph update if part of the deployment hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" @@ -73,6 +75,7 @@ ansible.builtin.copy: content: "{{ cephconf['content'] | b64decode }}" dest: "/tmp/ceph.conf" + mode: "0644" - name: Extract the CephFSID from ceph.conf ansible.builtin.set_fact: diff --git a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml index 3bc1e7558d..1ee13ba8b1 100644 --- a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml @@ -52,6 +52,7 @@ ansible.builtin.copy: src: "{{ _roles_file }}" dest: "{{ _roles_file_dest }}" + mode: "0644" - name: Run overcloud deploy delegate_to: "osp-undercloud-0" diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index aa0c1cdbd7..74b411aa08 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -66,6 +66,7 @@ ansible.builtin.copy: src: "{{ _container_prapare_path }}" dest: "{{ ansible_user_dir }}/containers-prepare-parameters.yaml" + mode: "0644" when: cifmw_adoption_osp_deploy_scenario.container_prepare_params is defined # Adoption requires Ceph 7 (Reef) as a requirement. 
Instead of performing a Ceph @@ -239,4 +240,5 @@ option: "{{ item.option }}" value: "{{ item.value }}" state: "present" + mode: "0644" loop: "{{ _undercloud_conf.config }}" diff --git a/roles/artifacts/tasks/ansible_logs.yml b/roles/artifacts/tasks/ansible_logs.yml index 169a550f6e..1355019bb8 100644 --- a/roles/artifacts/tasks/ansible_logs.yml +++ b/roles/artifacts/tasks/ansible_logs.yml @@ -10,4 +10,5 @@ src: "{{ item.path }}" dest: "{{ cifmw_artifacts_basedir }}/logs/" remote_src: true + mode: "0644" loop: "{{ files_to_copy.files }}" diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index a0ecb5cc50..7519b41063 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -33,6 +33,7 @@ ansible.builtin.file: path: "{{ cifmw_artifacts_basedir }}/{{ item }}" state: directory + mode: "0755" loop: - artifacts - logs diff --git a/roles/build_openstack_packages/tasks/create_repo.yml b/roles/build_openstack_packages/tasks/create_repo.yml index 016fc3586d..0dc056c8f1 100644 --- a/roles/build_openstack_packages/tasks/create_repo.yml +++ b/roles/build_openstack_packages/tasks/create_repo.yml @@ -39,6 +39,7 @@ remote_src: true src: "{{ _repodir.path }}/" dest: "{{ cifmw_bop_gating_repo_dest }}" + mode: "0644" - name: Add gating.repo file to install the required built packages ansible.builtin.copy: @@ -50,6 +51,7 @@ gpgcheck=0 priority=1 dest: "{{ cifmw_bop_gating_repo_dest }}/gating.repo" + mode: "0644" - name: Serve gating repo ansible.builtin.import_tasks: serve_gating_repo.yml diff --git a/roles/build_openstack_packages/tasks/downstream.yml b/roles/build_openstack_packages/tasks/downstream.yml index 260a0b5ef0..751126be43 100644 --- a/roles/build_openstack_packages/tasks/downstream.yml +++ b/roles/build_openstack_packages/tasks/downstream.yml @@ -26,12 +26,14 @@ remote_src: true src: "{{ ansible_user_dir }}/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ 
cifmw_bop_initial_dlrn_config }}.cfg" + mode: "0644" - name: Copy patch_rebaser.ini to patch_rebaser repo ansible.builtin.copy: remote_src: true src: "{{ ansible_user_dir }}/patch_rebaser.ini" dest: "{{ cifmw_bop_build_repo_dir }}/patch_rebaser/patch_rebaser/patch_rebaser.ini" + mode: "0644" - name: Copy Downstream scripts to DLRN repo ansible.builtin.copy: diff --git a/roles/build_openstack_packages/tasks/install_dlrn.yml b/roles/build_openstack_packages/tasks/install_dlrn.yml index 1a08a1729a..11cd72ed26 100644 --- a/roles/build_openstack_packages/tasks/install_dlrn.yml +++ b/roles/build_openstack_packages/tasks/install_dlrn.yml @@ -126,6 +126,7 @@ ansible.builtin.template: src: projects.ini.j2 dest: '{{ cifmw_bop_build_repo_dir }}/DLRN/projects.ini' + mode: "0644" - name: Copy the DLRN scripts in the virtualenv to the scripts dir ansible.posix.synchronize: @@ -159,6 +160,7 @@ remote_src: true src: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}.cfg" dest: "{{ cifmw_bop_build_repo_dir }}/DLRN/scripts/{{ cifmw_bop_initial_dlrn_config }}-local.cfg" + mode: "0644" - name: Remove last """ from local mock config # noqa: command-instead-of-module ansible.builtin.command: diff --git a/roles/cert_manager/tasks/olm_manifest.yml b/roles/cert_manager/tasks/olm_manifest.yml index 90ba2331ca..48e8ad2645 100644 --- a/roles/cert_manager/tasks/olm_manifest.yml +++ b/roles/cert_manager/tasks/olm_manifest.yml @@ -3,6 +3,7 @@ ansible.builtin.copy: dest: "{{ cifmw_cert_manager_manifests_dir }}/cert-manager-{{ item.kind | lower }}-olm.yaml" content: "{{ item | to_nice_yaml }}" + mode: "0644" loop: - "{{ cifmw_cert_manager_olm_operator_group }}" - "{{ cifmw_cert_manager_olm_subscription }}" diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index c27815d741..b44d837a5c 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -36,6 +36,7 @@ create: true backup: true insertbefore: EOF + mode: 
"0644" - name: Ensure Ceph bootstrap host can ping itself register: _cmd_result diff --git a/roles/ci_dcn_site/tasks/scaledown_site.yml b/roles/ci_dcn_site/tasks/scaledown_site.yml index 407b9a188d..23ba5da09b 100644 --- a/roles/ci_dcn_site/tasks/scaledown_site.yml +++ b/roles/ci_dcn_site/tasks/scaledown_site.yml @@ -200,11 +200,13 @@ ansible.builtin.file: path: "/tmp/ceph_conf_files" state: directory + mode: "0750" - name: Save secret data to files ansible.builtin.copy: content: "{{ secret_info.resources[0].data[key] | b64decode | regex_replace('(?m)^\\s*\\n', '') }}" dest: "/tmp/ceph_conf_files/{{ key }}" + mode: "0640" loop: "{{ secret_info.resources[0].data.keys() }}" loop_control: loop_var: key diff --git a/roles/ci_local_storage/tasks/main.yml b/roles/ci_local_storage/tasks/main.yml index 6daf1e0061..169f581619 100644 --- a/roles/ci_local_storage/tasks/main.yml +++ b/roles/ci_local_storage/tasks/main.yml @@ -33,6 +33,7 @@ ansible.builtin.copy: dest: "{{ cifmw_cls_manifests_dir }}/storage-class.yaml" content: "{{ cifmw_cls_storage_manifest | to_nice_yaml }}" + mode: "0644" - name: Get k8s nodes ansible.builtin.import_tasks: fetch_names.yml diff --git a/roles/ci_lvms_storage/tasks/main.yml b/roles/ci_lvms_storage/tasks/main.yml index 362a200d6f..e3699aba4a 100644 --- a/roles/ci_lvms_storage/tasks/main.yml +++ b/roles/ci_lvms_storage/tasks/main.yml @@ -26,6 +26,7 @@ ansible.builtin.file: path: "{{ cifmw_lvms_manifests_dir }}" state: directory + mode: "0755" - name: Put the manifest files in place ansible.builtin.template: diff --git a/roles/ci_multus/molecule/resources/clean.yml b/roles/ci_multus/molecule/resources/clean.yml index 2f9abfbd4b..e88c90ee19 100644 --- a/roles/ci_multus/molecule/resources/clean.yml +++ b/roles/ci_multus/molecule/resources/clean.yml @@ -23,6 +23,7 @@ src: "{{ cifmw_ci_multus_manifests_dir }}" dest: "{{ cifmw_ci_multus_manifests_dir }}.backup" remote_src: true + mode: "0755" - name: Call cleanup ansible.builtin.import_role: diff --git 
a/roles/ci_multus/tasks/main.yml b/roles/ci_multus/tasks/main.yml index 5edcdfb30f..84d8a8c572 100644 --- a/roles/ci_multus/tasks/main.yml +++ b/roles/ci_multus/tasks/main.yml @@ -18,6 +18,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_multus_manifests_dir }}" state: directory + mode: "0755" - name: Build list of networks from cifmw_networking_env_definition block: @@ -117,6 +118,7 @@ ansible.builtin.template: src: "nad.yml.j2" dest: "{{ cifmw_ci_multus_manifests_dir }}/ci_multus_nads.yml" + mode: "0644" - name: Create resources in OCP when: not cifmw_ci_multus_dryrun diff --git a/roles/ci_network/tasks/main.yml b/roles/ci_network/tasks/main.yml index 179d4b8b47..27d1be8494 100644 --- a/roles/ci_network/tasks/main.yml +++ b/roles/ci_network/tasks/main.yml @@ -42,6 +42,7 @@ section: "{{ nm_conf.section }}" option: "{{ nm_conf.option }}" value: "{{ nm_conf.value }}" + mode: "0644" loop: "{{ cifmw_network_nm_config }}" loop_control: loop_var: nm_conf diff --git a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml index ed707c6763..8ce164cbb3 100644 --- a/roles/ci_nmstate/tasks/nmstate_k8s_install.yml +++ b/roles/ci_nmstate/tasks/nmstate_k8s_install.yml @@ -3,6 +3,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_manifests_dir }}" state: directory + mode: "0755" - name: Create the nmstate namespace kubernetes.core.k8s: diff --git a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml index 470811463b..6cc009fad3 100644 --- a/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml +++ b/roles/ci_nmstate/tasks/nmstate_unmanaged_provision_node.yml @@ -25,6 +25,7 @@ ansible.builtin.file: path: "{{ cifmw_ci_nmstate_configs_dir }}" state: directory + mode: "0755" - name: "Save nmstate state for {{ cifmw_ci_nmstate_unmanaged_host }}" ansible.builtin.copy: diff --git a/roles/cifmw_cephadm/tasks/dashboard/validation.yml 
b/roles/cifmw_cephadm/tasks/dashboard/validation.yml index b8e6569b89..1559ba30a9 100644 --- a/roles/cifmw_cephadm/tasks/dashboard/validation.yml +++ b/roles/cifmw_cephadm/tasks/dashboard/validation.yml @@ -25,6 +25,7 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_response" + mode: "0644" validate_certs: false register: dashboard_response failed_when: dashboard_response.failed == true @@ -37,6 +38,7 @@ ansible.builtin.get_url: url: "{{ cifmw_cephadm_urischeme_dashboard | default('http') }}://{{ grafana_server_addr }}:{{ cifmw_cephadm_dashboard_port }}" dest: "/tmp/dash_http_response" + mode: "0644" validate_certs: false username: admin password: admin diff --git a/roles/cifmw_external_dns/tasks/requirements.yml b/roles/cifmw_external_dns/tasks/requirements.yml index af123b118d..21799f008a 100644 --- a/roles/cifmw_external_dns/tasks/requirements.yml +++ b/roles/cifmw_external_dns/tasks/requirements.yml @@ -56,6 +56,7 @@ ansible.builtin.file: path: "{{ cifmw_external_dns_manifests_dir }}" state: directory + mode: "0755" - name: Stat cifmw_external_dns_certificate on target hosts ansible.builtin.stat: diff --git a/roles/compliance/tasks/create_scap_report.yml b/roles/compliance/tasks/create_scap_report.yml index 5cb8a1e9eb..74bf07f531 100644 --- a/roles/compliance/tasks/create_scap_report.yml +++ b/roles/compliance/tasks/create_scap_report.yml @@ -31,6 +31,7 @@ ansible.builtin.copy: src: "{{ bzip_file.path }}" dest: "{{ base_name }}.xml.bz2" + mode: "0644" - name: Unzip the file ansible.builtin.command: "bunzip2 {{ base_name }}.xml.bz2" diff --git a/roles/copy_container/molecule/default/converge.yml b/roles/copy_container/molecule/default/converge.yml index c17b388b1d..a80508c0bd 100644 --- a/roles/copy_container/molecule/default/converge.yml +++ b/roles/copy_container/molecule/default/converge.yml @@ -43,6 +43,7 @@ ansible.builtin.copy: dest: 
"/tmp/copy-quay-config.yaml" content: "{{ _data }}" + mode: "0644" - name: Copy containers from RDO quay to local registry ansible.builtin.command: diff --git a/roles/copy_container/tasks/main.yml b/roles/copy_container/tasks/main.yml index 53947623e1..fb95f13034 100644 --- a/roles/copy_container/tasks/main.yml +++ b/roles/copy_container/tasks/main.yml @@ -42,6 +42,7 @@ ansible.builtin.copy: src: copy-quay/ dest: "{{ temporary_copy_container_dir.path }}" + mode: "0755" - name: Build the copy-container register: go_build diff --git a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml index 99866dbe2d..b764da7f13 100644 --- a/roles/devscripts/molecule/check_cluster_status/tasks/test.yml +++ b/roles/devscripts/molecule/check_cluster_status/tasks/test.yml @@ -95,6 +95,7 @@ ansible.builtin.copy: dest: "/home/dev-scripts/.ocp_cert_not_after" content: "{{ _date }}" + mode: "0644" - name: Ensure freshly built config ansible.builtin.include_role: diff --git a/roles/devscripts/tasks/139_configs.yml b/roles/devscripts/tasks/139_configs.yml index a6e7aeba67..e899e7673b 100644 --- a/roles/devscripts/tasks/139_configs.yml +++ b/roles/devscripts/tasks/139_configs.yml @@ -38,3 +38,4 @@ src: templates/conf_ciuser.j2 dest: >- {{ cifmw_devscripts_repo_dir }}/config_{{ cifmw_devscripts_user }}.sh + mode: "0644" diff --git a/roles/devscripts/tasks/main.yml b/roles/devscripts/tasks/main.yml index 61407b0e68..6a87bf0237 100644 --- a/roles/devscripts/tasks/main.yml +++ b/roles/devscripts/tasks/main.yml @@ -64,6 +64,7 @@ dest: "{{ cifmw_devscripts_logs_dir }}/{{ item.path | basename }}" remote_src: true src: "{{ item.path }}" + mode: "0644" loop: "{{ _deploy_logs.files }}" loop_control: label: "{{ item.path }}" diff --git a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml index 86ab6b4582..185fac9df1 100644 --- 
a/roles/dlrn_promote/tasks/get_hash_from_commit.yaml +++ b/roles/dlrn_promote/tasks/get_hash_from_commit.yaml @@ -3,6 +3,7 @@ ansible.builtin.get_url: url: "{{ commit_url }}/commit.yaml" dest: "{{ cifmw_dlrn_promote_workspace }}/commit.yaml" + mode: "0644" force: true register: result until: diff --git a/roles/edpm_build_images/tasks/main.yml b/roles/edpm_build_images/tasks/main.yml index 2c4f1d821b..02309ad893 100644 --- a/roles/edpm_build_images/tasks/main.yml +++ b/roles/edpm_build_images/tasks/main.yml @@ -31,6 +31,7 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_edpm_build_images_basedir }}" timeout: 20 + mode: "0644" register: result until: result is success retries: 60 diff --git a/roles/edpm_kustomize/tasks/kustomize.yml b/roles/edpm_kustomize/tasks/kustomize.yml index da1fda1060..5c5ecd2fd8 100644 --- a/roles/edpm_kustomize/tasks/kustomize.yml +++ b/roles/edpm_kustomize/tasks/kustomize.yml @@ -33,6 +33,7 @@ } ) | to_nice_yaml }} + mode: "0644" - name: Apply the already existing kustomization if present environment: diff --git a/roles/edpm_kustomize/tasks/main.yml b/roles/edpm_kustomize/tasks/main.yml index 0243bbe17d..1065381ed5 100644 --- a/roles/edpm_kustomize/tasks/main.yml +++ b/roles/edpm_kustomize/tasks/main.yml @@ -55,6 +55,7 @@ remote_src: true src: "{{ cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.yaml" dest: "{{ cifmw_edpm_kustomize_cr_path | dirname }}/kustomization.initial.yaml" + mode: "0644" - name: Prepare and load the ci-framework kustomize template file vars: diff --git a/roles/env_op_images/tasks/main.yml b/roles/env_op_images/tasks/main.yml index 7822587d95..39a6e55d55 100644 --- a/roles/env_op_images/tasks/main.yml +++ b/roles/env_op_images/tasks/main.yml @@ -139,3 +139,4 @@ ansible.builtin.copy: dest: "{{ cifmw_env_op_images_dir }}/artifacts/{{ cifmw_env_op_images_file }}" content: "{{ _content | to_nice_yaml }}" + mode: "0644" diff --git a/roles/federation/tasks/run_keycloak_setup.yml 
b/roles/federation/tasks/run_keycloak_setup.yml index b8023bbee0..41cd8ef218 100644 --- a/roles/federation/tasks/run_keycloak_setup.yml +++ b/roles/federation/tasks/run_keycloak_setup.yml @@ -25,6 +25,7 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, '.crc', 'machines', 'crc', 'kubeconfig' ] | path_join }}" dest: "{{ [ ansible_user_dir, '.kube', 'config' ] | path_join }}" + mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Create namespace @@ -38,6 +39,7 @@ ansible.builtin.template: src: rhsso-operator-olm.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'rhsso-operator-olm.yaml' ] | path_join }}" + mode: "0644" - name: Install federation rhsso operator environment: @@ -89,6 +91,7 @@ ansible.builtin.template: src: sso.yaml.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'sso.yaml' ] | path_join }}" + mode: "0644" - name: Install federation sso pod environment: @@ -130,3 +133,4 @@ ansible.builtin.copy: src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'tls.crt'] | path_join }}" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + mode: "0644" diff --git a/roles/federation/tasks/run_openstack_auth_test.yml b/roles/federation/tasks/run_openstack_auth_test.yml index ffbbda6e1e..f87b2d9a53 100644 --- a/roles/federation/tasks/run_openstack_auth_test.yml +++ b/roles/federation/tasks/run_openstack_auth_test.yml @@ -31,6 +31,7 @@ ansible.builtin.template: src: kctestuser1.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" + mode: "0644" - name: Copy federation test user1 cloudrc file into pod kubernetes.core.k8s_cp: @@ -43,6 +44,7 @@ ansible.builtin.copy: src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" + mode: "0444" - name: Get ingress operator CA cert ansible.builtin.slurp: diff --git 
a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index 593177a24d..a4abd325c4 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -18,6 +18,7 @@ ansible.builtin.copy: src: /home/zuul/.crc/machines/crc/kubeconfig dest: /home/zuul/.kube/config + mode: "0640" when: cifmw_federation_deploy_type == "crc" - name: Run federation create domain @@ -37,6 +38,7 @@ ansible.builtin.template: src: rules.json.j2 dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_rules_file ] | path_join }}" + mode: "0644" - name: Copy federation rules json file into pod kubernetes.core.k8s_cp: diff --git a/roles/hive/tasks/main.yml b/roles/hive/tasks/main.yml index 457a649d82..867b18908d 100644 --- a/roles/hive/tasks/main.yml +++ b/roles/hive/tasks/main.yml @@ -30,6 +30,7 @@ ansible.builtin.file: path: "{{ cifmw_hive_artifacts_dir }}" state: directory + mode: "0755" - name: "Performing {{ cifmw_hive_platform }} {{cifmw_hive_action }}" # noqa: name[template] ansible.builtin.include_tasks: "{{ cifmw_hive_platform }}_{{ cifmw_hive_action }}.yml" diff --git a/roles/install_ca/tasks/main.yml b/roles/install_ca/tasks/main.yml index aac0b232a1..9c5c0cbab6 100644 --- a/roles/install_ca/tasks/main.yml +++ b/roles/install_ca/tasks/main.yml @@ -29,6 +29,7 @@ url: "{{ cifmw_install_ca_url }}" dest: "{{ cifmw_install_ca_trust_dir }}" validate_certs: "{{ cifmw_install_ca_url_validate_certs | default(omit) }}" + mode: "0644" - name: Install custom CA bundle from inline register: ca_inline diff --git a/roles/install_yamls/tasks/main.yml b/roles/install_yamls/tasks/main.yml index 0f3ed9536b..0a70460e67 100644 --- a/roles/install_yamls/tasks/main.yml +++ b/roles/install_yamls/tasks/main.yml @@ -120,6 +120,7 @@ {% for k,v in cifmw_install_yamls_environment.items() %} export {{ k }}={{ v }} {% endfor %} + mode: "0644" - name: Set install_yamls default values tags: @@ -166,6 +167,7 
@@ 'cifmw_install_yamls_defaults': cifmw_install_yamls_defaults } | to_nice_yaml }} + mode: "0644" - name: Create empty cifmw_install_yamls_environment if needed tags: diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 9d8e459e4a..37d9c6405c 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -51,6 +51,7 @@ 'values.yaml' ) | path_join }} + mode: "0644" - name: Generate the OLM kustomization file ansible.builtin.copy: diff --git a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml index 3c3300e205..04f360b08f 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml @@ -151,6 +151,7 @@ remote_src: true src: "{{ cifmw_basedir }}/{{ item }}" dest: "{{ _dest }}/" + mode: "0755" loop: - artifacts - logs @@ -160,11 +161,14 @@ failed_when: false ansible.builtin.copy: remote_src: true - src: "{{ item }}" + src: "{{ item.src }}" dest: "{{ _dest }}/" + mode: "{{ item.mode }}" loop: - - /etc/cifmw-dnsmasq.conf - - /etc/cifmw-dnsmasq.d + - { src: "/etc/cifmw-dnsmasq.conf", mode: "0644" } + - { src: "/etc/cifmw-dnsmasq.d", mode: "0755" } + loop_control: + label: "{{ item.src }}" - name: Clean environment vars: diff --git a/roles/libvirt_manager/tasks/clean_layout.yml b/roles/libvirt_manager/tasks/clean_layout.yml index e56816b35e..d7467c2dd4 100644 --- a/roles/libvirt_manager/tasks/clean_layout.yml +++ b/roles/libvirt_manager/tasks/clean_layout.yml @@ -82,6 +82,7 @@ marker: "## {mark} {{ vm }} {{ inventory_hostname }}" state: absent create: true + mode: "0600" loop: "{{ cleanup_vms }}" # KEEP this for now to ensure smoother migration @@ -93,6 +94,7 @@ marker: "## {mark} {{ vm }}" state: absent create: true + mode: "0600" loop: "{{ cleanup_vms }}" - name: 
Get network list diff --git a/roles/libvirt_manager/tasks/deploy_layout.yml b/roles/libvirt_manager/tasks/deploy_layout.yml index fc590981e8..9705c4e116 100644 --- a/roles/libvirt_manager/tasks/deploy_layout.yml +++ b/roles/libvirt_manager/tasks/deploy_layout.yml @@ -95,6 +95,7 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/{{ item }}-group.yml" src: inventory.yml.j2 + mode: "0644" loop: "{{ _cifmw_libvirt_manager_layout.vms.keys() }}" loop_control: label: "{{ item }}" @@ -103,6 +104,7 @@ ansible.builtin.template: dest: "{{ cifmw_libvirt_manager_basedir }}/reproducer-inventory/all-group.yml" src: "all-inventory.yml.j2" + mode: "0644" - name: Ensure storage pool is present. when: @@ -316,6 +318,7 @@ dest: >- {{ cifmw_libvirt_manager_basedir }}/artifacts/virtual-nodes.yml content: "{{ content | to_nice_yaml }}" + mode: "0644" - name: Ensure we get proper access to CRC when: diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 77fb0dc5ea..5d614d8ad2 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -79,6 +79,7 @@ ansible.builtin.copy: dest: "{{ _nic_info }}" content: "{{ cifmw_libvirt_manager_mac_map | to_nice_yaml }}" + mode: "0644" # END MAC pre-generation management # # START generate all IPs using networking_mapper role/module diff --git a/roles/libvirt_manager/tasks/get_image.yml b/roles/libvirt_manager/tasks/get_image.yml index d8eb33b05d..9b1f13f58f 100644 --- a/roles/libvirt_manager/tasks/get_image.yml +++ b/roles/libvirt_manager/tasks/get_image.yml @@ -25,6 +25,7 @@ ansible.builtin.get_url: url: "{{ image_data.image_url }}" dest: "{{ image_data.image_local_dir }}/{{ image_data.disk_file_name }}" + mode: "0644" checksum: >- {% if image_data.sha256_image_name -%} sha256:{{ image_data.sha256_image_name }} diff --git 
a/roles/mirror_registry/tasks/main.yml b/roles/mirror_registry/tasks/main.yml index 6f2ac78bde..2adceaaed9 100644 --- a/roles/mirror_registry/tasks/main.yml +++ b/roles/mirror_registry/tasks/main.yml @@ -28,6 +28,7 @@ owner: "{{ ansible_user_id }}" group: "{{ ansible_user_id }}" state: directory + mode: "0755" - name: Download mirror-registry tools ansible.builtin.unarchive: diff --git a/roles/nat64_appliance/molecule/default/converge.yml b/roles/nat64_appliance/molecule/default/converge.yml index 014a76bf83..c321d6d1c2 100644 --- a/roles/nat64_appliance/molecule/default/converge.yml +++ b/roles/nat64_appliance/molecule/default/converge.yml @@ -50,6 +50,7 @@ url: "{{ cifmw_discovered_image_url }}" dest: "{{ cifmw_basedir }}" timeout: 20 + mode: "0644" register: result until: result is success retries: 60 @@ -423,26 +424,31 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/test_node_info.log" content: "{{ _test_node_debug_info.stdout }}" + mode: "0644" - name: Write nat64-appliance info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_node_info.log" content: "{{ _nat64_appliance_debug_info.stdout }}" + mode: "0644" - name: Write nat64-appliance journal to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_journal.log" content: "{{ _nat64_appliance_journal.stdout }}" + mode: "0644" - name: Write nat64-appliance DNS64 debug to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/nat64_appliance_dns64_debug.log" content: "{{ _nat64_appliance_dns64_debug.stdout }}" + mode: "0644" - name: Write hypervisor info to file ansible.builtin.copy: dest: "{{ cifmw_basedir }}/logs/hypervisor_info.log" content: "{{ _hypervisor_info.stdout }}" + mode: "0644" - name: Ping example.com (delegate to test-node) delegate_to: test-node diff --git a/roles/networking_mapper/tasks/_gather_facts.yml b/roles/networking_mapper/tasks/_gather_facts.yml index 25564e6058..d16438b336 100644 --- 
a/roles/networking_mapper/tasks/_gather_facts.yml +++ b/roles/networking_mapper/tasks/_gather_facts.yml @@ -77,3 +77,4 @@ items2dict | to_nice_yaml }} + mode: "0644" diff --git a/roles/openshift_login/tasks/main.yml b/roles/openshift_login/tasks/main.yml index 1c2cf634ef..f2a9f9d1a8 100644 --- a/roles/openshift_login/tasks/main.yml +++ b/roles/openshift_login/tasks/main.yml @@ -98,7 +98,7 @@ ansible.builtin.copy: dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/openshift-login-params.yml" content: "{{ cifmw_openshift_login_params_content | from_yaml | to_nice_yaml }}" - + mode: "0600" - name: Update the install-yamls-params with KUBECONFIG when: cifmw_install_yamls_environment is defined block: @@ -120,3 +120,4 @@ }, recursive=true) | to_nice_yaml }} dest: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/install-yamls-params.yml" + mode: "0600" diff --git a/roles/pkg_build/tasks/main.yml b/roles/pkg_build/tasks/main.yml index ba20c937fe..727c6e7f34 100644 --- a/roles/pkg_build/tasks/main.yml +++ b/roles/pkg_build/tasks/main.yml @@ -20,6 +20,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/{{ item }}" state: directory + mode: "0755" loop: - volumes/packages/gating_repo - artifacts @@ -35,6 +36,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/volumes/packages/{{ pkg.name }}" state: directory + mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' @@ -44,6 +46,7 @@ ansible.builtin.file: path: "{{ cifmw_pkg_build_basedir }}/logs/build_{{ pkg.name }}" state: directory + mode: "0755" loop: "{{ cifmw_pkg_build_list }}" loop_control: loop_var: 'pkg' diff --git a/roles/reproducer/tasks/generate_bm_info.yml b/roles/reproducer/tasks/generate_bm_info.yml index 7405442fc1..585758b5e7 100644 --- a/roles/reproducer/tasks/generate_bm_info.yml +++ b/roles/reproducer/tasks/generate_bm_info.yml @@ -151,3 +151,4 @@ ) %} {% endfor %} {{ 
{'nodes': _ironic_nodes } | to_nice_yaml(indent=2) }} + mode: "0644" diff --git a/roles/update/tasks/reboot_hypervisor_using_cr.yml b/roles/update/tasks/reboot_hypervisor_using_cr.yml index b091cdedd2..3d753930a6 100644 --- a/roles/update/tasks/reboot_hypervisor_using_cr.yml +++ b/roles/update/tasks/reboot_hypervisor_using_cr.yml @@ -23,6 +23,7 @@ ansible.builtin.copy: dest: "{{ cifmw_update_artifacts_basedir }}/{{ cifmw_reboot_dep_name }}.yaml" content: "{{ _content | to_nice_yaml }}" + mode: "0644" vars: _content: apiVersion: dataplane.openstack.org/v1beta1 From f82226e578bfad905d80eff2178faf34b0969abb Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 21 Mar 2025 11:38:15 +0530 Subject: [PATCH 047/480] Add mode for files and dirs The aim is to enable risky-file-permissions linter. --- hooks/playbooks/fetch_compute_facts.yml | 2 +- playbooks/update.yml | 4 ++-- roles/build_openstack_packages/tasks/create_repo.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 1089cc8e9b..f48541bb62 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -17,7 +17,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" - mode: "0644" + mode: "0755" - name: Build dataset hook hosts: localhost diff --git a/playbooks/update.yml b/playbooks/update.yml index 2a6895f76c..11a9a824ec 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -24,7 +24,7 @@ remote_src: true src: "{{ cifmw_basedir }}/artifacts/repositories/" dest: "{{ cifmw_basedir }}/artifacts/before_update_repos/" - mode: "0644" + mode: "0755" - name: Run repo_setup ansible.builtin.include_role: @@ -49,7 +49,7 @@ ansible.builtin.copy: dest: "/etc/yum.repos.d/" src: "{{ cifmw_basedir }}/artifacts/repositories/" - mode: "0644" + mode: "0755" - name: Run Ceph update if part of the deployment hosts: "{{ 
(groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" diff --git a/roles/build_openstack_packages/tasks/create_repo.yml b/roles/build_openstack_packages/tasks/create_repo.yml index 0dc056c8f1..74f0b0ca6d 100644 --- a/roles/build_openstack_packages/tasks/create_repo.yml +++ b/roles/build_openstack_packages/tasks/create_repo.yml @@ -39,7 +39,7 @@ remote_src: true src: "{{ _repodir.path }}/" dest: "{{ cifmw_bop_gating_repo_dest }}" - mode: "0644" + mode: "0755" - name: Add gating.repo file to install the required built packages ansible.builtin.copy: From 0f9f620cbb260f7907a74ae5e45127e71a17d272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Mon, 7 Apr 2025 15:15:06 +0200 Subject: [PATCH 048/480] adoption_osp_deploy: edpm_nodes_networker section Move _vm_groups['osp-networkers'] nodes into a separate edpm_nodes_networker variable. Also add if conditions to only include _vm_groups if length > 0 for both computes and networkers. --- .../templates/adoption_vars.yaml.j2 | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 index d0a581445f..49c7468036 100644 --- a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 @@ -3,17 +3,22 @@ source_mariadb_ip: {{ _controller_1_internalapi_ip }} source_ovndb_ip: {{ _controller_1_internalapi_ip }} edpm_node_hostname: {{ _compute_1_name }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} edpm_node_ip: {{ _compute_1_ip }} + +{% if _vm_groups['osp-computes'] | default([]) | length > 0 %} edpm_computes: | {% for compute in _vm_groups['osp-computes'] %} {% set node_nets = cifmw_networking_env_definition.instances[compute] %} ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane.ip_v4 }}" {% endfor %} +{% endif %} + +{% if 
_vm_groups['osp-networkers'] | default([]) | length > 0 %} edpm_networkers: | {% for networker in _vm_groups['osp-networkers'] | default([]) %} {% set node_nets = cifmw_networking_env_definition.instances[networker] %} ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane.ip_v4 }}" {% endfor %} - +{% endif %} source_galera_members: | {% for controller in _vm_groups['osp-controllers'] %} @@ -21,6 +26,7 @@ source_galera_members: | ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.internalapi.ip_v4 }}" {% endfor %} +{% if _vm_groups['osp-computes'] | default([]) | length > 0 %} edpm_nodes: {% for compute in _vm_groups['osp-computes'] %} {% set node_nets = cifmw_networking_env_definition.instances[compute] %} @@ -38,6 +44,10 @@ edpm_nodes: {% endif %} {% endfor %} {% endfor %} +{% endif %} + +{% if _vm_groups['osp-networkers'] | default([]) | length > 0 %} +edpm_nodes_networker: {% for networker in _vm_groups['osp-networkers'] | default([]) %} {% set node_nets = cifmw_networking_env_definition.instances[networker] %} {{ networker }}: @@ -54,7 +64,7 @@ edpm_nodes: {% endif %} {% endfor %} {% endfor %} - +{% endif %} upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane.dns_v4 | first }} os_cloud_name: {{ cifmw_adoption_osp_deploy_scenario.stacks[0].stackname }} From e453a75342168e12447895cdd1c69a14ad179c28 Mon Sep 17 00:00:00 2001 From: Sergey Bekkerman Date: Tue, 11 Mar 2025 16:07:22 +0100 Subject: [PATCH 049/480] [ci_dcn_site] Update repo variables paths Modified `ci_dcn_site_cifmw_repo_path` to create its value dynamically from cifmw_architecture_repo --- roles/ci_dcn_site/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/ci_dcn_site/defaults/main.yml b/roles/ci_dcn_site/defaults/main.yml index 857c256f9a..0a8f1dd278 100644 --- a/roles/ci_dcn_site/defaults/main.yml +++ b/roles/ci_dcn_site/defaults/main.yml @@ 
-14,9 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. -ci_dcn_site_arch_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/architecture +ci_dcn_site_arch_repo_path: "{{ cifmw_architecture_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/architecture') }}" ci_dcn_site_arch_path: "{{ ci_dcn_site_arch_repo_path }}/examples/dt/dcn" -ci_dcn_site_cifmw_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework +ci_dcn_site_cifmw_repo_path: "{{ ci_dcn_site_arch_repo_path | regex_replace('architecture$', 'ci-framework') }}" ci_dcn_site_search_storage_network_names: - "storage" - "storagedcn1" From 65cf94dc754007c23ecbcb83aca64511a01bcf6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mari=C3=A1n=20Kr=C4=8Dm=C3=A1rik?= Date: Tue, 8 Apr 2025 02:15:51 +0200 Subject: [PATCH 050/480] Use upstream ci-framework --- roles/ci_dcn_site/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_dcn_site/defaults/main.yml b/roles/ci_dcn_site/defaults/main.yml index 0a8f1dd278..8e11237b27 100644 --- a/roles/ci_dcn_site/defaults/main.yml +++ b/roles/ci_dcn_site/defaults/main.yml @@ -16,7 +16,7 @@ ci_dcn_site_arch_repo_path: "{{ cifmw_architecture_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/architecture') }}" ci_dcn_site_arch_path: "{{ ci_dcn_site_arch_repo_path }}/examples/dt/dcn" -ci_dcn_site_cifmw_repo_path: "{{ ci_dcn_site_arch_repo_path | regex_replace('architecture$', 'ci-framework') }}" +ci_dcn_site_cifmw_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework ci_dcn_site_search_storage_network_names: - "storage" - "storagedcn1" From 6c2d5c0efcbe2a37c25387a30cec6b8cb0de0376 Mon Sep 17 00:00:00 2001 From: frenzyfriday Date: Fri, 28 Mar 2025 13:45:13 +0100 Subject: [PATCH 051/480] Adds GH action to run branch sync --- .../sync_branches_reusable_workflow.yml | 6 ++++++ .../sync_branches_with_ext_trigger.yml | 20 
++++++++----------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 35731aff3f..5c258d47ed 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -9,6 +9,10 @@ on: target-branch: required: true type: string + secrets: + ssh-key: + description: 'Deploy token write access' + required: true jobs: sync-branches: @@ -20,6 +24,8 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + ssh-key: ${{ secrets.ssh-key }} + persist-credentials: true - name: Git config run: | diff --git a/.github/workflows/sync_branches_with_ext_trigger.yml b/.github/workflows/sync_branches_with_ext_trigger.yml index f8f5c247f6..2e59a615ab 100644 --- a/.github/workflows/sync_branches_with_ext_trigger.yml +++ b/.github/workflows/sync_branches_with_ext_trigger.yml @@ -1,18 +1,14 @@ -name: Sync branches with external trigger - +--- +name: Sync a target branch with source branch on: - workflow_dispatch: - inputs: - source-branch: - required: false - default: 'main' - target-branch: - required: false - default: 'ananya-do-not-use-tmp' + repository_dispatch: + types: [trigger-sync] jobs: trigger-sync: uses: openstack-k8s-operators/ci-framework/.github/workflows/sync_branches_reusable_workflow.yml@main with: - source-branch: ${{ inputs.source-branch }} - target-branch: ananya-do-not-use-tmp # Hardcoded till testing finishes + source-branch: ${{ github.event.client_payload.source-branch }} + target-branch: ${{ github.event.client_payload.target-branch }} + secrets: + ssh-key: ${{ secrets.DEPLOY_KEY }} From 63d678ede1db9765df0363c8c2f4f48431a57d9d Mon Sep 17 00:00:00 2001 From: Jiri Macku Date: Thu, 10 Apr 2025 12:07:38 +0200 Subject: [PATCH 052/480] [cifmw_external_dns] Check better for the secret We see often the task exits the retry loop but we don't have the secrets na the subsequent Populate 
key task fails. --- roles/cifmw_external_dns/tasks/cert.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/roles/cifmw_external_dns/tasks/cert.yml b/roles/cifmw_external_dns/tasks/cert.yml index 29f66bcd30..a5776c09b0 100644 --- a/roles/cifmw_external_dns/tasks/cert.yml +++ b/roles/cifmw_external_dns/tasks/cert.yml @@ -54,7 +54,10 @@ register: cert_info retries: "{{ cifmw_external_dns_retries }}" delay: "{{ cifmw_external_dns_delay }}" - until: cert_info.failed == false + until: + - cert_info.failed == false + - cert_info.resources[0].data['tls.crt'] is defined + - cert_info.resources[0].data['tls.key'] is defined - name: Ensure key and certificate directories exist on target host become: true From 69f8180fe20dea2d5461f8d7f85bb06a1534eb42 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Tue, 15 Apr 2025 11:39:36 +0000 Subject: [PATCH 053/480] [OSPNW-910] Add kustomize template for uni04delta-ipv6-adoption --- .../network-values/values.yaml.j2 | 147 ++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 new file mode 100644 index 0000000000..5c33c206fb --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 @@ -0,0 +1,147 @@ +--- +# source: uni04delta-ipv6-adoption/network-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ 
cifmw_networking_env_definition.instances[host]['hostname'] }} +{# FIXEME: (hjensas): We need to ensure the OCP cluster_name and base_domain is available here #} +{# Because devscripts use fqdn for node names when ipv6 #} + node_name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }}.ocp.openstack.lab +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} +{% if network.tools.netconfig is defined %} + subnets: + - name: subnet1 + cidr: {{ network[_ipv.network_vX] }} + gateway: {{ omit if network[_ipv.gw_vX] is not defined else network[_ipv.gw_vX] }} + vlan: {{ omit if network.vlan_id is not defined else network.vlan_id }} + allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} 
+{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} + iface: {{ omit if ns.interfaces[network.network_name] is not defined else network.network_name }} + base_iface: {{ omit if ns.interfaces[network.network_name] is not defined else ns.interfaces[network.network_name] }} +{% elif network.network_name != "ironic" %} + iface: {{ omit if ns.interfaces[network.network_name] is not defined else ns.interfaces[network.network_name] }} +{% else %} + iface: {{ omit if ns.interfaces[network.network_name] is not defined else network.network_name }} +{% endif %} +{% if network.tools.multus is defined and network.network_name == "ctlplane" %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", + "master": "ospbr", + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% if network.tools.multus is defined and network.network_name == "ironic" %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "ironic", + "type": "bridge", + "bridge": "ironic", + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% if network.tools.multus is defined and network.network_name not in ["ctlplane", "ironic"] %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", + "master": "{{ network.network_name if network.vlan_id is defined else ns.interfaces[network.network_name] }}", + "ipam": { + "type": "whereabouts", + "range": "{{ 
network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} From 21b3b276c96d83adbdda54020070adbd9fb04fe7 Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Thu, 27 Mar 2025 00:03:55 +0100 Subject: [PATCH 054/480] ci_dcn_site: Add removing and adding a node to a DCN site --- playbooks/dcn.yml | 44 +++- roles/ci_dcn_site/README.md | 2 + roles/ci_dcn_site/tasks/add_node.yml | 115 +++++++++ roles/ci_dcn_site/tasks/ceph.yml | 19 ++ roles/ci_dcn_site/tasks/post-ceph.yml | 4 + roles/ci_dcn_site/tasks/remove_node.yml | 231 ++++++++++++++++++ roles/ci_dcn_site/tasks/scaledown_site.yml | 69 ++++-- .../tasks/update_conf_new_node.yml | 145 +++++++++++ .../dataplane_remove_node_deploy.yaml.j2 | 9 + .../templates/deployment/values.yaml.j2 | 2 +- .../node_network_env_definitions.yaml.j2 | 68 ++++++ 
roles/ci_dcn_site/templates/values.yaml.j2 | 12 +- roles/cifmw_cephadm/tasks/scale_down_node.yml | 97 ++++++++ 13 files changed, 786 insertions(+), 31 deletions(-) create mode 100644 roles/ci_dcn_site/tasks/add_node.yml create mode 100644 roles/ci_dcn_site/tasks/remove_node.yml create mode 100644 roles/ci_dcn_site/tasks/update_conf_new_node.yml create mode 100644 roles/ci_dcn_site/templates/dataplane_remove_node_deploy.yaml.j2 create mode 100644 roles/ci_dcn_site/templates/node_network_env_definitions.yaml.j2 create mode 100644 roles/cifmw_cephadm/tasks/scale_down_node.yml diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index bf5a956f96..761cfd68e9 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -61,7 +61,6 @@ when: - _subnet_network_range != '' - _ceph_bootstrap_node != '' - - cifmw_ci_dcn_site_scaledown_az is not defined or cifmw_ci_dcn_site_scaledown_az == "" ansible.builtin.include_role: name: ci_dcn_site @@ -83,6 +82,49 @@ name: ci_dcn_site when: cifmw_ci_dcn_site_scaledown_az is defined and cifmw_ci_dcn_site_scaledown_az != "" + - name: Remove a compute node from the deployment + vars: + _node_to_remove: "{{ cifmw_ci_dcn_site_scaledown_node }}" + _az: "{{ cifmw_ci_dcn_site_scaledown_node_az | default('az1') }}" + _group_name: "{{ az_to_group_map[_az] }}" + _group_hosts: "{{ groups[_group_name] }}" + _edpm_hosts: "{{ cifmw_baremetal_hosts | dict2items | selectattr('key', 'in', groups[_group_name]) | items2dict }}" + _ceph_bootstrap_node: "{{ (_edpm_hosts | dict2items | first).key if _edpm_hosts | length > 0 else '' }}" + when: + - cifmw_ci_dcn_site_scaledown_node is defined and cifmw_ci_dcn_site_scaledown_node != "" + ansible.builtin.include_role: + name: ci_dcn_site + tasks_from: remove_node.yml + + - name: Add a compute node to a site + when: + - cifmw_ci_dcn_site_scaleout_node is defined and cifmw_ci_dcn_site_scaleout_node != "" + block: + # We need to update ci-framework related variable and inventory files to include the new host + - name: 
Update ci-framework variables and inventory files + vars: + _node_to_add: "{{ cifmw_ci_dcn_site_scaleout_node }}" + _az: "{{ cifmw_ci_dcn_site_scaleout_node_az | default('az1') }}" + _subnet: "{{ cifmw_ci_dcn_site_scaleout_node_subnet | default('subnet2') }}" + _group_name: "{{ az_to_group_map[_az] }}" + ansible.builtin.include_role: + name: ci_dcn_site + tasks_from: update_conf_new_node.yml + + - name: Add a compute node to dataplane + vars: + _node_to_add: "{{ cifmw_ci_dcn_site_scaleout_node }}" + _az: "{{ cifmw_ci_dcn_site_scaleout_node_az | default('az1') }}" + _subnet: "{{ cifmw_ci_dcn_site_scaleout_node_subnet | default('subnet2') }}" + _subnet_network_range: "{{ _network_ranges[_az[-1] | int] }}" + _group_name: "{{ az_to_group_map[_az] }}" + _group_hosts: "{{ groups[_group_name] }}" + _edpm_hosts: "{{ updated_cifmw_baremetal_hosts | dict2items | selectattr('key', 'in', groups[_group_name]) | items2dict }}" + _ceph_bootstrap_node: "{{ (_edpm_hosts | dict2items | first).key if _edpm_hosts | length > 0 else '' }}" + ansible.builtin.include_role: + name: ci_dcn_site + tasks_from: add_node.yml + - name: Find all created CRs ansible.builtin.find: paths: >- diff --git a/roles/ci_dcn_site/README.md b/roles/ci_dcn_site/README.md index fdcfff17ff..43055b50e3 100644 --- a/roles/ci_dcn_site/README.md +++ b/roles/ci_dcn_site/README.md @@ -16,6 +16,8 @@ with a collocated Ceph cluster. * `_group_name`: The name of the group of nodes to be deployed, e.g. `dcn1-computes` * `_subnet`: The name of the subnet the DCN site will use, e.g. `subnet2` * `_subnet_network_range`: The range of the subnet the DCN site will use, e.g. `192.168.133.0/24` +* `_node_to_remove`: The hostname of the node to be removed from the DCN deployment. +* `_node_to_add`: The hostname of the node to be added to the specified AZ. 
## Examples diff --git a/roles/ci_dcn_site/tasks/add_node.yml b/roles/ci_dcn_site/tasks/add_node.yml new file mode 100644 index 0000000000..8442e4d8b6 --- /dev/null +++ b/roles/ci_dcn_site/tasks/add_node.yml @@ -0,0 +1,115 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# We are scaling out and the ceph cluster already exists we can directly create the Nodeset utilizing the existing +# "pre-ceph" values and then update it with "post-ceph" values which include additional ceph configuration and then +# create and apply Deployment CR with full list of dataplane services. 
+- name: Create a values.yaml.j2 file with full list of dataplane services + ansible.builtin.copy: + dest: "{{ ci_dcn_site_cifmw_repo_path }}/roles/ci_dcn_site/templates/deployment/values.yaml.j2" + mode: '0644' + content: | + --- + # source: dcn/deployment/values.yaml.j2 + apiVersion: v1 + kind: ConfigMap + metadata: + name: edpm-deployment-values-post-ceph + annotations: + config.kubernetes.io/local-config: "true" + data: + nodeset_name: "{% raw %}{{ _group_name }}{% endraw %}-edpm" + deployment: + name: "add-node-{% raw %}{{ _group_name }}{% endraw %}" + servicesOverride: + - bootstrap + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ceph-client + - ovn + - "neutron-metadata-cell{% raw %}{{ _all_azs.index(_az) +1 }}{% endraw %}" + - libvirt + - nova-custom-ceph-{% raw %}{{ _az }}{% endraw %} + +- name: Initialize vars + ansible.builtin.set_fact: + _ceph_vars_list: [] + _all_azs: [] + +- name: Find all ceph .conf and .keyring files + register: _ceph_conf_files + ansible.builtin.find: + paths: "/tmp" + patterns: "ceph*.conf,ceph*.keyring,az*.conf,az*.keyring" + recurse: false + +- name: Load ceph configuration files + ansible.builtin.set_fact: + _ceph_files: "{{ _ceph_conf_files.files | map(attribute='path') | list }}" + +- name: Render the NodeSet values.yaml + vars: + _edpm_instance_dict: "{{ cifmw_networking_env_definition.instances }}" + _edpm_network_dict: "{{ cifmw_networking_env_definition.networks }}" + _ssh_authorizedkeys: "{{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }}" + _ssh_private_key: "{{ lookup('file', '~/.ssh/id_cifw', rstrip=False) }}" + _ssh_public_key: "{{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }}" + _migration_priv_key: "{{ lookup('file', '~/ci-framework-data/artifacts/nova_migration_key', rstrip=False) }}" + _migration_pub_key: "{{ lookup('file', '~/ci-framework-data/artifacts/nova_migration_key.pub', 
rstrip=False) }}" + ansible.builtin.template: + backup: true + src: "templates/edpm-pre-ceph/nodeset/values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/edpm-pre-ceph/nodeset/values.yaml" + mode: "0644" + +- name: Kustomize NodeSet + ansible.builtin.set_fact: + nodeset_cr: >- + {{ lookup('kubernetes.core.kustomize', + dir=ci_dcn_site_arch_path + '/edpm-pre-ceph/nodeset') }} + +- name: Save the NodeSet CR + ansible.builtin.copy: + mode: "0644" + dest: "{{ ci_dcn_site_arch_path }}/dataplane-nodeset-pre-ceph_{{ _az }}.yaml" + content: "{{ nodeset_cr }}" + backup: true + +- name: Render the values with updated ceph configuration, kustomize and apply CR of NodeSet and DataPlaneDeployment + ansible.builtin.import_tasks: post-ceph.yml + +- name: Set Network related facts + ansible.builtin.include_tasks: set_network_facts.yml + +- name: Deploy Ceph in DCN context + ansible.builtin.include_tasks: ceph.yml + +- name: Run Nova cell discovery for new DCN hosts + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: nova-cell0-conductor-0 + command: nova-manage cell_v2 discover_hosts --verbose + +- name: Add new hosts to AZ + ansible.builtin.include_tasks: az.yml diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index b44d837a5c..835d64dcd5 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -67,6 +67,25 @@ dest: "~/ci-framework-data/parameters/ceph-{{ _az }}.yml" content: "{{ _content | to_nice_yaml }}" +- name: Check if ceph_client file already exists + ansible.builtin.stat: + path: "/tmp/ceph_client_{{_az}}.yml" + register: ceph_file_stat + +- name: Load ceph variables if the file exists + ansible.builtin.include_vars: + file: "/tmp/ceph_client_{{_az}}.yml" + when: ceph_file_stat.stat.exists + +- name: If It exists Add cifmw_cephadm_keys to ceph variables file + ansible.builtin.blockinfile: + path: "~/ci-framework-data/parameters/ceph-{{ _az }}.yml" + 
block: | + cifmw_cephadm_keys: + {{ keys | default({}) | to_nice_yaml }} + insertafter: EOF + when: ceph_file_stat.stat.exists + - name: Deploy Ceph cifmw.general.ci_script: output_dir: "/home/zuul/ci-framework-data/artifacts" diff --git a/roles/ci_dcn_site/tasks/post-ceph.yml b/roles/ci_dcn_site/tasks/post-ceph.yml index b2758cfdc6..71fb59ae86 100644 --- a/roles/ci_dcn_site/tasks/post-ceph.yml +++ b/roles/ci_dcn_site/tasks/post-ceph.yml @@ -47,6 +47,10 @@ ansible.builtin.set_fact: ci_dcn_site_glance_map: "{{ ci_dcn_site_glance_map | combine( { item: ['az0', item ] } ) }}" +- name: Get fsid of ceph cluster for currently deployed AZ + ansible.builtin.set_fact: + cifmw_ceph_client_fsid: "{{ _ceph_vars_list | selectattr('cifmw_ceph_client_cluster', 'equalto', _az) | map(attribute='cifmw_ceph_client_fsid') | first }}" + - name: Render the post-ceph values.yaml ansible.builtin.template: mode: "0644" diff --git a/roles/ci_dcn_site/tasks/remove_node.yml b/roles/ci_dcn_site/tasks/remove_node.yml new file mode 100644 index 0000000000..06ccb5778e --- /dev/null +++ b/roles/ci_dcn_site/tasks/remove_node.yml @@ -0,0 +1,231 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Remove the compute node from CEPH cluster +- name: Initialize vars + ansible.builtin.set_fact: + _ceph_vars_list: [] + +- name: Find all ceph variable files + register: _ceph_vars_files + ansible.builtin.find: + paths: "/tmp" + patterns: "ceph_client_az*.yml" + recurse: false + +- name: Load all ceph vars from files + loop: "{{ _ceph_vars_files.files | map(attribute='path') | list }}" + register: _ceph_vars + ansible.builtin.include_vars: + file: "{{ item }}" + +- name: Combine ceph variables into a list of dictionaries + loop: "{{ _ceph_vars.results }}" + ansible.builtin.set_fact: + _ceph_vars_list: "{{ _ceph_vars_list | union([item.ansible_facts]) }}" + +- name: Get compute nodes from the scale-downed AZ + register: removed_compute + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + sh -c "openstack compute service list -c Host -c Zone -f value | grep {{ _node_to_remove}} | awk '{print $1}'" + +- name: Set removed compute node fact + ansible.builtin.set_fact: + _removed_compute: "{{ removed_compute.stdout | trim }}" + +- name: Get AZ of compute node to be removed + register: compute_az + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + sh -c "openstack compute service list -c Host -c Zone -f value | grep {{ _removed_compute }} | awk '{print $2}'" + +- name: Set removed compute node fact + ansible.builtin.set_fact: + _compute_az: "{{ compute_az.stdout | trim }}" + +- name: List instances which are running on the node to be removed + register: osp_instances + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack server list --availability-zone {{ _compute_az }} --host {{ _removed_compute }} --all-projects -f value -c ID + +- name: Clean the running instances from the node up + 
kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack server delete --force {{ item }} + loop: "{{ osp_instances.stdout_lines }}" + +- name: Delete the compute nodes from the aggregate + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack aggregate remove host {{ _compute_az }} {{ _removed_compute }} + +- name: Disable the compute service on scale-downed compute nodes + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack compute service set {{ _removed_compute }} nova-compute --disable + +- name: Get fsid of ceph cluster for Ceph cluster the node is being removed from + ansible.builtin.set_fact: + cifmw_cephadm_fsid: "{{ _ceph_vars_list | selectattr('cifmw_ceph_client_cluster', 'equalto', _az) | map(attribute='cifmw_ceph_client_fsid') | first }}" + +- name: Set cifmw_cephadm_cluster which the node is removed from + ansible.builtin.set_fact: + cifmw_cephadm_cluster: "{{ _az }}" + +- name: Remove the node from Ceph cluster + ansible.builtin.include_role: + name: cifmw_cephadm + tasks_from: scale_down_node.yml + vars: + ceph_bootstrap_node: "{{ _ceph_bootstrap_node }}" + ceph_node_to_remove: "{{ _node_to_remove}}" + +- name: Get the Cell UUID + register: cell_uuid + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: nova-cell0-conductor-0 + command: >- + sh -c "nova-manage cell_v2 list_hosts | grep {{ _removed_compute }} | awk '{print $4}'" + +- name: Remove the compute hosts from the cell + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: nova-cell0-conductor-0 + command: >- + nova-manage cell_v2 delete_host --cell_uuid {{ cell_uuid.stdout }} 
--host {{ _removed_compute }} + +- name: Stop the ovn_controller service + ansible.builtin.service: + name: edpm_ovn_controller + state: stopped + become: true + delegate_to: "{{ _node_to_remove}}" + +- name: Stop the ovn metadata agent service + ansible.builtin.service: + name: edpm_ovn_metadata_agent + state: stopped + become: true + delegate_to: "{{ _node_to_remove}}" + +- name: Stop the nova-compute service + ansible.builtin.service: + name: edpm_nova_compute + state: stopped + become: true + delegate_to: "{{ _node_to_remove}}" + +- name: Remove the systemd unit files of the ovn and nova-compute containers + ansible.builtin.shell: | + rm -f /etc/systemd/system/edpm_ovn_controller.service + rm -f /etc/systemd/system/edpm_ovn_metadata_agent.service + rm -f /etc/systemd/system/edpm_nova_compute.service + become: true + delegate_to: "{{ _node_to_remove}}" + +- name: Delete the network agents on scale-downed compute nodes + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + sh -c "openstack network agent list --host {{ _removed_compute }} -c ID -f value | xargs openstack network agent delete" + +- name: Remove specific node from OpenStackDataPlaneNodeSet + kubernetes.core.k8s_json_patch: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: "{{ az_to_group_map[_compute_az] }}-edpm" + namespace: "openstack" + patch: + - op: "remove" + path: "/spec/nodes/edpm-{{ _node_to_remove }}" + +- name: Create OpenStackDataPlaneDeployment CR file + ansible.builtin.template: + src: dataplane_remove_node_deploy.yaml.j2 + dest: "{{ ci_dcn_site_arch_path }}/dataplane_remove_node_deploy.yaml" + mode: "0644" + backup: true + +- name: Apply OpenStackDataPlaneDeployment CR + kubernetes.core.k8s: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + state: present + namespace: openstack + src: "{{ 
ci_dcn_site_arch_path }}/dataplane_remove_node_deploy.yaml" + +- name: Wait for the node to be removed from the OpenStackDataPlaneNodeSet CR + kubernetes.core.k8s_info: + api_version: openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + name: "{{ az_to_group_map[_compute_az] }}-edpm" + namespace: openstack + register: nodeset_status + until: (_node_to_remove not in (nodeset_status.resources[0].spec.nodes | default({})).keys()) + retries: 30 + delay: 10 + +- name: Stop the VM + ansible.builtin.shell: | + virsh destroy cifmw-{{ _node_to_remove }} + delegate_to: hypervisor + changed_when: false + +# Remove the node from ci-framework variables and inventory files +- name: Remove the node from ci-framework variables and inventory files + block: + - name: Load the YAML file + ansible.builtin.slurp: + src: "{{ item }}" + register: vars_files + with_items: + - /etc/ci/env/networking-environment-definition.yml + - "{{ ansible_user_dir }}/reproducer-variables.yml" + - "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + + - name: Remove the node and save the updated YAML file + become: true + ansible.builtin.copy: + dest: "{{ item.item }}" + content: "{{ item.content | b64decode | from_yaml | ansible.utils.remove_keys(target=[_node_to_remove]) | to_nice_yaml }}" + mode: '0644' + with_items: "{{ vars_files.results }}" + no_log: true diff --git a/roles/ci_dcn_site/tasks/scaledown_site.yml b/roles/ci_dcn_site/tasks/scaledown_site.yml index 23ba5da09b..5665a058d0 100644 --- a/roles/ci_dcn_site/tasks/scaledown_site.yml +++ b/roles/ci_dcn_site/tasks/scaledown_site.yml @@ -42,6 +42,24 @@ | list }} when: not az_hosts.failed +- name: List instances which are running on the scale-downed AZ + register: osp_instances + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack server list --availability-zone {{ _az_to_scaledown }} --all-projects -f value -c ID + +- 
name: Clean the running instances from the AZ up before deleting the hosts + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack server delete --force {{ item }} + loop: "{{ osp_instances.stdout_lines }}" + - name: Delete the compute nodes from the aggregate loop: "{{ az_hosts_list }}" kubernetes.core.k8s_exec: @@ -103,24 +121,6 @@ ansible.builtin.set_fact: ci_dcn_site_glance_map: "{{ ci_dcn_site_glance_map | combine( { item: ['az0', item ] } ) }}" -- name: List instances which are running on the scale-downed AZ - register: osp_instances - kubernetes.core.k8s_exec: - api_key: "{{ _auth_results.openshift_auth.api_key }}" - namespace: openstack - pod: openstackclient - command: >- - openstack server list --availability-zone {{ _az_to_scaledown }} --all-projects -f value -c ID - -- name: Clean the running instances from the AZ up before deleting the hosts from Cell - kubernetes.core.k8s_exec: - api_key: "{{ _auth_results.openshift_auth.api_key }}" - namespace: openstack - pod: openstackclient - command: >- - openstack server delete --force {{ item }} - loop: "{{ osp_instances.stdout_lines }}" - - name: Get the Cell UUID register: cell_uuid kubernetes.core.k8s_exec: @@ -177,16 +177,30 @@ az1: cell2 az2: cell3 ansible.builtin.shell: | - oc delete rabbitmqclusters rabbitmq-{{ az_to_cell_map[_az_to_scaledown] }} - oc delete galera openstack-{{ az_to_cell_map[_az_to_scaledown] }} + oc delete -n openstack rabbitmqclusters rabbitmq-{{ az_to_cell_map[_az_to_scaledown] }} + oc delete -n openstack galera openstack-{{ az_to_cell_map[_az_to_scaledown] }} + +- name: Get list of pods in the openstack namespace + kubernetes.core.k8s_info: + kind: Pod + namespace: openstack + api_key: "{{ _auth_results.openshift_auth.api_key }}" + register: pod_list + +- name: Find the cinder scheduler pod prefix + ansible.builtin.set_fact: + cinder_prefix: "{{ (item.metadata.name | 
regex_search('^(cinder-[a-z0-9]+)')) }}" + loop: "{{ pod_list.resources }}" + when: item.metadata.name is match('^cinder-.*-scheduler-0$') + no_log: true - name: Delete the cinder-volume service kubernetes.core.k8s_exec: api_key: "{{ _auth_results.openshift_auth.api_key }}" namespace: openstack - pod: cinder-scheduler-0 + pod: "{{ cinder_prefix }}-scheduler-0" command: >- - cinder-manage service remove cinder-volume cinder-volume-{{ _az_to_scaledown }}-0@ceph + cinder-manage service remove cinder-volume {{ cinder_prefix }}-volume-{{ _az_to_scaledown }}-0@ceph - name: Fetch ceph-conf-files secret register: secret_info @@ -304,5 +318,14 @@ - name: Delete each Secret which contains TLS certificate for the NodeSet nodes ansible.builtin.command: - cmd: oc delete Secret {{ item }} + cmd: oc -n openstack delete Secret {{ item }} loop: "{{ osdpns_info.resources[0].status.secretHashes.keys() | select('search', 'cert') | list }}" + +- name: Delete temporary files with ceph client variables and keys + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /tmp/ceph_conf_files/{{ _az_to_scaledown }}.conf + - /tmp/ceph_conf_files/{{ _az_to_scaledown }}.client.openstack.keyring + - /tmp/ceph_client_{{ _az_to_scaledown }}.yml diff --git a/roles/ci_dcn_site/tasks/update_conf_new_node.yml b/roles/ci_dcn_site/tasks/update_conf_new_node.yml new file mode 100644 index 0000000000..03f24670da --- /dev/null +++ b/roles/ci_dcn_site/tasks/update_conf_new_node.yml @@ -0,0 +1,145 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Read the existing inventory file + ansible.builtin.slurp: + src: "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + register: inventory_content + +- name: Parse YAML content of inventory file + ansible.builtin.set_fact: + inventory_data: "{{ inventory_content.content | b64decode | from_yaml }}" + +- name: Replicate and modify the host entry in the inventory + ansible.builtin.set_fact: + new_host_entry: "{{ inventory_data[_group_name]['hosts'] | dict2items | first | json_query('value') | combine({'ansible_host': _node_to_add + '.utility'}, recursive=True) }}" + +- name: Add the new host to the group of computes in the inventory + ansible.builtin.set_fact: + updated_inventory: "{{ inventory_data | combine({_group_name: {'hosts': inventory_data[_group_name]['hosts'] | combine({_node_to_add: new_host_entry})}}, recursive=True) }}" + +- name: Write the updated inventory back to the file + ansible.builtin.copy: + content: "{{ updated_inventory | to_nice_yaml }}" + dest: "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + mode: '0644' + +- name: Read the existing networking-environment-definition.yml file + ansible.builtin.slurp: + src: /etc/ci/env/networking-environment-definition.yml + register: net_env_content + +- name: Parse YAML content of networking-environment-definition.yml file + ansible.builtin.set_fact: + net_env_data: "{{ net_env_content.content | b64decode | from_yaml }}" + +- name: The map for az0 contains all AZ backends + ansible.builtin.set_fact: + az_to_dcn: + az0: "" + az1: dcn1 + az2: 
dcn2 + +- name: Load new host networking environment definition from template into a variable + ansible.builtin.set_fact: + new_net_host_entry: "{{ lookup('template', 'node_network_env_definitions.yaml.j2') | from_yaml }}" + +- name: Merge both YAML files of networking environment definition + ansible.builtin.set_fact: + merged_net_env_data: "{{ net_env_data | combine(new_net_host_entry, recursive=True) }}" + +- name: Save merged YAML file of networking environment definition + become: true + ansible.builtin.copy: + dest: /etc/ci/env/networking-environment-definition.yml + content: "{{ merged_net_env_data | to_nice_yaml }}" + mode: '0644' + +- name: Get MAC address of public interface on the new host + ansible.builtin.shell: | + set -o pipefail + virsh domiflist cifmw-{{ _node_to_add }} | grep {{ az_to_dcn[_az] }}_pb | awk '{print $5}' + register: mac_pb + delegate_to: hypervisor + +- name: Get MAC address of trunk interface on the new host + ansible.builtin.shell: | + set -o pipefail + virsh domiflist cifmw-{{ _node_to_add }} | grep {{ az_to_dcn[_az] }}_tr | awk '{print $5}' + register: mac_tr + delegate_to: hypervisor + +- name: Get UUID of the VM hosting the new host + ansible.builtin.shell: | + virsh domuuid cifmw-{{ _node_to_add }} + register: vm_uuid + delegate_to: hypervisor + +- name: Create content of new item of cifmw_baremetal_hosts in reproducer-variables.yml + ansible.builtin.set_fact: + new_variable_host_entry: >- + {{ + { + "cifmw_baremetal_hosts": { + _node_to_add: { + "boot_mode": "legacy", + "connection": "redfish-virtualmedia+http://sushy.utility:8000/redfish/v1/Systems/" + vm_uuid.stdout, + "nics": [ + { "mac": mac_pb.stdout, "network": az_to_dcn[_az] + "_pb" }, + { "mac": mac_tr.stdout, "network": az_to_dcn[_az] + "_tr" } + ], + "password": "password", + "username": "admin", + "uuid": vm_uuid.stdout + } + } + } + }} + +- name: Read the existing reproducer-variables.yml file + ansible.builtin.slurp: + src: "{{ ansible_user_dir 
}}/reproducer-variables.yml" + register: vars_env_content + +- name: Parse YAML content of reproducer-variables.yml file + ansible.builtin.set_fact: + vars_env_data: "{{ vars_env_content.content | b64decode | from_yaml }}" + +- name: Merge both YAML files of reproducer variables + ansible.builtin.set_fact: + merged_vars_env_data: "{{ vars_env_data | combine(new_variable_host_entry, recursive=True) }}" + +- name: Save merged YAML file + ansible.builtin.copy: + dest: "{{ ansible_user_dir }}/reproducer-variables.yml" + content: "{{ merged_vars_env_data | to_nice_yaml }}" + mode: '0644' + +- name: Created updated cifmw_baremetal_hosts fact + ansible.builtin.set_fact: + updated_cifmw_baremetal_hosts: "{{ merged_vars_env_data['cifmw_baremetal_hosts'] }}" + +- name: Load reproducer-variables + ansible.builtin.include_vars: + file: "~/reproducer-variables.yml" + +- name: Load networking-environment-definition + ansible.builtin.include_vars: + file: "/etc/ci/env/networking-environment-definition.yml" + name: cifmw_networking_env_definition + +- name: Load updated inventory + ansible.builtin.meta: refresh_inventory diff --git a/roles/ci_dcn_site/templates/dataplane_remove_node_deploy.yaml.j2 b/roles/ci_dcn_site/templates/dataplane_remove_node_deploy.yaml.j2 new file mode 100644 index 0000000000..238ba678b5 --- /dev/null +++ b/roles/ci_dcn_site/templates/dataplane_remove_node_deploy.yaml.j2 @@ -0,0 +1,9 @@ +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: {{ 'remove-node-' + az_to_group_map[_compute_az] }} +spec: + nodeSets: + - {{ az_to_group_map[_compute_az] }}-edpm + servicesOverride: + - ssh-known-hosts diff --git a/roles/ci_dcn_site/templates/deployment/values.yaml.j2 b/roles/ci_dcn_site/templates/deployment/values.yaml.j2 index 24c13f7730..058e3abe05 100644 --- a/roles/ci_dcn_site/templates/deployment/values.yaml.j2 +++ b/roles/ci_dcn_site/templates/deployment/values.yaml.j2 @@ -15,6 +15,6 @@ data: - install-certs - 
ceph-client - ovn - - neutron-metadata-cell{{ _all_azs | length }} + - neutron-metadata-cell{{ _all_azs.index(_az) +1 }} - libvirt - nova-custom-ceph-{{ _az }} diff --git a/roles/ci_dcn_site/templates/node_network_env_definitions.yaml.j2 b/roles/ci_dcn_site/templates/node_network_env_definitions.yaml.j2 new file mode 100644 index 0000000000..ad70c3f726 --- /dev/null +++ b/roles/ci_dcn_site/templates/node_network_env_definitions.yaml.j2 @@ -0,0 +1,68 @@ +# The format and values mirror the automation/net-env/dcn.yaml from architecture repository +instances: + {{ _node_to_add }}: + hostname: {{ _node_to_add }} + name: {{ _node_to_add }} + networks: + ctlplane{{ az_to_dcn[_az] }}: + interface_name: eth1 + ip_v4: 192.168.1{{ _subnet[-1] | int + 1 }}{{ _subnet[-1] | int + 1 }}.114 + is_trunk_parent: true + mac_addr: 52:54:09:77:24:c7 + mtu: 1500 + netmask_v4: 255.255.255.0 + network_name: ctlplane{{ az_to_dcn[_az] }} + prefix_length_v4: 24 + skip_nm: false + internalapi{{ az_to_dcn[_az] }}: + interface_name: eth1.{{ _subnet[-1] | int - 1 }}0 + ip_v4: 172.17.{{ _subnet[-1] | int - 1 }}0.114 + is_trunk_parent: false + mac_addr: 52:54:00:74:63:57 + mtu: 1496 + netmask_v4: 255.255.255.0 + network_name: internalapi{{ az_to_dcn[_az] }} + parent_interface: eth1 + prefix_length_v4: 24 + skip_nm: false + trunk_parent: ctlplane{{ az_to_dcn[_az] }} + vlan_id: {{ _subnet[-1] | int + 1 }}0 + storage{{ az_to_dcn[_az] }}: + interface_name: eth1.{{ _subnet[-1] | int + 1 }}1 + ip_v4: 172.18.{{ _subnet[-1] | int - 1 }}0.114 + is_trunk_parent: false + mac_addr: 52:54:00:36:ba:ae + mtu: 1496 + netmask_v4: 255.255.255.0 + network_name: storage{{ az_to_dcn[_az] }} + parent_interface: eth1 + prefix_length_v4: 24 + skip_nm: false + trunk_parent: ctlplane{{ az_to_dcn[_az] }} + vlan_id: {{ _subnet[-1] | int + 1 }}1 + storagemgmt{{ az_to_dcn[_az] }}: + interface_name: eth1.{{ _subnet[-1] | int + 1 }}3 + ip_v4: 172.20.{{ _subnet[-1] | int - 1 }}0.114 + is_trunk_parent: false + mac_addr: 
52:54:00:7a:e9:e0 + mtu: 1500 + netmask_v4: 255.255.255.0 + network_name: storagemgmt{{ az_to_dcn[_az] }} + parent_interface: eth1 + prefix_length_v4: 24 + skip_nm: false + trunk_parent: ctlplane{{ az_to_dcn[_az] }} + vlan_id: {{ _subnet[-1] | int + 1 }}3 + tenant{{ az_to_dcn[_az] }}: + interface_name: eth1.{{ _subnet[-1] | int + 1 }}2 + ip_v4: 172.19.{{ _subnet[-1] | int - 1 }}0.114 + is_trunk_parent: false + mac_addr: 52:54:00:3a:cf:53 + mtu: 1496 + netmask_v4: 255.255.255.0 + network_name: tenant{{ az_to_dcn[_az] }} + parent_interface: eth1 + prefix_length_v4: 24 + skip_nm: false + trunk_parent: ctlplane{{ az_to_dcn[_az] }} + vlan_id: {{ _subnet[-1] | int + 1 }}2 diff --git a/roles/ci_dcn_site/templates/values.yaml.j2 b/roles/ci_dcn_site/templates/values.yaml.j2 index e72fd5ffae..4d0815660a 100644 --- a/roles/ci_dcn_site/templates/values.yaml.j2 +++ b/roles/ci_dcn_site/templates/values.yaml.j2 @@ -21,7 +21,7 @@ data: - install-certs - ceph-client - ovn - - neutron-metadata-cell{{ _all_azs | length }} + - neutron-metadata-cell{{ _all_azs.index(_az) +1 }} - libvirt - nova-custom-ceph-{{ _az }} nova: @@ -32,8 +32,8 @@ data: [libvirt] images_type=rbd images_rbd_pool=vms - images_rbd_ceph_conf=/etc/ceph/{{ cifmw_ceph_client_cluster }}.conf - images_rbd_glance_store_name={{ cifmw_ceph_client_cluster }} + images_rbd_ceph_conf=/etc/ceph/{{ _az }}.conf + images_rbd_glance_store_name={{ _az }} images_rbd_glance_copy_poll_interval=15 images_rbd_glance_copy_timeout=600 rbd_user=openstack @@ -49,17 +49,17 @@ data: - configMapRef: name: ceph-nova-{{ _az }} - secretRef: - name: nova-cell{{ _all_azs | length }}-compute-config + name: nova-cell{{ _all_azs.index(_az) +1 }}-compute-config - secretRef: name: nova-migration-ssh-key neutron-metadata: customDataplaneService: - name: neutron-metadata-cell{{ _all_azs | length }} + name: neutron-metadata-cell{{ _all_azs.index(_az) +1 }} dataSources: - secretRef: name: neutron-ovn-metadata-agent-neutron-config - secretRef: - name: 
nova-cell{{ _all_azs | length }}-metadata-neutron-config + name: nova-cell{{ _all_azs.index(_az) +1 }}-metadata-neutron-config kind: ConfigMap metadata: annotations: diff --git a/roles/cifmw_cephadm/tasks/scale_down_node.yml b/roles/cifmw_cephadm/tasks/scale_down_node.yml new file mode 100644 index 0000000000..3ec79e2544 --- /dev/null +++ b/roles/cifmw_cephadm/tasks/scale_down_node.yml @@ -0,0 +1,97 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Fail if Ceph FSID is not set + ansible.builtin.fail: + msg: "Ceph FSID must be defined" + when: cifmw_cephadm_fsid is undefined + +- name: Get ceph_cli + ansible.builtin.include_tasks: ceph_cli.yml + +- name: Get list of Ceph pools + become: true + ansible.builtin.command: >- + {{ cifmw_cephadm_ceph_cli }} osd pool ls --format json + register: ceph_pools + delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Get number of Ceph nodes + become: true + ansible.builtin.command: >- + {{ cifmw_cephadm_ceph_cli }} orch host ls --format json + register: ceph_hosts + delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Set number of Ceph nodes + ansible.builtin.set_fact: + ceph_node_count: "{{ ceph_hosts.stdout | from_json | length }}" + +# We may need to reduce the replica size for the pools to be able to drain the node +- name: Process each Ceph pool to reduce replica size + vars: + ceph_pools_list: "{{ ceph_pools.stdout | from_json }}" + block: + - name: Get current replica size for each pool + become: true + ansible.builtin.command: >- + {{ cifmw_cephadm_ceph_cli }} osd pool get {{ item }} size --format json + register: pool_sizes + with_items: "{{ ceph_pools_list }}" + delegate_to: "{{ ceph_bootstrap_node }}" + + - name: Extract pool sizes per ceph pool into a dictionary + ansible.builtin.set_fact: + pool_size_map: "{{ pool_size_map | default({}) | combine({item.item: (item.stdout | from_json).size | int}) }}" + with_items: "{{ pool_sizes.results }}" + + - name: Reduce replica size if it matches node count + become: true + ansible.builtin.command: >- + {{ cifmw_cephadm_ceph_cli }} osd pool set {{ item.key }} size {{ (item.value - 1) }} + when: + - item.value | int == ceph_node_count | int + - item.value | int > 2 + with_dict: "{{ pool_size_map }}" + delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Drain all Ceph daemons from the host + become: true + ansible.builtin.command: "{{ cifmw_cephadm_ceph_cli }} orch host drain {{ ceph_node_to_remove }}" + 
delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Check OSD removal status + become: true + ansible.builtin.command: "{{ cifmw_cephadm_ceph_cli }} orch osd rm status" + register: osd_rm_status + until: osd_rm_status.stdout == "No OSD remove/replace operations reported" + retries: 10 + delay: 30 + delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Check if all daemons are removed from host + become: true + ansible.builtin.command: "{{ cifmw_cephadm_ceph_cli }} orch ps {{ ceph_node_to_remove }} --format json" + register: ps_result + until: (ps_result.stdout | from_json) | length == 0 + retries: 10 + delay: 30 + delegate_to: "{{ ceph_bootstrap_node }}" + +- name: Remove host from Ceph cluster + become: true + ansible.builtin.command: "{{ cifmw_cephadm_ceph_cli }} orch host rm {{ ceph_node_to_remove }}" + delegate_to: "{{ ceph_bootstrap_node }}" From 24abaf4bf6f9934a7516892681f97d58b8e2e651 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 15 Apr 2025 10:16:25 +0200 Subject: [PATCH 055/480] Retry nmcli command when timeout reached From time to time, Ansible is raising error: TASK [Ensure crc knows about its second NIC] crc | ERROR crc | { crc | "msg": "Error: Failed to modify connection 'ci-private-network': Timeout was reached\n", crc | "name": "ci-private-network", crc | "rc": 1 crc | } Add retry parameter to avoid CI job to fail. 
--- ci/playbooks/multinode-customizations.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ci/playbooks/multinode-customizations.yml b/ci/playbooks/multinode-customizations.yml index 5d49587a04..72f318d830 100644 --- a/ci/playbooks/multinode-customizations.yml +++ b/ci/playbooks/multinode-customizations.yml @@ -55,6 +55,10 @@ ip4: "{{ _crc_default_net_ip }}" gw4: "{{ _crc_default_gw }}" state: present + register: _nmcli_result + until: _nmcli_result is success + retries: 5 + delay: 10 - name: Ensure crc does not get "public" DNS become: true From d8e3ca40589fdccaaf4cea7328661577f92577a0 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Wed, 26 Mar 2025 09:21:33 +0200 Subject: [PATCH 056/480] Start virtqemud service instead of checking for the sock path --- roles/libvirt_manager/tasks/clean_layout.yml | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/roles/libvirt_manager/tasks/clean_layout.yml b/roles/libvirt_manager/tasks/clean_layout.yml index d7467c2dd4..28d3fbaaba 100644 --- a/roles/libvirt_manager/tasks/clean_layout.yml +++ b/roles/libvirt_manager/tasks/clean_layout.yml @@ -2,10 +2,19 @@ - name: Get installed packages list ansible.builtin.package_facts: {} -- name: Get virtqemud socket - register: _virtqemud - ansible.builtin.stat: - path: "/var/run/libvirt/virtqemud-sock" +- name: Populate service facts + ansible.builtin.service_facts: + +- name: Start virtqemud socket service + ansible.builtin.service: + name: "{{ item }}" + state: started + enabled: true + loop: + - virtqemud.service + - virtqemud.socket + when: ansible_facts['services']['virtqemud.service']['status'] | default('not-found') != 'not-found' + become: true - name: Set _is_deepscrub internal fact ansible.builtin.set_fact: @@ -22,7 +31,7 @@ cifmw_libvirt_manager_dependency_packages | difference(ansible_facts.packages.keys()) | length == 0 - - _virtqemud.stat.exists + - ansible_facts['services']['virtqemud.service']['status'] | default('not-found') != 
'not-found' block: - name: List all of the existing virtual machines register: vms_list From 1762e470eec62c7adb7d85034aa90a59800b4764 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 16 Apr 2025 13:58:21 +0200 Subject: [PATCH 057/480] Drop kubelet reconfiguration That playbook should be removed long time ago. Similar change was done in RDO config base job [1]. [1] https://review.rdoproject.org/r/c/config/+/53694 Signed-off-by: Daniel Pawlik --- ci/playbooks/crc/reconfigure-kubelet.yml | 50 ------------------------ zuul.d/base.yaml | 2 - 2 files changed, 52 deletions(-) delete mode 100644 ci/playbooks/crc/reconfigure-kubelet.yml diff --git a/ci/playbooks/crc/reconfigure-kubelet.yml b/ci/playbooks/crc/reconfigure-kubelet.yml deleted file mode 100644 index dc16725c5e..0000000000 --- a/ci/playbooks/crc/reconfigure-kubelet.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -# Currently, the CRC is using: -# --system-reserved=cpu=200m,memory=350Mi,ephemeral-storage=350Mi -# Which means: -# - SYSTEM_RESERVED_CPU = 200m -# - SYSTEM_RESERVED_MEMORY = 350Mi -# - SYSTEM_RESERVED_ES = 350Mi -# Which might be not enough for basic services on high utilized worker node. 
-# Those values are set in /etc/node-sizing.env (base on kubelet service file) -# with values: https://github.com/crc-org/snc/blob/release-4.12/node-sizing-enabled.env -# Helpful doc: https://docs.openshift.com/container-platform/4.12/nodes/nodes/nodes-nodes-resources-configuring.html - -- hosts: crc - tasks: - - name: Reconfigure kubelet service - become: true - block: - - name: Change the kubelet service EnvironmentFile - ansible.builtin.lineinfile: - path: /etc/node-sizing.env - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - loop: - - regexp: "^SYSTEM_RESERVED_CPU=200m" - line: "SYSTEM_RESERVED_CPU={{ bootstrap_ci_crc_systemd_cpu | default('800m') }}" - - regexp: "^SYSTEM_RESERVED_MEMORY=350Mi" - line: "SYSTEM_RESERVED_MEMORY={{ bootstrap_ci_crc_systemd_mem | default('700Mi') }}" - - regexp: "^SYSTEM_RESERVED_ES=350Mi" - line: "SYSTEM_RESERVED_ES={{ bootstrap_ci_crc_systemd_disk | default('700Mi') }}" - - - name: Change the kubelet sizing enabled - ansible.builtin.lineinfile: - path: /etc/node-sizing-enabled.env - regexp: "{{ item.regexp }}" - line: "{{ item.line }}" - loop: - - regexp: "^SYSTEM_RESERVED_CPU=200m" - line: "SYSTEM_RESERVED_CPU={{ bootstrap_ci_crc_systemd_cpu | default('800m') }}" - - regexp: "^SYSTEM_RESERVED_MEMORY=350Mi" - line: "SYSTEM_RESERVED_MEMORY={{ bootstrap_ci_crc_systemd_mem | default('700Mi') }}" - - regexp: "^SYSTEM_RESERVED_ES=350Mi" - line: "SYSTEM_RESERVED_ES={{ bootstrap_ci_crc_systemd_disk | default('700Mi') }}" - - regexp: "^NODE_SIZING_ENABLED=false" - line: "NODE_SIZING_ENABLED={{ bootstrap_ci_crc_systemd_autosizing | default('false') }}" - - - name: Reboot host after kubelet is reconfigured - ansible.builtin.reboot: - - - include_role: - name: start-zuul-console diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 41e2f169a9..3333cafc78 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -217,7 +217,6 @@ roles: *multinode_edpm_roles pre-run: - ci/playbooks/bootstrap-networking-mapper.yml - - 
ci/playbooks/crc/reconfigure-kubelet.yml - ci/playbooks/multinode-customizations.yml post-run: *multinode_edpm_post_run vars: @@ -238,7 +237,6 @@ roles: *multinode_edpm_roles pre-run: - ci/playbooks/bootstrap-networking-mapper.yml - - ci/playbooks/crc/reconfigure-kubelet.yml - ci/playbooks/multinode-customizations.yml post-run: *multinode_edpm_post_run vars: From 3ddba02658ec98161bc8ec934d33d5179dd2df6a Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 9 Apr 2025 02:29:06 +0200 Subject: [PATCH 058/480] New OLM Parameter for Managing Deployment of Specific Versions A new parameter, `cifmw_ci_gen_kustomize_values_deployment_version`, has been introduced for overriding OLM startingCSV values in kustomize. This also ensures the appropriate architecture directory is utilized and all necessary subscriptions are deployed based on the installed version. This eventually leverages the overlay `kustomize` feature, hence the new `olm_subscriptions_overlay.yml` that helps set the right overlay. When configured, this parameter will stop the update playbook from executing the `set_openstack_containers` role and the deployment playbook from executing the `update_container` role. The assumption is that OLM will be used to update any necessary operators. 
Depends-On: https://github.com/openstack-k8s-operators/architecture/pull/537 Resolves-Part-Of: [OSPRH-15056](https://issues.redhat.com//browse/OSPRH-15056) --- playbooks/06-deploy-architecture.yml | 5 +- playbooks/update.yml | 1 + roles/ci_gen_kustomize_values/README.md | 12 ++++ .../tasks/olm_subscriptions_overlay.yml | 57 +++++++++++++++++++ .../common/olm-values/values.yaml.j2 | 5 ++ .../tasks/install_operators.yml | 13 ++++- roles/update/tasks/main.yml | 41 ++++++++++++- 7 files changed, 129 insertions(+), 5 deletions(-) create mode 100644 roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index 187517a792..edcb69300f 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -226,9 +226,12 @@ tags: - update_containers - edpm_bootstrap + when: cifmw_ci_gen_kustomize_values_deployment_version is not defined - name: Update containers in deployed OSP operators using set_openstack_containers role - when: cifmw_set_openstack_containers | default(false) | bool + when: + - cifmw_set_openstack_containers | default(false) | bool + - cifmw_ci_gen_kustomize_values_deployment_version is not defined ansible.builtin.include_role: name: set_openstack_containers tags: diff --git a/playbooks/update.yml b/playbooks/update.yml index 11a9a824ec..ff2bbc0031 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -39,6 +39,7 @@ cifmw_set_openstack_containers_openstack_final_env: "operator_env_after_update.txt" ansible.builtin.include_role: name: set_openstack_containers + when: cifmw_ci_gen_kustomize_values_deployment_version is not defined - name: Sync repos for controller to compute hosts: computes diff --git a/roles/ci_gen_kustomize_values/README.md b/roles/ci_gen_kustomize_values/README.md index 6c23440cdf..9555140b92 100644 --- a/roles/ci_gen_kustomize_values/README.md +++ b/roles/ci_gen_kustomize_values/README.md @@ -54,6 +54,18 
@@ Optional parameters: * `cifmw_ci_gen_kustomize_values_edpm_net_template_b64`: (String) The base64 content of `edpm_network_config_template`. +### Specific parameters for olm-values +This ConfigMap specifies parameters to override those in `architecture/example/common/olm/values.yaml`. + +* `cifmw_ci_gen_kustomize_values_ooi_image`: (String) The URI for the image providing the OpenStack operator index. Defaults to `quay.io/openstack-k8s-operators/openstack-operator-index:latest`. +* `cifmw_ci_gen_kustomize_values_sub_channel`: (String) Specifies the channel to be used. + +If the following parameter is set, it overrides the associated parameter in `architecture/example/common/olm-subscriptions/values.yaml`. + +* `cifmw_ci_gen_kustomize_values_deployment_version`: (String) The version to be deployed by setting the `startingCSV` of the subscription for the OpenStack operator. Versions `v1.0.3` and `v1.0.6` are unique as they configure the subscription for all operators. The right kustomize overlay is selected by the `ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml` file. + +Access the remaining parameters in the `olm-subscription/values.yaml` file and override them with the `cifmw_architecture_user_kustomize_` variable, which should set the `common.olm-values` hash. The earlier version parameter shouldn't be modified using this method, as it won't activate the additional code required for proper functionality. + ## Adding a new template The template must have a leading comment staging its source. For example, if your template is located in diff --git a/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml b/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml new file mode 100644 index 0000000000..972ab07da6 --- /dev/null +++ b/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml @@ -0,0 +1,57 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Description: This playbook generates the kustomize file that +# retrieves the appropriate overlay based on +# cifmw_ci_gen_kustomize_values_deployment_version. It allows for +# specifying the deployment version when multiple versions are +# available in the OLM catalog. This is particularly useful for +# testing updates but can also be used to deploy any version from the +# OLM catalog. + +- name: Set the right overlay for the subscriptions + ansible.builtin.set_fact: + _cifmw_update_deployment_version_dir: >- + {{ + cifmw_ci_gen_kustomize_values_deployment_version + if cifmw_ci_gen_kustomize_values_deployment_version in ['v1.0.3', 'v1.0.6'] + else + 'default' + }} + +- name: Point to the right overlay for OLM when deploying old version + vars: + _kustomization: + components: + - "../../../lib/olm-deps" + - "../../../lib/olm-openstack-subscriptions/overlays/{{ _cifmw_update_deployment_version_dir }}" + resources: + - "values.yaml" + ansible.builtin.copy: + content: | + --- + {{ _kustomization | ansible.builtin.to_nice_yaml(indent=2) }} + dest: "{{ cifmw_ci_gen_kustomize_values_architecture_repo }}/examples/common/olm-subscriptions/kustomization.yaml" + mode: "0644" + when: _cifmw_update_deployment_version_dir != 'default' + +# Pass down the new value for `cifmw_kustomize_deploy_olm_source_files` +- name: Change directory for the customization file when deploying old version + ansible.builtin.set_fact: + 
cifmw_kustomize_deploy_olm_source_files: >- + {{ + cifmw_ci_gen_kustomize_values_architecture_repo + }}/examples/common/olm-subscriptions diff --git a/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 index 757a5b609e..16c387f7ce 100644 --- a/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 @@ -4,3 +4,8 @@ data: {% if cifmw_ci_gen_kustomize_values_sub_channel is defined %} openstack-operator-channel: {{ cifmw_ci_gen_kustomize_values_sub_channel }} {% endif %} +{% if cifmw_ci_gen_kustomize_values_deployment_version is defined %} +{% if cifmw_ci_gen_kustomize_values_deployment_version not in ['v1.0.3', 'v1.0.6'] %} + openstack-operator-version: openstack-operator.{{ cifmw_ci_gen_kustomize_values_deployment_version }} +{% endif %} +{% endif %} diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 37d9c6405c..1af171ebaf 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -14,9 +14,20 @@ # License for the specific language governing permissions and limitations # under the License. 
+- name: Install subscriptions + ansible.builtin.include_role: + name: ci_gen_kustomize_values + tasks_from: olm_subscriptions_overlay.yml + when: cifmw_ci_gen_kustomize_values_deployment_version is defined + - name: Generate values.yaml for OLM resources vars: - cifmw_architecture_scenario: 'common/olm' + cifmw_architecture_scenario: >- + {{ + 'common/olm' + if cifmw_ci_gen_kustomize_values_deployment_version is not defined + else 'common/olm-subscriptions' + }} cifmw_ci_gen_kustomize_values_src_file: >- {{ ( diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index b8d53cf99c..aff643550a 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -36,6 +36,28 @@ ansible.builtin.shell: | {{ cifmw_update_artifacts_basedir }}/control_plane_test_start.sh +# Get the next available version available when using OLM +- name: Handle the next version when using OLM + when: + - cifmw_ci_gen_kustomize_values_deployment_version is defined + block: + - name: Make sure we get a new version available, block until we do. 
+ kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: core.openstack.org/v1beta1 + kind: OpenStackVersion + namespace: "{{ cifmw_update_namespace }}" + register: openstackversion_info + until: openstackversion_info.resources[0].spec.targetVersion != openstackversion_info.resources[0].status.availableVersion + retries: 20 + delay: 15 + + - name: Capture the available version in openstackversion + ansible.builtin.set_fact: + cifmw_update_next_available_version: >- + {{ openstackversion_info.resources[0].status.availableVersion }} - name: Set openstack_update_run Makefile environment variables tags: @@ -43,15 +65,28 @@ ansible.builtin.set_fact: _make_openstack_update_run_params: | TIMEOUT: {{ cifmw_update_openstack_update_run_timeout }} - {% if not cifmw_update_openstack_update_run_operators_updated | bool -%} + {% if _cifmw_update_use_fake_update | bool -%} FAKE_UPDATE: true CONTAINERS_NAMESPACE: {{ cifmw_update_openstack_update_run_containers_namespace }} CONTAINERS_TARGET_TAG: {{ cifmw_update_openstack_update_run_containers_target_tag }} OPENSTACK_VERSION: {{ cifmw_update_openstack_update_run_target_version }} {% else -%} - OPENSTACK_VERSION: {{ cifmw_update_openstack_update_run_target_version }} + OPENSTACK_VERSION: {{ _cifmw_update_openstack_version }} {% endif -%} - + vars: + # When using OLM style of update, or if + # cifmw_update_openstack_update_run_operators_updated is true do + # not use fake update in openstack-update.sh. 
+ _cifmw_update_use_fake_update: >- + {{ + not ( cifmw_ci_gen_kustomize_values_deployment_version is defined ) and + not ( cifmw_update_openstack_update_run_operators_updated | bool ) + }} + _cifmw_update_openstack_version: >- + {{ + cifmw_update_next_available_version | + default(cifmw_update_openstack_update_run_target_version) + }} - name: Run make openstack_update_run vars: From c63e6fbd9f0c73e75e0291e059bb4e1bd29e6f70 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 9 Apr 2025 11:26:27 +0200 Subject: [PATCH 059/480] New OLM parameter for managing deployment of installPlan approval. A new parameter, `cifmw_ci_gen_kustomize_values_installplan_approval`, is introduced for overriding OLM installPlanApproval values in kustomize. It can be set to `Automatic` or `Manual`. When set to `Manual` it has to be used in conjonction with `cifmw_ci_gen_kustomize_values_deployment_version` as it won't work if the the later is not set. It shouldn't be necessary to set this to `Automatic` as it's the default of operator installation. 
Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/2881 Resolves: [OSPRH-15054](https://issues.redhat.com//browse/OSPRH-15054) --- roles/ci_gen_kustomize_values/README.md | 4 +- .../tasks/olm_subscriptions_overlay.yml | 10 +++ .../common/olm-values/values.yaml.j2 | 3 + .../tasks/install_operators.yml | 10 ++- roles/kustomize_deploy/tasks/install_plan.yml | 90 +++++++++++++++++++ roles/update/tasks/main.yml | 8 ++ 6 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 roles/kustomize_deploy/tasks/install_plan.yml diff --git a/roles/ci_gen_kustomize_values/README.md b/roles/ci_gen_kustomize_values/README.md index 9555140b92..72baafe29a 100644 --- a/roles/ci_gen_kustomize_values/README.md +++ b/roles/ci_gen_kustomize_values/README.md @@ -64,7 +64,9 @@ If the following parameter is set, it overrides the associated parameter in `arc * `cifmw_ci_gen_kustomize_values_deployment_version`: (String) The version to be deployed by setting the `startingCSV` of the subscription for the OpenStack operator. Versions `v1.0.3` and `v1.0.6` are unique as they configure the subscription for all operators. The right kustomize overlay is selected by the `ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml` file. -Access the remaining parameters in the `olm-subscription/values.yaml` file and override them with the `cifmw_architecture_user_kustomize_` variable, which should set the `common.olm-values` hash. The earlier version parameter shouldn't be modified using this method, as it won't activate the additional code required for proper functionality. +* `cifmw_ci_gen_kustomize_values_installplan_approval`: (String) Options are `Manual` or `Automatic`. This determines how the OpenStack operator is installed. In `Manual` mode, the install plan requires approval, which is automatically handled in the `kustomize_deploy/tasks/install_operators.yml` task file. 
+ +Access to the other parameters defined in the `olm-subscription/values.yaml` file is doable by overriding them using the `cifmw_architecture_user_kustomize_` variable, which should set the `common.olm-values` hash. However, the last two variables should not be modified using this method, as it won't activate the additional code required for them to function correctly. ## Adding a new template diff --git a/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml b/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml index 972ab07da6..6b4d23b8f4 100644 --- a/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml +++ b/roles/ci_gen_kustomize_values/tasks/olm_subscriptions_overlay.yml @@ -22,6 +22,16 @@ # testing updates but can also be used to deploy any version from the # OLM catalog. +- name: Fail if installplan_approval is defined without deployment_version + ansible.builtin.fail: + msg: > + You cannot have 'cifmw_ci_gen_kustomize_values_installplan_approval' + set to Manual without 'cifmw_ci_gen_kustomize_values_deployment_version' + when: + - cifmw_ci_gen_kustomize_values_installplan_approval is defined + - cifmw_ci_gen_kustomize_values_installplan_approval | lower == 'manual' + - cifmw_ci_gen_kustomize_values_deployment_version is not defined + - name: Set the right overlay for the subscriptions ansible.builtin.set_fact: _cifmw_update_deployment_version_dir: >- diff --git a/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 index 16c387f7ce..c9d1ac8ef9 100644 --- a/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/olm-values/values.yaml.j2 @@ -9,3 +9,6 @@ data: openstack-operator-version: openstack-operator.{{ cifmw_ci_gen_kustomize_values_deployment_version }} {% endif %} {% endif %} +{% if cifmw_ci_gen_kustomize_values_installplan_approval is defined %} + 
openstack-operator-installplanapproval: {{ cifmw_ci_gen_kustomize_values_installplan_approval }} +{% endif %} diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 1af171ebaf..28584c6d19 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -18,7 +18,9 @@ ansible.builtin.include_role: name: ci_gen_kustomize_values tasks_from: olm_subscriptions_overlay.yml - when: cifmw_ci_gen_kustomize_values_deployment_version is defined + when: > + cifmw_ci_gen_kustomize_values_deployment_version is defined or + cifmw_ci_gen_kustomize_values_installplan_approval is defined - name: Generate values.yaml for OLM resources vars: @@ -119,6 +121,12 @@ - _cifmw_kustomize_deploy_olm_osp_operator_sub_out.resources | length == 1 - (_cifmw_kustomize_deploy_olm_osp_operator_sub_out.resources | first)['status']['installPlanRef'] is defined + - name: Install plan + ansible.builtin.include_tasks: install_plan.yml + when: + - cifmw_ci_gen_kustomize_values_installplan_approval is defined + - cifmw_ci_gen_kustomize_values_installplan_approval | lower == 'manual' + - name: Wait for the openstack operators InstallPlan to be finished vars: _install_plan: >- diff --git a/roles/kustomize_deploy/tasks/install_plan.yml b/roles/kustomize_deploy/tasks/install_plan.yml new file mode 100644 index 0000000000..2bd19de0dc --- /dev/null +++ b/roles/kustomize_deploy/tasks/install_plan.yml @@ -0,0 +1,90 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Description: +# Set of tasks to accept the latest Manual installPlan provided by OLM. + +- name: Wait for unapproved InstallPlan creation + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: operators.coreos.com/v1alpha1 + kind: InstallPlan + namespace: openstack-operators + register: _cifmw_kustomize_deploy_install_plans + until: > + _cifmw_kustomize_deploy_install_plans.resources | + selectattr('spec.approval', 'equalto', 'Manual') | + selectattr('spec.approved', 'equalto', false) | length > 0 + retries: 30 + delay: 10 + +- name: Get InstallPlan name + ansible.builtin.set_fact: + _cifmw_kustomize_deploy_installplan_name: >- + {{ + (_cifmw_kustomize_deploy_install_plans.resources + | selectattr('spec.approval', 'equalto', 'Manual') + | selectattr('spec.approved', 'equalto', false) + | first) + .metadata.name + }} + +- name: Approve the InstallPlan + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + state: present + namespace: openstack-operators + definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: InstallPlan + metadata: + name: "{{ _cifmw_kustomize_deploy_installplan_name }}" + spec: + approved: true + +- name: Display the status of the installPlan found + ansible.builtin.debug: + msg: "Waiting for InstallPlan {{ 
_cifmw_kustomize_deploy_installplan_name }}." + +- name: Wait for the InstallPlan to complete + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: operators.coreos.com/v1alpha1 + kind: InstallPlan + namespace: openstack-operators + name: "{{ _cifmw_kustomize_deploy_installplan_name }}" + register: _cifmw_kustomize_deploy_installplan + until: + - _cifmw_kustomize_deploy_installplan.failed is false + - _cifmw_kustomize_deploy_installplan.resources is defined + - _cifmw_kustomize_deploy_installplan.resources | length == 1 + - >- + ( + _cifmw_kustomize_deploy_installplan.resources | first + ).status.phase | lower == 'complete' + retries: "{{ cifmw_kustomize_deploy_retries_install_plan }}" + delay: "{{ cifmw_kustomize_deploy_delay }}" + +- name: Display the status of the installPlan found + ansible.builtin.debug: + msg: > + InstallPlan {{ _cifmw_kustomize_deploy_installplan_name }} deployed. 
diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index aff643550a..800aab5209 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -36,6 +36,14 @@ ansible.builtin.shell: | {{ cifmw_update_artifacts_basedir }}/control_plane_test_start.sh +- name: Install plan + ansible.builtin.include_role: + name: kustomize_deploy + tasks_from: install_plan.yml + when: + - cifmw_ci_gen_kustomize_values_installplan_approval is defined + - cifmw_ci_gen_kustomize_values_installplan_approval | lower == 'manual' + # Get the next available version available when using OLM - name: Handle the next version when using OLM when: From dd268ed634cce92c0a1a9bbbd5cf25d7b0597cf1 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 5 Mar 2025 18:19:51 +0530 Subject: [PATCH 060/480] Mask YAML files --- scripts/crawl_n_mask.py | 275 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 275 insertions(+) create mode 100755 scripts/crawl_n_mask.py diff --git a/scripts/crawl_n_mask.py b/scripts/crawl_n_mask.py new file mode 100755 index 0000000000..a269a38a1a --- /dev/null +++ b/scripts/crawl_n_mask.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python3 + +# core logic borrowed from https://github.com/openstack-k8s-operators/openstack-must-gather/blob/main/pyscripts/mask.py +# and modified to match our use-case +import argparse +import os +import re +import yaml +import sys +from typing import Dict, Optional, Any, Union + +# files which are yaml but do not end with .yaml or .yml +ALLOWED_YAML_FILES = [ + "Standalone", +] +# dirs which we do not want to scan +EXCLUDED_DIRS = [ + "openstack-k8s-operators-openstack-must-gather", + "tmp", + "venv", +] +# file extensions which we do not want to process +EXCLUDED_FILE_EXT = [ + ".py", + ".html", + ".DS_Store", + ".tar.gz", + ".zip", + ".j2", +] +# keys in files whose values need to be masked +PROTECT_KEYS = [ + "literals", + "PASSWORD", + "Password", + "password", + "_pwd", + "_PWD", + "Token", + "Secret", + 
"secret", + "SECRET", + "Authkey", + "authkey", + "private_key", + "privatekey", + "Passphrase", + "passphrase", + "PASSPHRASE", + "encryption_key", + "ENCRYPTION_KEY", + "HeatAuthEncryptionKey", + "oc_login_command", + "METADATA_SHARED_SECRET", + "KEYSTONE_FEDERATION_CLIENT_SECRET", + "rabbit", + "database_connection", + "slave_connection", + "sql_connection", + "cifmw_openshift_login_password", + "cifmw_openshift_login_token", + "BarbicanSimpleCryptoKEK", + "OctaviaHeartbeatKey", + "server-ca-passphrase", + "KeystoneFernetKeys", + "KeystoneFernetKey", + "KeystoneCredential", + "DesignateRndcKey", + "CephRgwKey", + "CephClusterFSID", + "CephClientKey", + "BarbicanSimpleCryptoKek", + "HashSuffix", + "RabbitCookie", + "erlang_cookie", + "ClientKey", + "swift_store_key", + "secret_key", + "heartbeat_key", + "fernet_keys", +] +# connection keys which may be part of the value itself +CONNECTION_KEYS = [ + "rabbit", + "database_connection", + "slave_connection", + "sql_connection", +] +# Masking string +MASK_STR = "**********" + +# general and connection regexes are used to match the pattern that should  ̰be +# applied to both Protect keys and connection keys, which is the same thing +# done in SoS reports +gen_regex = r"(\w*(%s)\s*=\s*)(.*)" % "|".join(PROTECT_KEYS) +con_regex = r"((%s)\s*://)(\w*):(.*)(@(.*))" % "|".join(CONNECTION_KEYS) + +# regex of excluded file extensions +excluded_file_ext_regex = r"(^.*(%s).*)" % "|".join(EXCLUDED_FILE_EXT) + +# regex of keys which will be checked against every key +# as in yaml files, we have data in format = +# if a key is sensitive, it will be found using this regex +key_regex = r"(%s)\d*$" % "|".join(PROTECT_KEYS) +regexes = [gen_regex, con_regex] + + +class SecretMask: + + def __init__(self, path: Optional[Any] = None) -> None: + self.path: Union[Any, None] = path + + def mask(self) -> None: + """ + Method responsible to begin masking on a provided + log file. 
It checks for file type, and calls + respective masking methods for that file. + """ + if ( + self.path.endswith((tuple(["yaml", "yml"]))) + or os.path.basename(self.path).split(".")[0] in ALLOWED_YAML_FILES + ): + self._mask_yaml() + + def _process_list(self, lst: list) -> None: + for item in lst: + if isinstance(item, dict): + self._apply_mask(item) + elif isinstance(item, list): + self._process_list(item) + + def _apply_regex(self, value: str) -> str: + """ + For each string value passed as argument, try + to match the pattern according to the provided + regexes and mask any potential sensitive info. + """ + for pattern in regexes: + value = re.sub(pattern, r"\1{}".format(MASK_STR), value, flags=re.I) + return value + + def _apply_mask(self, yaml_dict: Dict[str, Any]) -> None: + """ + Check and mask value if key of dict matches + with key_regex, else perform action on data + type of value. Call _process_list if value + is of type list, call _apply_regex for strings, + recursively call _apply_mask in case value is + of type dict. + """ + for k, v in yaml_dict.items(): + if re.findall(key_regex, k): + yaml_dict[k] = MASK_STR + + elif isinstance(v, str): + yaml_dict[k] = self._apply_regex(v) + + elif isinstance(v, list): + self._process_list(v) + + elif isinstance(v, dict): + self._apply_mask(v) + + def _mask_yaml(self) -> None: + """ + Method to handle masking of yaml files. + Begin with reading yaml and storing in + list (check _read_yaml for return type + info), then process the list to mask + secrets, and then write the encoded + data back. + """ + yaml_list = self._read_yaml() + + if not yaml_list: + return + # we are directly calling _process_list as + # yaml.safe_load_all returns an Iterator of + # dictionaries which we have converted into + # a list (return type of _read_yaml) + self._process_list(yaml_list) + + self._write_yaml(yaml_list) + + def _read_yaml(self) -> Optional[Union[list, None]]: + """ + Read and Load the yaml file for + processing. 
Using yaml.safe_load_all + to handle all documents within a + single yaml file stream. Return + type (Iterator) is parsed to list + to make in-place change easy. + """ + try: + assert self.path is not None + with open(self.path, "r") as f: + return list(yaml.safe_load_all(f)) + except (FileNotFoundError, yaml.YAMLError) as e: + print(f"Error while reading YAML: {e}") + # sys.exit(-1) + return None + + def _write_yaml(self, encoded_secret: Any) -> None: + """ + Re-write the processed yaml file in + the same path. + Writing will occur only if there are + changes to the content. + """ + try: + assert self.path is not None + if self._read_yaml() != encoded_secret: + with open(self.path, "w") as f: + yaml.safe_dump_all(encoded_secret, f, default_flow_style=False) + except (IOError, yaml.YAMLError) as e: + print(f"Error while writing the masked file: {e}") + + +def crawl(path) -> None: + """ + Crawler function which will crawl through the log directory + and find eligible files for masking. + """ + for root, _, files in os.walk(path, onerror=handle_error): + if any(excluded in root for excluded in EXCLUDED_DIRS): + continue + + for f in files: + if re.search(excluded_file_ext_regex, f) is None: + SecretMask(os.path.join(root, f)).mask() + + +def handle_error(e): + print(f"Error processing file {e}") + + +def parse_opts(argv: list[str]) -> argparse.Namespace: + """ + Utility for the main function: it provides a way to parse + options and return the arguments. 
+ """ + parser = argparse.ArgumentParser(description="Parameters") + parser.add_argument( + "-p", + "--path", + metavar="PATH", + help="Path of the file where the masking \ + should be applied", + ) + parser.add_argument( + "-d", + "--dir", + metavar="DIR_PATH", + help="Path of the directory where the masking \ + should be applied", + ) + opts = parser.parse_args(argv[1:]) + return opts + + +if __name__ == "__main__": + # parse the provided options + OPTS = parse_opts(sys.argv) + + if OPTS.dir is not None and os.path.exists(OPTS.dir): + # craw through the provided directly and then + # process eligible files individually + crawl(OPTS.dir) + + if ( + OPTS.path is not None + and os.path.exists(OPTS.path) + and re.search(excluded_file_ext_regex, OPTS.path) is None + ): + SecretMask(OPTS.path).mask() From e9fc7194139ca350f761f1afc00b037205ec78d3 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 11 Mar 2025 15:48:23 +0530 Subject: [PATCH 061/480] Mask YAML files --- scripts/crawl_n_mask.py | 1 - scripts/tests/samples/nochange.yaml | 7 ++++ scripts/tests/samples/secret1.yaml | 44 +++++++++++++++++++++ scripts/tests/samples/secret2.yaml | 44 +++++++++++++++++++++ scripts/tests/test_crawl_n_mask.py | 61 +++++++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 1 deletion(-) create mode 100644 scripts/tests/samples/nochange.yaml create mode 100644 scripts/tests/samples/secret1.yaml create mode 100644 scripts/tests/samples/secret2.yaml create mode 100644 scripts/tests/test_crawl_n_mask.py diff --git a/scripts/crawl_n_mask.py b/scripts/crawl_n_mask.py index a269a38a1a..51e07d247a 100755 --- a/scripts/crawl_n_mask.py +++ b/scripts/crawl_n_mask.py @@ -197,7 +197,6 @@ def _read_yaml(self) -> Optional[Union[list, None]]: return list(yaml.safe_load_all(f)) except (FileNotFoundError, yaml.YAMLError) as e: print(f"Error while reading YAML: {e}") - # sys.exit(-1) return None def _write_yaml(self, encoded_secret: Any) -> None: diff --git 
a/scripts/tests/samples/nochange.yaml b/scripts/tests/samples/nochange.yaml new file mode 100644 index 0000000000..fbc158ace3 --- /dev/null +++ b/scripts/tests/samples/nochange.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openstack + labels: + pod-security.kubernetes.io/enforce: privileged + security.openshift.io/scc.podSecurityLabelSync: "false" diff --git a/scripts/tests/samples/secret1.yaml b/scripts/tests/samples/secret1.yaml new file mode 100644 index 0000000000..b393444740 --- /dev/null +++ b/scripts/tests/samples/secret1.yaml @@ -0,0 +1,44 @@ +parameter_defaults: + AdminPassword: dummyvalue + AdminToken: dummyvalue + AodhPassword: dummyvalue + HeatStackDomainAdminPassword: dummyvalue + HorizonSecret: dummyvalue + IronicPassword: dummyvalue + KeystoneCredential0: testpass + KeystoneCredential1: testpass + KeystoneFernetKey0: testpass + KeystoneFernetKey1: testpass + MigrationSshKey: + private_key: '-----BEGIN RSA PRIVATE KEY----- + + dummy value + + -----END RSA PRIVATE KEY----- + + ' + public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCj7vO8eBgY5uoU4SheQENoon6NegupSYeQ5OZwFYM9alTMXXapk4Qq1aZbkEsKfNwqL8ZdURUAxHO94SktFf8/OwoI0aNQur7kHWP2x8fEvhE6FtC5xKsOU+jasn8zooTnAhFEv9MLG5HTzPhSZkdMcAtrMjtas3e1kBWhfkVJVIUrQbjelksf1E7l1wADCYxErcwpuSsgaKxv/3M2kDoW1TF4Z1Deb7eY4q87rgpcCMWQ4PihWAhfHpGSZ+GbsSA0KOG1agQIvsqUidFwiaJWsGAyB+WooYa/znhPYNdVQcLoNa7ajHDeWqB6aRCYMVRkbieoMmhNRvJsfpe7JxVx + Generated by TripleO + MysqlClustercheckPassword: dummyvalue + MysqlMariabackupPassword: dummyvalue + MysqlRootPassword: dummyvalue + NeutronMetadataProxySharedSecret: dummyvalue + NeutronPassword: dummyvalue + NotifyPassword: dummyvalue + NovaPassword: dummyvalue + NovajoinPassword: dummyvalue + OctaviaCaKeyPassphrase: dummyvalue + OctaviaHeartbeatKey: dummyvalue + OctaviaPassword: dummyvalue + OctaviaServerCertsKeyPassphrase: dummyvalue + PankoPassword: dummyvalue + PcsdPassword: dummyvalue + PlacementPassword: dummyvalue + RabbitCookie: dummyvalue + 
RabbitPassword: dummyvalue + RedisPassword: dummyvalue + RpcPassword: dummyvalue + SaharaPassword: dummyvalue + SnmpdReadonlyUserPassword: dummyvalue + SwiftHashSuffix: dummyvalue + SwiftPassword: dummyvalue diff --git a/scripts/tests/samples/secret2.yaml b/scripts/tests/samples/secret2.yaml new file mode 100644 index 0000000000..c336fe3767 --- /dev/null +++ b/scripts/tests/samples/secret2.yaml @@ -0,0 +1,44 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +labels: + - pairs: + created-by: install_yamls +secretGenerator: +- name: osp-secret + literals: + - AdminPassword=12345678 + - AodhPassword=12345678 + - BarbicanPassword=12345678 + - BarbicanSimpleCryptoKEK=mypass + - CeilometerPassword=12345678 + - DbRootPassword=12345678 + - DatabasePassword=12345678 + - DesignatePassword=12345678 + - PlacementPassword=12345678 + - GlancePassword=12345678 + - NeutronPassword=12345678 + - CinderPassword=12345678 + - IronicPassword=12345678 + - IronicInspectorPassword=12345678 + - KeystoneClientSecret=mysecret + - KeystoneCryptoPassphrase=dummyvalue + - OctaviaPassword=12345678 + - OctaviaHeartbeatKey=12345678 + - NovaPassword=12345678 + - ManilaPassword=12345678 + - MetadataSecret=1234567842 + - HeatPassword=12345678 + - HeatAuthEncryptionKey=mykey + - HeatStackDomainAdminPassword=12345678 + - SwiftPassword=12345678 +- name: libvirt-secret + literals: + - LibvirtPassword=12345678 +- name: octavia-ca-passphrase + literals: + - server-ca-passphrase=12345678 +generatorOptions: + disableNameSuffixHash: true + labels: + type: osp-secret diff --git a/scripts/tests/test_crawl_n_mask.py b/scripts/tests/test_crawl_n_mask.py new file mode 100644 index 0000000000..586bd05659 --- /dev/null +++ b/scripts/tests/test_crawl_n_mask.py @@ -0,0 +1,61 @@ +#!/usr/bin/python +import copy +import unittest +import os +import yaml +from typing import Optional, Union +from scripts.crawl_n_mask import SecretMask + +# sample directory used to load yaml files +SAMPLE_DIR = 
"samples" + + +class TestSecretMask(unittest.TestCase): + """ + The class that implements basic tests for + SecretMask. + """ + + def _read_yaml_sample(self, path) -> list: + """ + utility function to load a sample yaml file. + """ + with open(path, "r") as f: + return list(yaml.safe_load_all(f)) + + def test_mask_yaml(self): + """ + For each file present in the tests/samples we: + - Load the file by reading the yaml definition + - Process using the SecreMask module + - assert the content of the secret is + different + """ + for root, _, files in os.walk(SAMPLE_DIR): + for f in files: + actual = self._read_yaml_sample(os.path.join(root, f)) + expected = copy.deepcopy(actual) + # Mask secret by processing the content + # of the yaml file we got + SecretMask(os.path.join(root, f))._process_list(expected) + + """ + files are named secret{1, 2, 3, ... N}: for these secrets + we expect a change in their content because sensitive + data has been masked; the sample file named "nochange.yaml", + instead, is the one that does not contain any sensitive data, + hence no masking is applied and we expect the original data + content being the same as the processed one. 
+ """ + if "nochange" in f: + # the content in nochange.yaml should + # not change after processing it + self.assertEqual(actual, expected) + else: + # The content (secret values) should be + # different + self.assertNotEqual(actual, expected) + + +if __name__ == "__main__": + unittest.main() From 7238ead46e9b863d9cdc156136a10728de8c7230 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 11 Mar 2025 15:48:23 +0530 Subject: [PATCH 062/480] Mask YAML files --- plugins/modules/crawl_n_mask.py | 359 ++++++++++++++++++++++++++++ scripts/crawl_n_mask.py | 274 --------------------- scripts/tests/samples/nochange.yaml | 7 - scripts/tests/samples/secret1.yaml | 44 ---- scripts/tests/samples/secret2.yaml | 44 ---- scripts/tests/test_crawl_n_mask.py | 61 ----- 6 files changed, 359 insertions(+), 430 deletions(-) create mode 100755 plugins/modules/crawl_n_mask.py delete mode 100755 scripts/crawl_n_mask.py delete mode 100644 scripts/tests/samples/nochange.yaml delete mode 100644 scripts/tests/samples/secret1.yaml delete mode 100644 scripts/tests/samples/secret2.yaml delete mode 100644 scripts/tests/test_crawl_n_mask.py diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py new file mode 100755 index 0000000000..c11619c1bf --- /dev/null +++ b/plugins/modules/crawl_n_mask.py @@ -0,0 +1,359 @@ +#!/usr/bin/python + +# Copyright Red Hat, Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# core logic borrowed from https://github.com/openstack-k8s-operators/openstack-must-gather/blob/main/pyscripts/mask.py +# and modified to a module according to our requirement +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: crawl_n_mask + +short_description: This module mask secrets in yaml files/dirs + +version_added: "1.0.0" + +description: + - This module crawls over a directory (default) and find yaml files which may have secrets in it, and proceeds with masking it. + - If you pass a yaml file, it will directly check and mask secret in it. + - If you pass a directory, it will crawl the directory and find eligible files to mask. + +options: + path: + description: + - This is the target file/dir you want to mask. + required: true + type: path + isdir: + description: + - Tells if the path is dir or not. + - Supported options are True and False. + - Set value to False if path is file, else True. + - Defaults to False. 
+ required: false + default: False + type: bool + +author: + - Amartya Sinha (@amartyasinha) +""" + +EXAMPLES = r""" +- name: Mask secrets in all yaml files within /home/zuul/logs + crawl_n_mask: + path: /home/zuul/logs + isdir: True + +- name: Mask my_secrets.yaml + crawl_n_mask: + path: /home/zuul/logs/my_secrets.yaml +""" + +RETURN = r""" +success: + description: Status of the execution + type: bool + returned: always + sample: true +""" + +import os +import re +import yaml +from typing import Dict, Optional, Any, Union +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +# files which are yaml but do not end with .yaml or .yml +ALLOWED_YAML_FILES = [ + "Standalone", +] +# dirs which we do not want to scan +EXCLUDED_DIRS = [ + "openstack-k8s-operators-openstack-must-gather", + "tmp", + "venv", +] +# file extensions which we do not want to process +EXCLUDED_FILE_EXT = [ + ".py", + ".html", + ".DS_Store", + ".tar.gz", + ".zip", + ".j2", +] +# keys in files whose values need to be masked +PROTECT_KEYS = [ + "literals", + "PASSWORD", + "Password", + "password", + "_pwd", + "_PWD", + "Token", + "Secret", + "secret", + "SECRET", + "Authkey", + "authkey", + "private_key", + "privatekey", + "Passphrase", + "passphrase", + "PASSPHRASE", + "encryption_key", + "ENCRYPTION_KEY", + "HeatAuthEncryptionKey", + "oc_login_command", + "METADATA_SHARED_SECRET", + "KEYSTONE_FEDERATION_CLIENT_SECRET", + "rabbit", + "database_connection", + "slave_connection", + "sql_connection", + "cifmw_openshift_login_password", + "cifmw_openshift_login_token", + "BarbicanSimpleCryptoKEK", + "OctaviaHeartbeatKey", + "server-ca-passphrase", + "KeystoneFernetKeys", + "KeystoneFernetKey", + "KeystoneCredential", + "DesignateRndcKey", + "CephRgwKey", + "CephClusterFSID", + "CephClientKey", + "BarbicanSimpleCryptoKek", + "HashSuffix", + "RabbitCookie", + "erlang_cookie", + "ClientKey", + "swift_store_key", + "secret_key", + 
"heartbeat_key", + "fernet_keys", +] +# connection keys which may be part of the value itself +CONNECTION_KEYS = [ + "rabbit", + "database_connection", + "slave_connection", + "sql_connection", +] +# Masking string +MASK_STR = "**********" + +# general and connection regexes are used to match the pattern that should  ̰be +# applied to both Protect keys and connection keys, which is the same thing +# done in SoS reports +gen_regex = r"(\w*(%s)\s*=\s*)(.*)" % "|".join(PROTECT_KEYS) +con_regex = r"((%s)\s*://)(\w*):(.*)(@(.*))" % "|".join(CONNECTION_KEYS) + +# regex of excluded file extensions +excluded_file_ext_regex = r"(^.*(%s).*)" % "|".join(EXCLUDED_FILE_EXT) + +# regex of keys which will be checked against every key +# as in yaml files, we have data in format = +# if a key is sensitive, it will be found using this regex +key_regex = r"(%s)\d*$" % "|".join(PROTECT_KEYS) +regexes = [gen_regex, con_regex] + + +def handle_walk_errors(e): + raise e + + +def crawl(module, path) -> bool: + """ + Crawler function which will crawl through the log directory + and find eligible files for masking. + """ + changed = False + for root, _, files in os.walk(path, onerror=handle_walk_errors): + if any(excluded in root for excluded in EXCLUDED_DIRS): + continue + + for f in files: + if re.search(excluded_file_ext_regex, f) is None: + file_changed = mask(module, os.path.join(root, f)) + # even if one file is masked, the final result will be True + if file_changed: + changed = True + return changed + + +def mask(module, path: str) -> bool: + """ + Method responsible to begin masking on a provided + log file. It checks for file type, and calls + respective masking methods for that file. 
+ """ + changed = False + if ( + path.endswith((tuple(["yaml", "yml"]))) + or os.path.basename(path).split(".")[0] in ALLOWED_YAML_FILES + ): + changed = mask_yaml(module, path) + return changed + + +def process_list(lst: list) -> None: + for item in lst: + if isinstance(item, dict): + apply_mask(item) + elif isinstance(item, list): + process_list(item) + + +def apply_regex(value: str) -> str: + """ + For each string value passed as argument, try + to match the pattern according to the provided + regexes and mask any potential sensitive info. + """ + for pattern in regexes: + value = re.sub(pattern, r"\1{}".format(MASK_STR), value, flags=re.I) + return value + + +def apply_mask(yaml_dict: Dict[str, Any]) -> None: + """ + Check and mask value if key of dict matches + with key_regex, else perform action on data + type of value. Call _process_list if value + is of type list, call _apply_regex for strings, + recursively call _apply_mask in case value is + of type dict. + """ + for k, v in yaml_dict.items(): + if re.findall(key_regex, k): + yaml_dict[k] = MASK_STR + + elif isinstance(v, str): + yaml_dict[k] = apply_regex(v) + + elif isinstance(v, list): + process_list(v) + + elif isinstance(v, dict): + apply_mask(v) + + +def mask_yaml(module, path) -> bool: + """ + Method to handle masking of yaml files. + Begin with reading yaml and storing in + list (check _read_yaml for return type + info), then process the list to mask + secrets, and then write the encoded + data back. 
+ """ + yaml_content = read_yaml(module, path) + changed = False + if not yaml_content: + return changed + # we are directly calling _process_list as + # yaml.safe_load_all returns an Iterator of + # dictionaries which we have converted into + # a list (return type of _read_yaml) + process_list(yaml_content) + + changed = write_yaml(module, path, yaml_content) + return changed + + +def read_yaml(module, file_path: str) -> Optional[Union[list, None]]: + """ + Read and Load the yaml file for + processing. Using yaml.safe_load_all + to handle all documents within a + single yaml file stream. Return + type (Iterator) is parsed to list + to make in-place change easy. + """ + try: + assert file_path is not None + with open(file_path, "r") as f: + return list(yaml.safe_load_all(f)) + except (FileNotFoundError, yaml.YAMLError) as e: + module.warn("Error opening file: %s" % e) + return None + + +def write_yaml(module, path, encoded_secret: Any) -> bool: + """ + Re-write the processed yaml file in + the same path. + Writing will occur only if there are + changes to the content. 
+ """ + changed = False + try: + assert path is not None + if read_yaml(module, path) != encoded_secret: + with open(path, "w") as f: + yaml.safe_dump_all(encoded_secret, f, default_flow_style=False) + changed = True + except (IOError, yaml.YAMLError) as e: + module.fail_json( + msg=f"Error writing to file: {to_native(e, nonstring='simplerepr')}", + path=path, + ) + return changed + + +def run_module(): + # define available arguments/parameters a user can pass to the module + module_args = dict( + path=dict(type="path", required=True), isdir=dict(type="bool", default=False) + ) + + # seed the result dict in the object + # we primarily care about changed and state + # changed is if this module effectively modified the target + # state will include any data that you want your module to pass back + # for consumption, for example, in a subsequent task + changed = False + result = dict(changed=changed) + + # the AnsibleModule object will be our abstraction working with Ansible + # this includes instantiation, a couple of common attr would be the + # args/params passed to the execution, as well as if the module + # supports check mode + module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) + + params = module.params + path = params["path"] + isdir = params["isdir"] + + # if the user is working with this module in only check mode we do not + # want to make any changes to the environment, just return the current + # state with no modifications + if module.check_mode: + module.exit_json(**result) + + if isdir and os.path.exists(path): + # craw through the provided directly and then + # process eligible files individually + changed = crawl(module, path) + + if ( + not isdir + and os.path.exists(path) + and re.search(excluded_file_ext_regex, path) is None + ): + changed = mask(module, path) + + result.update(changed=changed) + # in the event of a successful module execution, you will want to + # simple AnsibleModule.exit_json(), passing the key/value 
results + module.exit_json(**result) + + +if __name__ == "__main__": + run_module() diff --git a/scripts/crawl_n_mask.py b/scripts/crawl_n_mask.py deleted file mode 100755 index 51e07d247a..0000000000 --- a/scripts/crawl_n_mask.py +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env python3 - -# core logic borrowed from https://github.com/openstack-k8s-operators/openstack-must-gather/blob/main/pyscripts/mask.py -# and modified to match our use-case -import argparse -import os -import re -import yaml -import sys -from typing import Dict, Optional, Any, Union - -# files which are yaml but do not end with .yaml or .yml -ALLOWED_YAML_FILES = [ - "Standalone", -] -# dirs which we do not want to scan -EXCLUDED_DIRS = [ - "openstack-k8s-operators-openstack-must-gather", - "tmp", - "venv", -] -# file extensions which we do not want to process -EXCLUDED_FILE_EXT = [ - ".py", - ".html", - ".DS_Store", - ".tar.gz", - ".zip", - ".j2", -] -# keys in files whose values need to be masked -PROTECT_KEYS = [ - "literals", - "PASSWORD", - "Password", - "password", - "_pwd", - "_PWD", - "Token", - "Secret", - "secret", - "SECRET", - "Authkey", - "authkey", - "private_key", - "privatekey", - "Passphrase", - "passphrase", - "PASSPHRASE", - "encryption_key", - "ENCRYPTION_KEY", - "HeatAuthEncryptionKey", - "oc_login_command", - "METADATA_SHARED_SECRET", - "KEYSTONE_FEDERATION_CLIENT_SECRET", - "rabbit", - "database_connection", - "slave_connection", - "sql_connection", - "cifmw_openshift_login_password", - "cifmw_openshift_login_token", - "BarbicanSimpleCryptoKEK", - "OctaviaHeartbeatKey", - "server-ca-passphrase", - "KeystoneFernetKeys", - "KeystoneFernetKey", - "KeystoneCredential", - "DesignateRndcKey", - "CephRgwKey", - "CephClusterFSID", - "CephClientKey", - "BarbicanSimpleCryptoKek", - "HashSuffix", - "RabbitCookie", - "erlang_cookie", - "ClientKey", - "swift_store_key", - "secret_key", - "heartbeat_key", - "fernet_keys", -] -# connection keys which may be part of the value itself 
-CONNECTION_KEYS = [ - "rabbit", - "database_connection", - "slave_connection", - "sql_connection", -] -# Masking string -MASK_STR = "**********" - -# general and connection regexes are used to match the pattern that should  ̰be -# applied to both Protect keys and connection keys, which is the same thing -# done in SoS reports -gen_regex = r"(\w*(%s)\s*=\s*)(.*)" % "|".join(PROTECT_KEYS) -con_regex = r"((%s)\s*://)(\w*):(.*)(@(.*))" % "|".join(CONNECTION_KEYS) - -# regex of excluded file extensions -excluded_file_ext_regex = r"(^.*(%s).*)" % "|".join(EXCLUDED_FILE_EXT) - -# regex of keys which will be checked against every key -# as in yaml files, we have data in format = -# if a key is sensitive, it will be found using this regex -key_regex = r"(%s)\d*$" % "|".join(PROTECT_KEYS) -regexes = [gen_regex, con_regex] - - -class SecretMask: - - def __init__(self, path: Optional[Any] = None) -> None: - self.path: Union[Any, None] = path - - def mask(self) -> None: - """ - Method responsible to begin masking on a provided - log file. It checks for file type, and calls - respective masking methods for that file. - """ - if ( - self.path.endswith((tuple(["yaml", "yml"]))) - or os.path.basename(self.path).split(".")[0] in ALLOWED_YAML_FILES - ): - self._mask_yaml() - - def _process_list(self, lst: list) -> None: - for item in lst: - if isinstance(item, dict): - self._apply_mask(item) - elif isinstance(item, list): - self._process_list(item) - - def _apply_regex(self, value: str) -> str: - """ - For each string value passed as argument, try - to match the pattern according to the provided - regexes and mask any potential sensitive info. - """ - for pattern in regexes: - value = re.sub(pattern, r"\1{}".format(MASK_STR), value, flags=re.I) - return value - - def _apply_mask(self, yaml_dict: Dict[str, Any]) -> None: - """ - Check and mask value if key of dict matches - with key_regex, else perform action on data - type of value. 
Call _process_list if value - is of type list, call _apply_regex for strings, - recursively call _apply_mask in case value is - of type dict. - """ - for k, v in yaml_dict.items(): - if re.findall(key_regex, k): - yaml_dict[k] = MASK_STR - - elif isinstance(v, str): - yaml_dict[k] = self._apply_regex(v) - - elif isinstance(v, list): - self._process_list(v) - - elif isinstance(v, dict): - self._apply_mask(v) - - def _mask_yaml(self) -> None: - """ - Method to handle masking of yaml files. - Begin with reading yaml and storing in - list (check _read_yaml for return type - info), then process the list to mask - secrets, and then write the encoded - data back. - """ - yaml_list = self._read_yaml() - - if not yaml_list: - return - # we are directly calling _process_list as - # yaml.safe_load_all returns an Iterator of - # dictionaries which we have converted into - # a list (return type of _read_yaml) - self._process_list(yaml_list) - - self._write_yaml(yaml_list) - - def _read_yaml(self) -> Optional[Union[list, None]]: - """ - Read and Load the yaml file for - processing. Using yaml.safe_load_all - to handle all documents within a - single yaml file stream. Return - type (Iterator) is parsed to list - to make in-place change easy. - """ - try: - assert self.path is not None - with open(self.path, "r") as f: - return list(yaml.safe_load_all(f)) - except (FileNotFoundError, yaml.YAMLError) as e: - print(f"Error while reading YAML: {e}") - return None - - def _write_yaml(self, encoded_secret: Any) -> None: - """ - Re-write the processed yaml file in - the same path. - Writing will occur only if there are - changes to the content. 
- """ - try: - assert self.path is not None - if self._read_yaml() != encoded_secret: - with open(self.path, "w") as f: - yaml.safe_dump_all(encoded_secret, f, default_flow_style=False) - except (IOError, yaml.YAMLError) as e: - print(f"Error while writing the masked file: {e}") - - -def crawl(path) -> None: - """ - Crawler function which will crawl through the log directory - and find eligible files for masking. - """ - for root, _, files in os.walk(path, onerror=handle_error): - if any(excluded in root for excluded in EXCLUDED_DIRS): - continue - - for f in files: - if re.search(excluded_file_ext_regex, f) is None: - SecretMask(os.path.join(root, f)).mask() - - -def handle_error(e): - print(f"Error processing file {e}") - - -def parse_opts(argv: list[str]) -> argparse.Namespace: - """ - Utility for the main function: it provides a way to parse - options and return the arguments. - """ - parser = argparse.ArgumentParser(description="Parameters") - parser.add_argument( - "-p", - "--path", - metavar="PATH", - help="Path of the file where the masking \ - should be applied", - ) - parser.add_argument( - "-d", - "--dir", - metavar="DIR_PATH", - help="Path of the directory where the masking \ - should be applied", - ) - opts = parser.parse_args(argv[1:]) - return opts - - -if __name__ == "__main__": - # parse the provided options - OPTS = parse_opts(sys.argv) - - if OPTS.dir is not None and os.path.exists(OPTS.dir): - # craw through the provided directly and then - # process eligible files individually - crawl(OPTS.dir) - - if ( - OPTS.path is not None - and os.path.exists(OPTS.path) - and re.search(excluded_file_ext_regex, OPTS.path) is None - ): - SecretMask(OPTS.path).mask() diff --git a/scripts/tests/samples/nochange.yaml b/scripts/tests/samples/nochange.yaml deleted file mode 100644 index fbc158ace3..0000000000 --- a/scripts/tests/samples/nochange.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: openstack - labels: - 
pod-security.kubernetes.io/enforce: privileged - security.openshift.io/scc.podSecurityLabelSync: "false" diff --git a/scripts/tests/samples/secret1.yaml b/scripts/tests/samples/secret1.yaml deleted file mode 100644 index b393444740..0000000000 --- a/scripts/tests/samples/secret1.yaml +++ /dev/null @@ -1,44 +0,0 @@ -parameter_defaults: - AdminPassword: dummyvalue - AdminToken: dummyvalue - AodhPassword: dummyvalue - HeatStackDomainAdminPassword: dummyvalue - HorizonSecret: dummyvalue - IronicPassword: dummyvalue - KeystoneCredential0: testpass - KeystoneCredential1: testpass - KeystoneFernetKey0: testpass - KeystoneFernetKey1: testpass - MigrationSshKey: - private_key: '-----BEGIN RSA PRIVATE KEY----- - - dummy value - - -----END RSA PRIVATE KEY----- - - ' - public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCj7vO8eBgY5uoU4SheQENoon6NegupSYeQ5OZwFYM9alTMXXapk4Qq1aZbkEsKfNwqL8ZdURUAxHO94SktFf8/OwoI0aNQur7kHWP2x8fEvhE6FtC5xKsOU+jasn8zooTnAhFEv9MLG5HTzPhSZkdMcAtrMjtas3e1kBWhfkVJVIUrQbjelksf1E7l1wADCYxErcwpuSsgaKxv/3M2kDoW1TF4Z1Deb7eY4q87rgpcCMWQ4PihWAhfHpGSZ+GbsSA0KOG1agQIvsqUidFwiaJWsGAyB+WooYa/znhPYNdVQcLoNa7ajHDeWqB6aRCYMVRkbieoMmhNRvJsfpe7JxVx - Generated by TripleO - MysqlClustercheckPassword: dummyvalue - MysqlMariabackupPassword: dummyvalue - MysqlRootPassword: dummyvalue - NeutronMetadataProxySharedSecret: dummyvalue - NeutronPassword: dummyvalue - NotifyPassword: dummyvalue - NovaPassword: dummyvalue - NovajoinPassword: dummyvalue - OctaviaCaKeyPassphrase: dummyvalue - OctaviaHeartbeatKey: dummyvalue - OctaviaPassword: dummyvalue - OctaviaServerCertsKeyPassphrase: dummyvalue - PankoPassword: dummyvalue - PcsdPassword: dummyvalue - PlacementPassword: dummyvalue - RabbitCookie: dummyvalue - RabbitPassword: dummyvalue - RedisPassword: dummyvalue - RpcPassword: dummyvalue - SaharaPassword: dummyvalue - SnmpdReadonlyUserPassword: dummyvalue - SwiftHashSuffix: dummyvalue - SwiftPassword: dummyvalue diff --git a/scripts/tests/samples/secret2.yaml 
b/scripts/tests/samples/secret2.yaml deleted file mode 100644 index c336fe3767..0000000000 --- a/scripts/tests/samples/secret2.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: -labels: - - pairs: - created-by: install_yamls -secretGenerator: -- name: osp-secret - literals: - - AdminPassword=12345678 - - AodhPassword=12345678 - - BarbicanPassword=12345678 - - BarbicanSimpleCryptoKEK=mypass - - CeilometerPassword=12345678 - - DbRootPassword=12345678 - - DatabasePassword=12345678 - - DesignatePassword=12345678 - - PlacementPassword=12345678 - - GlancePassword=12345678 - - NeutronPassword=12345678 - - CinderPassword=12345678 - - IronicPassword=12345678 - - IronicInspectorPassword=12345678 - - KeystoneClientSecret=mysecret - - KeystoneCryptoPassphrase=dummyvalue - - OctaviaPassword=12345678 - - OctaviaHeartbeatKey=12345678 - - NovaPassword=12345678 - - ManilaPassword=12345678 - - MetadataSecret=1234567842 - - HeatPassword=12345678 - - HeatAuthEncryptionKey=mykey - - HeatStackDomainAdminPassword=12345678 - - SwiftPassword=12345678 -- name: libvirt-secret - literals: - - LibvirtPassword=12345678 -- name: octavia-ca-passphrase - literals: - - server-ca-passphrase=12345678 -generatorOptions: - disableNameSuffixHash: true - labels: - type: osp-secret diff --git a/scripts/tests/test_crawl_n_mask.py b/scripts/tests/test_crawl_n_mask.py deleted file mode 100644 index 586bd05659..0000000000 --- a/scripts/tests/test_crawl_n_mask.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/python -import copy -import unittest -import os -import yaml -from typing import Optional, Union -from scripts.crawl_n_mask import SecretMask - -# sample directory used to load yaml files -SAMPLE_DIR = "samples" - - -class TestSecretMask(unittest.TestCase): - """ - The class that implements basic tests for - SecretMask. - """ - - def _read_yaml_sample(self, path) -> list: - """ - utility function to load a sample yaml file. 
- """ - with open(path, "r") as f: - return list(yaml.safe_load_all(f)) - - def test_mask_yaml(self): - """ - For each file present in the tests/samples we: - - Load the file by reading the yaml definition - - Process using the SecreMask module - - assert the content of the secret is - different - """ - for root, _, files in os.walk(SAMPLE_DIR): - for f in files: - actual = self._read_yaml_sample(os.path.join(root, f)) - expected = copy.deepcopy(actual) - # Mask secret by processing the content - # of the yaml file we got - SecretMask(os.path.join(root, f))._process_list(expected) - - """ - files are named secret{1, 2, 3, ... N}: for these secrets - we expect a change in their content because sensitive - data has been masked; the sample file named "nochange.yaml", - instead, is the one that does not contain any sensitive data, - hence no masking is applied and we expect the original data - content being the same as the processed one. - """ - if "nochange" in f: - # the content in nochange.yaml should - # not change after processing it - self.assertEqual(actual, expected) - else: - # The content (secret values) should be - # different - self.assertNotEqual(actual, expected) - - -if __name__ == "__main__": - unittest.main() From dcd027039a670515c084c6424d465ea5a37d9db0 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 28 Mar 2025 11:58:31 +0530 Subject: [PATCH 063/480] Add Mask Secret Task --- plugins/modules/crawl_n_mask.py | 30 ++++++++++++++++++++++-------- roles/artifacts/tasks/main.yml | 8 ++++++++ 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py index c11619c1bf..696ae82798 100755 --- a/plugins/modules/crawl_n_mask.py +++ b/plugins/modules/crawl_n_mask.py @@ -179,7 +179,7 @@ def crawl(module, path) -> bool: continue for f in files: - if re.search(excluded_file_ext_regex, f) is None: + if not re.search(excluded_file_ext_regex, f): file_changed = mask(module, os.path.join(root, 
f)) # even if one file is masked, the final result will be True if file_changed: @@ -203,6 +203,17 @@ def mask(module, path: str) -> bool: def process_list(lst: list) -> None: + """ + For each list we get in our yaml dict, + this method will check the type of item. + If the item in list is dict, it will call + apply_mask method to process it, else if + we get nested list, process_list will be + recursively called. + We are not checking for string as secrets + are mainly in form : in dict, + not in list as item. + """ for item in lst: if isinstance(item, dict): apply_mask(item) @@ -282,7 +293,7 @@ def read_yaml(module, file_path: str) -> Optional[Union[list, None]]: return list(yaml.safe_load_all(f)) except (FileNotFoundError, yaml.YAMLError) as e: module.warn("Error opening file: %s" % e) - return None + return def write_yaml(module, path, encoded_secret: Any) -> bool: @@ -331,22 +342,25 @@ def run_module(): path = params["path"] isdir = params["isdir"] + # validate if the path exists and no wrong value of isdir and path is + # provided + if not os.path.exists(path): + module.fail_json(msg=f"Provided path doesn't exist", path=path) + if os.path.isdir(path) != isdir: + module.fail_json(msg=f"Value of isdir/path is incorrect. 
Please check it") + # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) - if isdir and os.path.exists(path): + if isdir: # craw through the provided directly and then # process eligible files individually changed = crawl(module, path) - if ( - not isdir - and os.path.exists(path) - and re.search(excluded_file_ext_regex, path) is None - ): + if not isdir and not re.search(excluded_file_ext_regex, path): changed = mask(module, path) result.update(changed=changed) diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index 7519b41063..c16d4a633f 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -87,3 +87,11 @@ find {{ cifmw_artifacts_basedir }}/logs -type d -exec chmod 0755 '{}' \; find {{ cifmw_artifacts_basedir }}/artifacts -type f -exec chmod 0644 '{}' \; find {{ cifmw_artifacts_basedir }}/artifacts -type d -exec chmod 0755 '{}' \; + +- name: Mask secrets in yaml log files + crawl_n_mask: + path: "{{ item }}" + isdir: true + loop: + - "{{ cifmw_artifacts_basedir }}/logs" + - "{{ cifmw_artifacts_basedir }}/artifacts" From 80b86240bb73c073585b56a92e728d94efdc4ca0 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 16 Apr 2025 12:48:50 +0530 Subject: [PATCH 064/480] Add test cases for mask_n_crawl module --- plugins/modules/crawl_n_mask.py | 2 +- tests/unit/modules/test_crawl_n_mask.py | 80 +++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 tests/unit/modules/test_crawl_n_mask.py diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py index 696ae82798..01dc57d7ec 100755 --- a/plugins/modules/crawl_n_mask.py +++ b/plugins/modules/crawl_n_mask.py @@ -175,7 +175,7 @@ def crawl(module, path) -> bool: """ changed = False for root, _, files in os.walk(path, 
onerror=handle_walk_errors): - if any(excluded in root for excluded in EXCLUDED_DIRS): + if any(excluded in root.split("/") for excluded in EXCLUDED_DIRS): continue for f in files: diff --git a/tests/unit/modules/test_crawl_n_mask.py b/tests/unit/modules/test_crawl_n_mask.py new file mode 100644 index 0000000000..79c3aca210 --- /dev/null +++ b/tests/unit/modules/test_crawl_n_mask.py @@ -0,0 +1,80 @@ +import pytest +from unittest.mock import patch, MagicMock, mock_open + +from plugins.modules import crawl_n_mask as cnm + + +class TestCrawlNMask: + + @pytest.mark.parametrize( + "test_dir, expected_files", + [ + ("/test", [("/test", [], ["file.yaml"])]), + ("/controller", [("/controller", [], ["another.yaml"])]), + ], + ) + def test_crawl_true(self, test_dir, expected_files): + with patch("os.walk") as mock_walk, patch( + "plugins.modules.crawl_n_mask.mask" + ) as mock_mask: + mock_walk.return_value = expected_files + mock_mask.return_value = True + module = MagicMock() + changed = cnm.crawl(module, test_dir) + assert changed + + @pytest.mark.parametrize( + "test_dir, expected_files", + [ + ("/tmp", [("/tmp", [], ["ignore.yaml"])]), + ("/controller", [("/controller", [], ["notyaml.log"])]), + ("venv", [("venv", [], ["should_be_skipped.yaml"])]), + ("crc", [("crc", [], ["skip_me_venv.yaml"])]), + ], + ) + def test_crawl_false(self, test_dir, expected_files): + with patch("os.walk") as mock_walk, patch( + "plugins.modules.crawl_n_mask.mask" + ) as mock_mask: + mock_walk.return_value = expected_files + mock_mask.return_value = False + module = MagicMock() + changed = cnm.crawl(module, test_dir) + assert not changed + + @patch("builtins.open", new_callable=mock_open, read_data="key: value") + @patch("yaml.safe_load_all") + def test_read_yaml_success(self, mock_load, mock_open_file): + mock_load.return_value = [{"key": "value"}] + module = MagicMock() + result = cnm.read_yaml(module, "/fake/file.yaml") + assert result == [{"key": "value"}] + + def test_apply_regex(self): + 
value = "password=supersecret" + masked = cnm.apply_regex(value) + assert cnm.MASK_STR in masked + + def test_apply_regex_no_match(self): + value = "normal=stuff" + result = cnm.apply_regex(value) + assert result == value + + def test_process_list(self): + data = [{"password": "secret"}, ["nested", {"token": "value"}]] + cnm.process_list(data) + assert data[0]["password"] == cnm.MASK_STR + + def test_apply_mask(self): + data = {"password": "secret", "normal": "data"} + cnm.apply_mask(data) + assert data["password"] == cnm.MASK_STR + + @patch("plugins.modules.crawl_n_mask.read_yaml") + @patch("plugins.modules.crawl_n_mask.write_yaml") + def test_mask_yaml(self, mock_write, mock_read): + mock_read.return_value = [{"password": "secret"}] + mock_write.return_value = True + module = MagicMock() + changed = cnm.mask_yaml(module, "/fake/file.yaml") + assert changed From ac69f5ea2639916ec37dc2a6407ffbca96ed0552 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 22 Apr 2025 08:42:11 +0530 Subject: [PATCH 065/480] Set `ignore_errors` to true for crawl_n_mask We do not want to fail the entire job if our crawl_n_mask module faces any error. --- roles/artifacts/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index c16d4a633f..1c4cae62ec 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -89,6 +89,7 @@ find {{ cifmw_artifacts_basedir }}/artifacts -type d -exec chmod 0755 '{}' \; - name: Mask secrets in yaml log files + ignore_errors: true # noqa: ignore-errors crawl_n_mask: path: "{{ item }}" isdir: true From 784b2807043b384c22f3fcce8df5aac3a5539973 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 22 Apr 2025 17:05:15 +0530 Subject: [PATCH 066/480] Timeout crawl_n_mask task if it's taking too long. We do not want to keep hanged for hours. 
--- roles/artifacts/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index 1c4cae62ec..d34474677a 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -90,6 +90,7 @@ - name: Mask secrets in yaml log files ignore_errors: true # noqa: ignore-errors + timeout: 3600 crawl_n_mask: path: "{{ item }}" isdir: true From 342f5e29c3f36e3e8381c8c1d50b83eac05eadb1 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 22 Apr 2025 17:05:34 +0530 Subject: [PATCH 067/480] Add more protect keys and excluded dirs --- plugins/modules/crawl_n_mask.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py index 01dc57d7ec..36229161c2 100755 --- a/plugins/modules/crawl_n_mask.py +++ b/plugins/modules/crawl_n_mask.py @@ -77,6 +77,7 @@ "openstack-k8s-operators-openstack-must-gather", "tmp", "venv", + ".github", ] # file extensions which we do not want to process EXCLUDED_FILE_EXT = [ @@ -137,6 +138,8 @@ "secret_key", "heartbeat_key", "fernet_keys", + "sshkey", + "keytab_base64", ] # connection keys which may be part of the value itself CONNECTION_KEYS = [ From 4b4fc4900afefb57e70ad22a5e282e1c1214d4f6 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Mon, 21 Apr 2025 09:30:12 +0200 Subject: [PATCH 068/480] Increase ControlPersist timeout to 300 seconds This ControlPersist socket periodically closing seems to be the cause of intermittent MODULE FAULIRE, happening sometimes in downstream CI. Increase the timeout to make this less likely to happen. 
See https://github.com/ansible/ansible/issues/78344 OSPRH-15947 --- ansible.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible.cfg b/ansible.cfg index 3719059b61..e9a4e4181c 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -16,4 +16,4 @@ inventory = inventory.yml pipelining = True any_errors_fatal = True [ssh_connection] -ssh_args = -o ControlMaster=auto -o ControlPersist=60s +ssh_args = -o ControlMaster=auto -o ControlPersist=300 From 1dcf732aff87c08a084073283f8ff5c324528004 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Mon, 14 Apr 2025 09:47:37 +0200 Subject: [PATCH 069/480] [kustomize_deploy] use recursive=True to combine user kustomize values Currently ci-fmw supports defining user kustomize values for a certain architecture step referring to that step with stage_ or . If those values are provided using the both available methods, they should be combined using recursive=True. OSPRH-15775 --- roles/kustomize_deploy/tasks/execute_step.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/kustomize_deploy/tasks/execute_step.yml b/roles/kustomize_deploy/tasks/execute_step.yml index 251dbea744..bfa89a3488 100644 --- a/roles/kustomize_deploy/tasks/execute_step.yml +++ b/roles/kustomize_deploy/tasks/execute_step.yml @@ -147,7 +147,8 @@ ) | combine( _cifmw_kustomize_deploy_user_kustomize[_stage_name][_name] is defined | - ternary(_cifmw_kustomize_deploy_user_kustomize[_stage_name][_name], {}) + ternary(_cifmw_kustomize_deploy_user_kustomize[_stage_name][_name], {}), + recursive=True ) }} cifmw_ci_gen_kustomize_values_userdata_b64: >- From 242c9468bcf8e91243a6458c5ed89500a97de2c4 Mon Sep 17 00:00:00 2001 From: Ella Shulman Date: Mon, 21 Apr 2025 10:52:03 +0000 Subject: [PATCH 070/480] Clean deployment resources as well as operators Allowed cleaning up deployment resources as well as operators. 
--- clean_openstack_deployment.yaml | 10 +++++ roles/kustomize_deploy/tasks/cleanup.yml | 48 +++++++++++++++++++++--- 2 files changed, 53 insertions(+), 5 deletions(-) create mode 100644 clean_openstack_deployment.yaml diff --git a/clean_openstack_deployment.yaml b/clean_openstack_deployment.yaml new file mode 100644 index 0000000000..e70aacad66 --- /dev/null +++ b/clean_openstack_deployment.yaml @@ -0,0 +1,10 @@ +- name: Clean OpenStack deployment + hosts: "{{ target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Clean up OpenStack operators + vars: + cifmw_kustomize_deploy_keep_generated_crs: false + ansible.builtin.include_role: + name: kustomize_deploy + tasks_from: cleanup diff --git a/roles/kustomize_deploy/tasks/cleanup.yml b/roles/kustomize_deploy/tasks/cleanup.yml index adb7d43c63..0b3d6b4320 100644 --- a/roles/kustomize_deploy/tasks/cleanup.yml +++ b/roles/kustomize_deploy/tasks/cleanup.yml @@ -14,14 +14,52 @@ # License for the specific language governing permissions and limitations # under the License. 
+- name: Load architecture automation file + register: _automation + ansible.builtin.slurp: + path: "{{ cifmw_architecture_automation_file }}" + +- name: Prepare automation data + vars: + _parsed: "{{ _automation.content | b64decode | from_yaml }}" + ansible.builtin.set_fact: + cifmw_deploy_architecture_steps: >- + {{ _parsed['vas'][cifmw_architecture_scenario] }} + +- name: Generate list of CRs to delete + vars: + _stages_crs: >- + {{ + cifmw_deploy_architecture_steps['stages'] | + reverse | + selectattr('build_output', 'defined') | + map(attribute='build_output') | + list + }} + _stages_crs_path: >- + {{ + [cifmw_kustomize_deploy_kustomizations_dest_dir] + | product(_stages_crs) + | map('join', '/') + | unique + }} + _operators_crs: + - "{{ cifmw_kustomize_deploy_nmstate_dest_file }}" + - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" + - "{{ cifmw_kustomize_deploy_kustomizations_dest_dir }}/openstack.yaml" + - "{{ cifmw_kustomize_deploy_olm_dest_file }}" + register: _cifmw_kustomize_files + ansible.builtin.set_fact: + cifmw_kustomize_deploy_crs_to_delete: >- + {{ + _stages_crs_path + + _operators_crs + }} + - name: Ensure that kustomization files are present ansible.builtin.stat: path: "{{ item }}" - loop: - - "{{ cifmw_kustomize_deploy_cp_dest_file }}" - - "{{ cifmw_kustomize_deploy_nmstate_dest_file }}" - - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" - - "{{ cifmw_kustomize_deploy_olm_dest_file }}" + loop: "{{ cifmw_kustomize_deploy_crs_to_delete }}" register: _cifmw_kustomize_files - name: Cleaning operators resources From 643b4a5a7d20fcc1ab1db9571bfdd92cccd805cb Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 18 Apr 2025 14:07:08 +0200 Subject: [PATCH 071/480] Fix get cri-o stats script The script was not providing stats properly, because the oc binary is located in different place. 
Signed-off-by: Daniel Pawlik --- scripts/get-stats.sh | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/get-stats.sh b/scripts/get-stats.sh index ae8195a625..51bb597377 100755 --- a/scripts/get-stats.sh +++ b/scripts/get-stats.sh @@ -21,12 +21,24 @@ set -x DURATION_TIME=${DURATION_TIME:-10} -NODE_NAMES=$(/usr/local/bin/oc get node -o name -l node-role.kubernetes.io/worker) +if ! command -v oc; then + PATH=$PATH:/home/zuul/bin +fi + +if ! [ -f "$HOME/.kube/config" ]; then + if [ -f "/home/zuul/.crc/machines/crc/kubeconfig" ]; then + export KUBECONFIG=/home/zuul/.crc/machines/crc/kubeconfig + elif [ -f "/home/zuul/.kube/config" ]; then + export KUBECONFIG=/home/zuul/.kube/config + fi +fi + +NODE_NAMES=$(oc get node -o name -l node-role.kubernetes.io/worker) if [ -z "$NODE_NAMES" ]; then echo "Unable to determine node name with 'oc' command." exit 1 fi for node in $NODE_NAMES; do - /usr/local/bin/oc debug $node -T -- chroot /host /usr/bin/bash -c "crictl stats -a -s $DURATION_TIME | (sed -u 1q; sort -k 2 -h -r)" + oc debug "$node" -T -- chroot /host /usr/bin/bash -c "crictl stats -a -s $DURATION_TIME | (sed -u 1q; sort -k 2 -h -r)" done From adaa8a98bc0a42633b82998a33c876fbb9721fc8 Mon Sep 17 00:00:00 2001 From: Dariusz Smigiel Date: Fri, 18 Apr 2025 13:36:50 -0700 Subject: [PATCH 072/480] Removed variant of cifmw-molecule-tofu job Multiple job definitions with the same name are called variants. These may have different selection criteria which indicate to Zuul that, for instance, the job should behave differently on a different git branch. In this case, we have the same jobs, doing almost the same thing. Removed duplicated job definition. 
--- ci/config/molecule.yaml | 5 +++++ ci/templates/molecule.yaml.j2 | 2 ++ zuul.d/molecule.yaml | 2 ++ zuul.d/tofu.yaml | 12 ------------ 4 files changed, 9 insertions(+), 12 deletions(-) delete mode 100644 zuul.d/tofu.yaml diff --git a/ci/config/molecule.yaml b/ci/config/molecule.yaml index 67821bec97..6030afeb10 100644 --- a/ci/config/molecule.yaml +++ b/ci/config/molecule.yaml @@ -98,3 +98,8 @@ - job: name: cifmw-molecule-shiftstack nodeset: centos-9-crc-2-48-0-xl-ibm +- job: + name: cifmw-molecule-tofu + nodeset: centos-9-crc-2-48-0-xl + files: + - ^ci_framework/playbooks/run_tofu.yml diff --git a/ci/templates/molecule.yaml.j2 b/ci/templates/molecule.yaml.j2 index c70ca7a9e9..551032c8ab 100644 --- a/ci/templates/molecule.yaml.j2 +++ b/ci/templates/molecule.yaml.j2 @@ -1,3 +1,5 @@ +# Don't modify this file. +# If you need apply custom molecule changes, please edit ci/config/molecule.yaml {% set want_list = ['defaults', 'files', 'handlers', 'library', 'lookup_plugins', 'module_utils', 'molecule', 'tasks', 'templates', 'vars'] -%} diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 0b82ef1470..04b2d43469 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -789,7 +789,9 @@ - ^roles/tofu/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* - ^ci/playbooks/molecule.* - ^.config/molecule/.* + - ^ci_framework/playbooks/run_tofu.yml name: cifmw-molecule-tofu + nodeset: centos-9-crc-2-48-0-xl parent: cifmw-molecule-base vars: TEST_RUN: tofu diff --git a/zuul.d/tofu.yaml b/zuul.d/tofu.yaml deleted file mode 100644 index a96151fab3..0000000000 --- a/zuul.d/tofu.yaml +++ /dev/null @@ -1,12 +0,0 @@ -- job: - files: - - ^common-requirements.txt - - ^test-requirements.txt - - ^roles/tofu/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^ci/playbooks/molecule.* - - ^ci_framework/playbooks/run_tofu.yml - name: cifmw-molecule-tofu - nodeset: centos-9-crc-2-48-0-xl - parent: 
cifmw-molecule-base - vars: - TEST_RUN: tofu From 481fc90c6cc87bfbc4a16f3d9473bc9f383e75b4 Mon Sep 17 00:00:00 2001 From: Dariusz Smigiel Date: Mon, 21 Apr 2025 13:13:27 -0700 Subject: [PATCH 073/480] Use python from user's environment To allow for virtualenv usage, force the script to search for python set in an environment variable. By avoiding hard-coding, we don't need to modify system-wide requirements. --- scripts/create_role_molecule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/create_role_molecule.py b/scripts/create_role_molecule.py index ebe5657026..c03bf0f072 100755 --- a/scripts/create_role_molecule.py +++ b/scripts/create_role_molecule.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 # Copyright Red Hat, Inc. # All Rights Reserved. From e055fc3f9acd04d71d703b2fffd19f7bd10bab1e Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 23 Apr 2025 16:06:49 +0530 Subject: [PATCH 074/480] Ensure findall receives string During apply_mask, there is a possibility that our key in yaml_dict is of other data type (bool or int). In such cases, re.findall will fail as it expects string. Due to this, key is typecast to str to avoid this runtime error --- plugins/modules/crawl_n_mask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py index 36229161c2..74c5619405 100755 --- a/plugins/modules/crawl_n_mask.py +++ b/plugins/modules/crawl_n_mask.py @@ -245,7 +245,7 @@ def apply_mask(yaml_dict: Dict[str, Any]) -> None: of type dict. 
""" for k, v in yaml_dict.items(): - if re.findall(key_regex, k): + if re.findall(key_regex, str(k)): yaml_dict[k] = MASK_STR elif isinstance(v, str): From 060c392a35ba49c064943034e332e777b99660c1 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 23 Apr 2025 16:10:35 +0530 Subject: [PATCH 075/480] Add testcase for boolean key Since we are now handling boolean keys too, added a data with boolean key for testing --- tests/unit/modules/test_crawl_n_mask.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_crawl_n_mask.py b/tests/unit/modules/test_crawl_n_mask.py index 79c3aca210..3c5320d859 100644 --- a/tests/unit/modules/test_crawl_n_mask.py +++ b/tests/unit/modules/test_crawl_n_mask.py @@ -61,9 +61,10 @@ def test_apply_regex_no_match(self): assert result == value def test_process_list(self): - data = [{"password": "secret"}, ["nested", {"token": "value"}]] + data = [{"password": "secret"}, ["nested", {"token": "value"}], {True: "test_bool_key"}] cnm.process_list(data) assert data[0]["password"] == cnm.MASK_STR + assert data[2][True] == "test_bool_key" def test_apply_mask(self): data = {"password": "secret", "normal": "data"} From da5de2a49ea2f380e58ce4608831f10a2626201b Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 23 Apr 2025 16:22:42 +0530 Subject: [PATCH 076/480] Add testcase for other data type key Since we are now handling other data type keys too, added data's with int and float key to process_list --- tests/unit/modules/test_crawl_n_mask.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tests/unit/modules/test_crawl_n_mask.py b/tests/unit/modules/test_crawl_n_mask.py index 3c5320d859..e3eb65fea0 100644 --- a/tests/unit/modules/test_crawl_n_mask.py +++ b/tests/unit/modules/test_crawl_n_mask.py @@ -1,3 +1,5 @@ +from xmlrpc.client import Fault + import pytest from unittest.mock import patch, MagicMock, mock_open @@ -60,11 +62,22 @@ def 
test_apply_regex_no_match(self): result = cnm.apply_regex(value) assert result == value - def test_process_list(self): - data = [{"password": "secret"}, ["nested", {"token": "value"}], {True: "test_bool_key"}] + @pytest.mark.parametrize( + "data, ismasked", + [ + ([{"password": "secret"}], True), + ([{"secret": "value"}], True), + ([{True: "test_bool_key"}], False), + ([{1: "int_key"}], False), + ([{1.1: "float_key"}], False), + ], + ) + def test_process_list(self, data, ismasked): cnm.process_list(data) - assert data[0]["password"] == cnm.MASK_STR - assert data[2][True] == "test_bool_key" + if ismasked: + assert cnm.MASK_STR in [list(item.values())[0] for item in data] + else: + assert cnm.MASK_STR not in [list(item.values())[0] for item in data] def test_apply_mask(self): data = {"password": "secret", "normal": "data"} From 4e73b883d54e81bb1a1049d2fa1e885be6da39a3 Mon Sep 17 00:00:00 2001 From: mkatari Date: Tue, 8 Apr 2025 12:25:04 +0530 Subject: [PATCH 077/480] use ceph ingress service vars Using vars will help in overriding the spec as per the requirement during adoptoin. 
Also count:1 will create issues as ingress handles both haproxy and keepalived --- roles/cifmw_cephadm/defaults/main.yml | 2 ++ roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/cifmw_cephadm/defaults/main.yml b/roles/cifmw_cephadm/defaults/main.yml index 5ebfb9f971..5cace63145 100644 --- a/roles/cifmw_cephadm/defaults/main.yml +++ b/roles/cifmw_cephadm/defaults/main.yml @@ -149,3 +149,5 @@ cifmw_cephadm_version: "squid" cifmw_cephadm_prepare_host: false cifmw_cephadm_wait_install_retries: 8 cifmw_cephadm_wait_install_delay: 15 +cifmw_cephadm_rgw_ingress_service_name: "ingress.rgw.default" +cifmw_cephadm_rgw_ingress_service_id: "rgw.default" diff --git a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 index 0c0b2f52c4..d0eb12547e 100644 --- a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 +++ b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 @@ -21,10 +21,8 @@ spec: --- {% if _hosts|length > 1 %} service_type: ingress - service_id: rgw.default - service_name: ingress.rgw.default - placement: - count: 1 + service_id: {{ cifmw_cephadm_rgw_ingress_service_id }} + service_name: {{ cifmw_cephadm_rgw_ingress_service_name }} spec: backend_service: rgw.rgw frontend_port: 8080 From 6aa39bf38e471e455142dceaf780b0bc950c7efc Mon Sep 17 00:00:00 2001 From: Ricardo Diaz Date: Mon, 27 Jan 2025 19:14:33 +0100 Subject: [PATCH 078/480] [reproducers][nfv] Add Networker node to deployment --- .../tasks/edpm_compute_nodeset_values.yml | 1 + .../tasks/edpm_networker_nodeset_values.yml | 1 + .../edpm-common-nodeset-values/values.yaml.j2 | 59 +++++++++ .../values.yaml.j2 | 4 + .../values.yaml.j2 | 4 + .../dt-nfv-ovs-dpdk-sriov-networker.yml | 117 ++++++++++++++++++ 6 files changed, 186 insertions(+) create mode 120000 roles/ci_gen_kustomize_values/tasks/edpm_compute_nodeset_values.yml create mode 120000 roles/ci_gen_kustomize_values/tasks/edpm_networker_nodeset_values.yml 
create mode 100644 roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-networker-nodeset-values/values.yaml.j2 create mode 100644 scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml diff --git a/roles/ci_gen_kustomize_values/tasks/edpm_compute_nodeset_values.yml b/roles/ci_gen_kustomize_values/tasks/edpm_compute_nodeset_values.yml new file mode 120000 index 0000000000..042fdb35f3 --- /dev/null +++ b/roles/ci_gen_kustomize_values/tasks/edpm_compute_nodeset_values.yml @@ -0,0 +1 @@ +edpm_nodeset_values.yml \ No newline at end of file diff --git a/roles/ci_gen_kustomize_values/tasks/edpm_networker_nodeset_values.yml b/roles/ci_gen_kustomize_values/tasks/edpm_networker_nodeset_values.yml new file mode 120000 index 0000000000..042fdb35f3 --- /dev/null +++ b/roles/ci_gen_kustomize_values/tasks/edpm_networker_nodeset_values.yml @@ -0,0 +1 @@ +edpm_nodeset_values.yml \ No newline at end of file diff --git a/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..9961a6fe55 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2 @@ -0,0 +1,59 @@ +# source: ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2 +{% set instance_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% if cifmw_baremetal_hosts | default([]) | length > 0 
%} +{% for _inst in cifmw_baremetal_hosts.keys() %} +{% if (('label' in cifmw_baremetal_hosts[_inst]) and + (cifmw_baremetal_hosts[_inst]['label'] == 'openstack-' ~ node_type)) %} +{% set _ = instance_names.append(_inst) %} +{% endif %} +{% endfor %} +{% else %} +# Needed for verification gate +{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith(_vm_type) %} +{% set _ = instance_names.append(_inst) %} +{% endif %} +{% endfor %} +{% endif %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} +{% if node_type == 'compute' %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} + nodeset: + ansible: + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in instance_names %} + edpm-{{ instance }}: + hostName: {{ instance }} +{% endfor %} + +{% if ('repo-setup' not in (_original_nodeset['services'] | default([]))) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-compute-nodeset-values/values.yaml.j2 
b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..1dc6c360cd --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,4 @@ +--- +# source: ovs-dpdk-sriov-networker/edpm-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% include 'templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..7bed362d53 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/ovs-dpdk-sriov-networker/edpm-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,4 @@ +--- +# source: ovs-dpdk-sriov-networker/edpm-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% include 'templates/ovs-dpdk-sriov-networker/edpm-common-nodeset-values/values.yaml.j2' %} diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml new file mode 100644 index 0000000000..5c12324eb4 --- /dev/null +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml @@ -0,0 +1,117 @@ +--- +cifmw_architecture_scenario: "ovs-dpdk-sriov-networker" + +# Automation section. Most of those parameters will be passed to the +# controller-0 as-is and be consumed by the `deploy-va.sh` script. +# Please note, all paths are on the controller-0, meaning managed by the +# Framework. Please do not edit them! 
+_arch_repo: "{{ cifmw_architecture_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/architecture') }}" + +# HERE if you want to override kustomization, you can uncomment this parameter +# and push the data structure you want to apply. +# cifmw_architecture_user_kustomize: +# stage_0: +# 'network-values': +# data: +# starwars: Obiwan + +# HERE, if you want to stop the deployment loop at any stage, you can uncomment +# the following parameter and update the value to match the stage you want to +# reach. Known stages are: +# pre_kustomize_stage_INDEX +# pre_apply_stage_INDEX +# post_apply_stage_INDEX +# +# cifmw_deploy_architecture_stopper: + +cifmw_libvirt_manager_net_prefix_add: false +cifmw_libvirt_manager_fixed_networks: + - ocpbm + - ocppr + - osp_external + - osp_trunk + +cifmw_libvirt_manager_configuration: + networks: + ocpbm: | + + ocpbm + + + + ocppr: | + + ocppr + + + + osp_external: | + + osp_external + + + + osp_trunk: | + + osp_trunk + + + + vms: + controller: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + ocp: + amount: 3 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "100" + extra_disks_num: 3 + extra_disks_size: "50G" + cpus: 10 + memory: 32 + nets: + - ocppr + - ocpbm + - osp_trunk + - osp_external + +# Note: with that extra_network_names "osp_trunk", we instruct +# devscripts role to create a new network, and associate it to +# the OCP nodes. This one is a "private network", and will hold +# the VLANs used for network isolation. 
+ +# Please create a custom env file to provide: +# cifmw_devscripts_ci_token: +# cifmw_devscripts_pull_secret: + +# Baremetal host configuration +cifmw_config_bmh: true + +# BMH are deployed in a differnt NS than the secret OSP BMO +# references in each BMH. Metal3 requires the referenced +# secrets to be in the same NS or be allowed to access them +cifmw_openshift_setup_metal3_watch_all_ns: true + +# Use EDPM image for computes +cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" + +# Set Logical Volume Manager Storage by default for local storage +cifmw_use_lvms: true +cifmw_lvms_disk_list: + - /dev/vda + - /dev/vdb + - /dev/vdc From 92fd73773ca97c81521441c92426a01a3f798ca7 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 11 Apr 2025 13:27:51 +0200 Subject: [PATCH 079/480] Change hardcoded eth0 value in multinode-customizations play The hardcoded value for controller default network interface is making problems on local deployment of kuttl job. With that patch, few steps would be less to execute. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/multinode-customizations.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/playbooks/multinode-customizations.yml b/ci/playbooks/multinode-customizations.yml index 72f318d830..25932e4974 100644 --- a/ci/playbooks/multinode-customizations.yml +++ b/ci/playbooks/multinode-customizations.yml @@ -283,7 +283,9 @@ - name: Get the default iface connection register: controller_default_connection_out ansible.builtin.command: - cmd: "nmcli -g general.connection device show eth0" + cmd: >- + nmcli -g general.connection + device show {{ cifmw_controller_interface_name | default('eth0') }} - name: Prepend CRC DNS server in the controllers default Network Manager connection configuation vars: From b2e186d8207ce417cfcf88a4b6e362f60b2ac148 Mon Sep 17 00:00:00 2001 From: jamepark4 Date: Thu, 8 Aug 2024 15:12:36 -0400 Subject: [PATCH 080/480] Add insecure registries to DT/VA style deployments --- roles/openshift_setup/tasks/main.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/roles/openshift_setup/tasks/main.yml b/roles/openshift_setup/tasks/main.yml index 8ee2ff3592..0d4fc9159c 100644 --- a/roles/openshift_setup/tasks/main.yml +++ b/roles/openshift_setup/tasks/main.yml @@ -168,6 +168,33 @@ additionalTrustedCA: name: "registry-cas" +- name: Add insecure registry + when: cifmw_update_containers_registry is defined + vars: + default_allowed_registries: + - "quay.io" + - "gcr.io" + - "registry.redhat.io" + - "registry-proxy.engineering.redhat.com" + - "images.paas.redhat.com" + - "image-registry.openshift-image-registry.svc:5000" + all_registries: "{{ [cifmw_update_containers_registry] + default_allowed_registries | unique }}" + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + merge_type: "merge" + definition: + apiVersion: config.openshift.io/v1 
+ kind: Image + metadata: + name: cluster + spec: + registrySources: + insecureRegistries: + - "{{ cifmw_update_containers_registry }}" + allowedRegistries: "{{ all_registries }}" + - name: Create a ICSP with repository digest mirrors when: - cifmw_openshift_setup_digest_mirrors is defined From 052cac686d7b3eb561dbf0b6e5710468c9e0a115 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Tue, 28 Jan 2025 09:39:52 +0100 Subject: [PATCH 081/480] Remove rgw default zone and realm We don't test RGW multisite in OpenStack. From squid+ a default realm is not deployed anymore, resulting in RGW deployment failures. This patch removes both default realm and zone from the RGW spec. Signed-off-by: Francesco Pantano --- roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 index d0eb12547e..76ba5ee265 100644 --- a/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 +++ b/roles/cifmw_cephadm/templates/ceph_rgw.yml.j2 @@ -11,8 +11,6 @@ networks: - {{ cifmw_cephadm_rgw_network }} spec: rgw_frontend_port: 8082 - rgw_realm: default - rgw_zone: default {% if rgw_frontend_cert is defined %} ssl: true rgw_frontend_ssl_certificate: | From f8e17fe8789766d409037ec5cbf6e8cac77816e0 Mon Sep 17 00:00:00 2001 From: David Rosenfeld Date: Wed, 9 Apr 2025 11:01:11 -0400 Subject: [PATCH 082/480] Create bmh compute replace validator Add a validator that verifies a faulty bmh compute node may be replaced --- roles/validations/defaults/main.yml | 6 + .../tasks/edpm/bmh_compute_replace.yml | 196 ++++++++++++++++++ 2 files changed, 202 insertions(+) create mode 100644 roles/validations/tasks/edpm/bmh_compute_replace.yml diff --git a/roles/validations/defaults/main.yml b/roles/validations/defaults/main.yml index 9657c2d352..f33253a419 100644 --- a/roles/validations/defaults/main.yml +++ b/roles/validations/defaults/main.yml @@ -51,3 +51,9 @@ 
cifmw_validations_xml_status_file_dir: "{{ cifmw_validations_basedir }}/tests/va cifmw_validations_edpm_scale_down_hostname: compute-2.ctlplane.example.com cifmw_validations_edpm_scale_down_nodename: edpm-compute-2 cifmw_validations_timeout: 100 + +# variables needed for bmh compute replacement +cifmw_validations_bmh_replace_leaf_label: leaf0-1 +cifmw_validations_bmh_spare_leaf_label: leaf0-0 +cifmw_validations_bmh_spare_nodename: edpm-compute-0-0 +cifmw_validations_bmh_spare_hostname: edpm-compute-0-0.ctlplane.openstack.lab diff --git a/roles/validations/tasks/edpm/bmh_compute_replace.yml b/roles/validations/tasks/edpm/bmh_compute_replace.yml new file mode 100644 index 0000000000..2947c2756d --- /dev/null +++ b/roles/validations/tasks/edpm/bmh_compute_replace.yml @@ -0,0 +1,196 @@ +# This job tests the functionality of the openstack-operator to replace a +# bmh compute node. +# +# This job was created to satisfy: +# https://issues.redhat.com/browse/OSPRH-15061 + +- name: Get name of nodeset containing bmh node to be used as spare node + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} get bmh {{ cifmw_validations_bmh_spare_leaf_label }} -o jsonpath='{.spec.consumerRef.name}' + register: bmh_nodeset_name + +- name: Verify bmh node being used as spare is provisioned before scale down + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} get bmh {{ cifmw_validations_bmh_spare_leaf_label }} -o jsonpath='{.status.provisioning.state}' + register: bmh_nodes_before_scale_down + failed_when: bmh_nodes_before_scale_down.stdout != "provisioned" + +- name: Get compute service list + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service list + register: compute_service_list_out 
+ until: '"{{ cifmw_validations_bmh_spare_hostname }}" in compute_service_list_out.stdout' + +- name: Disable nova-compute for node being removed + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service set {{ cifmw_validations_bmh_spare_hostname }} nova-compute --disable + register: compute_service_set_out + until: '"Failed" not in compute_service_set_out.stdout' + +- name: Get ovn controller id of host to be removed + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent list --host {{ cifmw_validations_bmh_spare_hostname }} | grep "OVN Controller agent" | awk '{print $2}' + register: remove_ovn_id + +- name: Delete network agent for compute being removed + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack network agent delete {{ remove_ovn_id.stdout }} + +- name: Get compute service id of host to be removed + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service list --host {{ cifmw_validations_bmh_spare_hostname }} -f value -c ID + register: remove_compute_service_id + +- name: Delete compute service for node being removed + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} rsh openstackclient openstack compute service delete {{ remove_compute_service_id.stdout }} + +- name: Patch nodeset to remove node + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + 
script: >- + oc patch -n {{ cifmw_validations_namespace }} osdpns/"{{ bmh_nodeset_name.stdout | trim}}" --type=json --patch '[{ "op": "remove", "path": "/spec/nodes/{{ cifmw_validations_bmh_spare_nodename }}" }]' + +- name: Wait for nodeset to be SetupReady again + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc wait osdpns "{{ bmh_nodeset_name.stdout | trim }}" + --namespace={{ cifmw_validations_namespace }} + --for=condition=SetupReady + --timeout={{ cifmw_validations_timeout }}m + +- name: Patch spare bmh node to change its label to match label of node being replaced + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc patch -n {{ cifmw_validations_namespace }} bmh/"{{ cifmw_validations_bmh_spare_leaf_label }}" --type=json --patch '[{ "op": "replace", "path": "/metadata/labels/nodeName", "value": "{{ cifmw_validations_bmh_replace_leaf_label }}" }]' + +- name: Verify bmh node being used as spare is available after changing label + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} get bmh {{ cifmw_validations_bmh_spare_leaf_label }} -o jsonpath='{.status.provisioning.state}' + register: bmh_nodes_after_scale_down + until: bmh_nodes_after_scale_down.stdout == "available" + retries: 20 + delay: 20 + +- name: Create openstackdataplanedeployment to deploy the scaledown + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment edpm-scaledown + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + 
--timeout={{ cifmw_validations_timeout }}m + +- name: Delete faulty baremetal node + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc -n {{ cifmw_validations_namespace }} delete bmh {{ cifmw_validations_bmh_replace_leaf_label }} + +- name: Wait for nodeset to be SetupReady + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc wait osdpns "{{ bmh_nodeset_name.stdout | trim }}" + --namespace={{ cifmw_validations_namespace }} + --for=condition=SetupReady + --timeout={{ cifmw_validations_timeout }}m + +- name: Create openstackdataplanedeployment to deploy the compute replacement + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment edpm-compute-replacement + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + --timeout={{ cifmw_validations_timeout }}m + +- name: Wait for nodeset to be Ready + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc wait osdpns "{{ bmh_nodeset_name.stdout | trim }}" + --namespace={{ cifmw_validations_namespace }} + --for=condition=Ready + --timeout={{ cifmw_validations_timeout }}m From db98dc955bb73183687c949224430e4c168fe996 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 24 Apr 2025 11:44:09 +0200 Subject: [PATCH 083/480] Do not print oc completion When Ansible is executed in verbose mode, the oc completion output is not needed to print. 
Signed-off-by: Daniel Pawlik --- roles/ci_setup/tasks/packages.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/ci_setup/tasks/packages.yml b/roles/ci_setup/tasks/packages.yml index 35a2089fba..4cf04256a2 100644 --- a/roles/ci_setup/tasks/packages.yml +++ b/roles/ci_setup/tasks/packages.yml @@ -73,6 +73,7 @@ {{ cifmw_ci_setup_oc_install_path }}/oc completion bash | tee -a ~/.oc_completion creates: "{{ ansible_user_dir }}/.oc_completion" + no_log: true - name: Source completion from within .bashrc ansible.builtin.blockinfile: From 6b50f52000538c392eb5196970417a36ff3a858e Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Fri, 25 Apr 2025 23:56:40 +0200 Subject: [PATCH 084/480] cd_dcn_site: Set the correct hostname Set the correct hostname (without the extra prefix "edpm-"). The hostname of the compute nodes is set to a name without the "edpm-" prefix by ci-fmw when the VMs are provisioned. Once the dataplane is deployed with a different hostname which would be configured in the OCP DNS used by dataplane nodes (after dataplane deployed) The original hostname which is still set on the dataplane nodes would not be recognized by the DNS and the correct domain would not be used. 
Instead the original domain which was set by DHCP during the VM nodes provisioning by ci-framework would be set until the DHCP lease is lost and that eventually leads to fqdn of dataplane nodes mismatch --- roles/ci_dcn_site/tasks/az.yml | 6 ++---- .../templates/edpm-pre-ceph/nodeset/values.yaml.j2 | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/roles/ci_dcn_site/tasks/az.yml b/roles/ci_dcn_site/tasks/az.yml index 7cd11b9208..94a85b46e9 100644 --- a/roles/ci_dcn_site/tasks/az.yml +++ b/roles/ci_dcn_site/tasks/az.yml @@ -24,13 +24,11 @@ command: >- openstack aggregate show {{ _az }} -c hosts -f value -- name: Convert az_hosts string to list and remove extra text +- name: Convert az_hosts string to list ansible.builtin.set_fact: az_hosts_list: > {{ az_hosts.stdout | default([]) - | from_yaml - | map('regex_replace', 'edpm-compute-(.*?)\\..*', 'compute-\\1') | list }} - name: Create AZ if it does not exist @@ -54,4 +52,4 @@ namespace: openstack pod: openstackclient command: >- - openstack aggregate add host {{ _az }} edpm-{{ item.key }}.ctlplane.example.com + openstack aggregate add host {{ _az }} {{ item.key }}.ctlplane.example.com diff --git a/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 index 99f1f46974..90dc6a16b6 100644 --- a/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 +++ b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 @@ -130,7 +130,7 @@ data: edpm-{{ _host_name }}: ansible: ansibleHost: {{ network_data['ip_v4'] }} - hostName: edpm-{{ _host_name }} + hostName: {{ _host_name }} networks: - defaultRoute: true fixedIP: {{ network_data['ip_v4'] }} From d897d3cfa1a467553df1e8c904d191f1ecc22cf1 Mon Sep 17 00:00:00 2001 From: Dariusz Smigiel Date: Tue, 29 Apr 2025 11:22:59 -0700 Subject: [PATCH 085/480] Convert pre-run/post-run to lists Zuul accept list or string as a value for pre-run and post-run. 
During a process of building jobs, Zuul combines pre/post-runs into lists. To avoid confusions, convert values into lists. https://zuul-ci.org/docs/zuul/latest/config/job.html#attr-job.pre-run https://zuul-ci.org/docs/zuul/latest/config/job.html#attr-job.post-run --- zuul.d/base.yaml | 3 ++- zuul.d/edpm_build_images.yaml | 3 ++- zuul.d/edpm_build_images_content_provider.yaml | 3 ++- zuul.d/molecule-base.yaml | 6 ++++-- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 3333cafc78..25e92d2fc8 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -15,7 +15,8 @@ pre-run: - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml - post-run: ci/playbooks/collect-logs.yml + post-run: + - ci/playbooks/collect-logs.yml # # CONTENT PROVIDER diff --git a/zuul.d/edpm_build_images.yaml b/zuul.d/edpm_build_images.yaml index 958b84649f..e790dad22e 100644 --- a/zuul.d/edpm_build_images.yaml +++ b/zuul.d/edpm_build_images.yaml @@ -12,7 +12,8 @@ run: - ci/playbooks/dump_zuul_data.yml - ci/playbooks/edpm_build_images/run.yml - post-run: ci/playbooks/collect-logs.yml + post-run: + - ci/playbooks/collect-logs.yml vars: cifmw_zuul_target_host: controller cifmw_repo_setup_branch: antelope diff --git a/zuul.d/edpm_build_images_content_provider.yaml b/zuul.d/edpm_build_images_content_provider.yaml index e0dc658d59..da36923a60 100644 --- a/zuul.d/edpm_build_images_content_provider.yaml +++ b/zuul.d/edpm_build_images_content_provider.yaml @@ -14,7 +14,8 @@ - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/edpm_build_images/edpm_build_images_content_provider_run.yaml - post-run: ci/playbooks/collect-logs.yml + post-run: + - ci/playbooks/collect-logs.yml vars: cifmw_artifacts_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" cifmw_repo_setup_branch: antelope diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 22919e9cab..61ed2efe6d 100644 --- 
a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -10,7 +10,8 @@ - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml - post-run: ci/playbooks/collect-logs.yml + post-run: + - ci/playbooks/collect-logs.yml roles: - zuul: rdo-jobs required-projects: @@ -31,7 +32,8 @@ - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml - post-run: ci/playbooks/collect-logs.yml + post-run: + - ci/playbooks/collect-logs.yml required-projects: - github.com/openstack-k8s-operators/install_yamls vars: From 0f6096b1e451dc53be1b2bac125c5c11a150a754 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 24 Apr 2025 15:12:05 +0200 Subject: [PATCH 086/480] Change callback plugin to default; format callback result to yaml The yaml callback plugin is deprecated [1] and it has been replaced by ansible.builtin.default. Also enable callback_format_pretty to yaml that is more easy to read comparing to json. 
[1] https://docs.ansible.com/ansible/latest/collections/community/general/yaml_callback.html Signed-off-by: Daniel Pawlik --- ansible.cfg | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index e9a4e4181c..1a19201b31 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -5,8 +5,11 @@ roles_path = ~/ci-framework-data/artifacts/roles:./roles:/usr/share/ansible/role filter_plugins = ./plugins/filter:~/plugins/filter:/usr/share/ansible/plugins/filter log_path = ~/ansible.log # We may consider ansible.builtin.junit -callbacks_enabled = ansible.posix.profile_tasks,yaml -stdout_callback = yaml +callbacks_enabled = ansible.posix.profile_tasks,ansible.builtin.default +stdout_callback = ansible.builtin.default +callback_format_pretty = yaml +callback_result_format = yaml +show_task_path_on_failure = true display_args_to_stdout = True gathering = smart fact_caching = jsonfile From 50379de3def5908ef08edffbf013ff33295611cc Mon Sep 17 00:00:00 2001 From: Dariusz Smigiel Date: Fri, 2 May 2025 10:33:30 -0700 Subject: [PATCH 087/480] Updated dictionary The spellcheck is failing due to missing words. 
--- docs/dictionary/en-custom.txt | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 3139e7802e..f19c53b9dd 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -3,6 +3,8 @@ abcdefghij addr afuscoar alertmanager +Amartya +amartyasinha ansible ansibleee ansibletest @@ -66,7 +68,10 @@ chandan changeme changerefspec changerepository +chattr chdir +chmod +chown chrony chronyc cidr @@ -112,8 +117,8 @@ ctl ctlplane ctrl ctx -cve customizations +cve dashboard dataplane dataplanedeployments @@ -137,6 +142,7 @@ dfg dhcp dib dicts +dirs disablecertificateverification disksize distro @@ -239,6 +245,7 @@ ipmi ips ipv iscsi +isdir itldwuw iybbbnnpymxlig iywxdcgpmc @@ -280,9 +287,9 @@ lajly ldp libguestfs libvirt +libvirt's libvirtd libvirterror -libvirt's ljaumtawojy ljaumtaxojy ljaumtayojy @@ -295,6 +302,7 @@ logserver lookups loopback losetup +lsattr lsblk luks lv @@ -495,6 +503,7 @@ sha shiftstack shiftstackclient sig +Sinha sizepercent skbg skiplist From d15dd58e8b7322e61f59c4af1c4964644cb75ebd Mon Sep 17 00:00:00 2001 From: Dariusz Smigiel Date: Fri, 2 May 2025 12:24:39 -0700 Subject: [PATCH 088/480] Removed rdo-jobs requirement from molecule Decouple rdo-jobs from cifmw-molecule --- zuul.d/molecule-base.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 61ed2efe6d..01c41e6aa2 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -12,12 +12,8 @@ run: ci/playbooks/molecule-test.yml post-run: - ci/playbooks/collect-logs.yml - roles: - - zuul: rdo-jobs required-projects: - github.com/openstack-k8s-operators/install_yamls - - name: rdo-jobs - override-checkout: master vars: roles_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/roles/{{ TEST_RUN }}" mol_config_dir: "{{ ansible_user_dir }}/{{ 
zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/.config/molecule/config_local.yml" From 3656f9402a9ff87f2097248c7ae9019df99d6143 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 28 Apr 2025 08:59:32 +0200 Subject: [PATCH 089/480] Change the way how the hooks playbook is executed In many places, it was done that Ansible playbook is importing another ansible playbook, which is importing again another playbook and another. It is too complex and after doing simple improvement as it was done in this change by removing import_playbook: hooks.yml, we see that those tasks can be running in same play. With that approach we can: - easy understand playbook/role execution, - better control variables, - in some part faster execution due there is no need to make delegation or gather_facts, - easier to debug Signed-off-by: Daniel Pawlik --- ci/playbooks/kuttl/e2e-kuttl.yml | 41 ++++++++-------------------- deploy-edpm.yml | 16 +++++++---- playbooks/02-infra.yml | 21 +++++++++----- playbooks/03-build-packages.yml | 21 ++++++++------ playbooks/04-build-containers.yml | 21 ++++++++------ playbooks/05-build-operators.yml | 21 ++++++++------ playbooks/06-deploy-architecture.yml | 28 ++++++++++--------- playbooks/06-deploy-edpm.yml | 41 +++++++++++++++------------- playbooks/07-admin-setup.yml | 21 ++++++++------ playbooks/08-run-tests.yml | 21 ++++++++------ playbooks/98-pre-end.yml | 4 --- playbooks/hooks.yml | 3 ++ playbooks/update.yml | 21 ++++++++------ 13 files changed, 148 insertions(+), 132 deletions(-) delete mode 100644 playbooks/98-pre-end.yml diff --git a/ci/playbooks/kuttl/e2e-kuttl.yml b/ci/playbooks/kuttl/e2e-kuttl.yml index 0ac6b746ef..4cd9dd4c93 100644 --- a/ci/playbooks/kuttl/e2e-kuttl.yml +++ b/ci/playbooks/kuttl/e2e-kuttl.yml @@ -33,42 +33,25 @@ name: "install_yamls_makes" tasks_from: "make_crc_attach_default_interface" -- name: Run pre_kuttl hooks - vars: - hooks: "{{ pre_kuttl | default([]) }}" - step: pre_kuttl - 
ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - 'hooks.yml' - ] | ansible.builtin.path_join - }} + - name: Run pre_kuttl hooks + vars: + hooks: "{{ pre_kuttl | default([]) }}" + step: pre_kuttl + ansible.builtin.import_role: + name: run_hook -- name: Run KUTTL operator tests - hosts: "{{ cifmw_target_host | default('localhost') }}" - tasks: - name: Run kuttl tests ansible.builtin.include_tasks: run-kuttl-tests.yml loop: "{{ cifmw_kuttl_tests_operator_list | default(['cinder' 'keystone']) }}" loop_control: loop_var: operator -- name: Run post_kuttl hooks - vars: - hooks: "{{ post_kuttl | default([]) }}" - step: post_kuttl - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - 'hooks.yml' - ] | ansible.builtin.path_join - }} + - name: Run post_kuttl hooks + vars: + hooks: "{{ post_kuttl | default([]) }}" + step: post_kuttl + ansible.builtin.import_role: + name: run_hook - name: Run log related tasks ansible.builtin.import_playbook: >- diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 83cd164578..79d5065d26 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -69,14 +69,18 @@ tags: - compliance -- name: Run log related tasks - ansible.builtin.import_playbook: playbooks/98-pre-end.yml - tags: - - pre-end - -- name: Inject status flag +- name: Run hooks and inject status flag hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false tasks: + - name: Run pre_end hooks + tags: + - pre-end + vars: + step: pre_end + ansible.builtin.import_role: + name: run_hook + - name: Inject success flag ansible.builtin.file: path: "{{ ansible_user_dir }}/cifmw-success" diff --git a/playbooks/02-infra.yml b/playbooks/02-infra.yml index 14a07e8fa7..06d2ce30cf 100644 --- a/playbooks/02-infra.yml +++ b/playbooks/02-infra.yml @@ -1,7 +1,13 @@ +--- - name: 
Run pre_infra hooks - vars: - step: pre_infra - ansible.builtin.import_playbook: ./hooks.yml + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run pre_infra hooks + vars: + step: pre_infra + ansible.builtin.import_role: + name: run_hook - name: Prepare host virtualization hosts: "{{ ('virthosts' in groups) | ternary('virthosts', cifmw_target_host | default('localhost') ) }}" @@ -129,7 +135,8 @@ ansible.builtin.include_role: name: pkg_build -- name: Run post_infra hooks - vars: - step: post_infra - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_infra hooks + vars: + step: post_infra + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/03-build-packages.yml b/playbooks/03-build-packages.yml index a2a1f5ab43..b29129a38e 100644 --- a/playbooks/03-build-packages.yml +++ b/playbooks/03-build-packages.yml @@ -1,12 +1,14 @@ -- name: Run pre_package_build hooks - vars: - step: pre_package_build - ansible.builtin.import_playbook: ./hooks.yml - +--- - name: Build package playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_package_build hooks + vars: + step: pre_package_build + ansible.builtin.import_role: + name: run_hook + - name: Load parameters files ansible.builtin.include_vars: dir: "{{ cifmw_basedir }}/artifacts/parameters" @@ -19,7 +21,8 @@ name: pkg_build tasks_from: build.yml -- name: Run post_package_build hooks - vars: - step: post_package_build - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_package_build hooks + vars: + step: post_package_build + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/04-build-containers.yml b/playbooks/04-build-containers.yml index 4e486c77ff..6c0a303231 100644 --- a/playbooks/04-build-containers.yml +++ b/playbooks/04-build-containers.yml @@ -1,12 +1,14 @@ -- name: Run pre_container_build hooks - vars: - step: pre_container_build - 
ansible.builtin.import_playbook: ./hooks.yml - +--- - name: Build container playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_container_build hooks + vars: + step: pre_container_build + ansible.builtin.import_role: + name: run_hook + - name: Load parameters files ansible.builtin.include_vars: dir: "{{ cifmw_basedir }}/artifacts/parameters" @@ -15,7 +17,8 @@ ansible.builtin.debug: msg: "No support for that step yet" -- name: Run post_container_build hooks - vars: - step: post_container_build - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_container_build hooks + vars: + step: post_container_build + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/05-build-operators.yml b/playbooks/05-build-operators.yml index e6e5f6a7bf..28218f4908 100644 --- a/playbooks/05-build-operators.yml +++ b/playbooks/05-build-operators.yml @@ -1,14 +1,16 @@ -- name: Run pre_operator_build hooks - vars: - step: pre_operator_build - ansible.builtin.import_playbook: ./hooks.yml - +--- - name: Build operators playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false environment: PATH: "{{ cifmw_path }}" tasks: + - name: Run pre_operator_build hooks + vars: + step: pre_operator_build + ansible.builtin.import_role: + name: run_hook + - name: Load parameters files ansible.builtin.include_vars: dir: "{{ cifmw_basedir }}/artifacts/parameters" @@ -20,7 +22,8 @@ ansible.builtin.import_role: name: operator_build -- name: Run post_operator_build hooks - vars: - step: post_operator_build - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_operator_build hooks + vars: + step: post_operator_build + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index edcb69300f..d87a7c3125 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -1,14 +1,15 @@ 
--- -- name: Run pre_deploy hooks - when: - - cifmw_architecture_scenario is defined - vars: - step: pre_deploy - ansible.builtin.import_playbook: ./hooks.yml - - name: Deploy VA hosts: "{{ cifmw_target_host | default('localhost') }}" tasks: + - name: Run pre_deploy hooks + when: + - cifmw_architecture_scenario is defined + vars: + step: pre_deploy + ansible.builtin.import_role: + name: run_hook + # end_play will end only current play, not the main edpm-deploy.yml - name: Early end if not architecture deploy tags: @@ -280,12 +281,13 @@ nova-cell0-conductor-0 nova-manage cell_v2 discover_hosts --verbose -- name: Run post_deploy hooks - when: - - cifmw_architecture_scenario is defined - vars: - step: post_deploy - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_deploy hooks + when: + - cifmw_architecture_scenario is defined + vars: + step: post_deploy + ansible.builtin.import_role: + name: run_hook - name: Validations workflow ansible.builtin.import_playbook: validations.yml diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 9a4718e843..014705112c 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -1,15 +1,16 @@ --- -- name: Run pre_deploy hooks - when: - - cifmw_architecture_scenario is not defined - vars: - step: pre_deploy - ansible.builtin.import_playbook: ./hooks.yml - - name: Deploy podified control plane hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_deploy hooks + when: + - cifmw_architecture_scenario is not defined + vars: + step: pre_deploy + ansible.builtin.import_role: + name: run_hook + # end_play will end only current play, not the main edpm-deploy.yml - name: Early end if architecture deploy when: @@ -34,12 +35,13 @@ ansible.builtin.include_role: name: edpm_prepare -- name: Run post_ctlplane_deploy hooks - when: - - cifmw_architecture_scenario is undefined - vars: - step: post_ctlplane_deploy - ansible.builtin.import_playbook: 
./hooks.yml + - name: Run post_ctlplane_deploy hooks + when: + - cifmw_architecture_scenario is undefined + vars: + step: post_ctlplane_deploy + ansible.builtin.import_role: + name: run_hook - name: EDPM deployment on virtual baremetal hosts: "{{ cifmw_target_host | default('localhost') }}" @@ -150,12 +152,13 @@ vars: cifmw_edpm_deploy_prepare_run: false -- name: Run post_deploy hooks - when: - - cifmw_architecture_scenario is not defined - vars: - step: post_deploy - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_deploy hooks + when: + - cifmw_architecture_scenario is not defined + vars: + step: post_deploy + ansible.builtin.import_role: + name: run_hook - name: Validations workflow # If we're doing an architecture deployment, we need to skip validations here. diff --git a/playbooks/07-admin-setup.yml b/playbooks/07-admin-setup.yml index 3e8c524585..7513263a98 100644 --- a/playbooks/07-admin-setup.yml +++ b/playbooks/07-admin-setup.yml @@ -1,12 +1,14 @@ -- name: Run pre_admin_setup hooks - vars: - step: pre_admin_setup - ansible.builtin.import_playbook: ./hooks.yml - +--- - name: Post-deployment admin setup steps hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_admin_setup hooks + vars: + step: pre_admin_setup + ansible.builtin.import_role: + name: run_hook + - name: Load parameters files ansible.builtin.include_vars: dir: "{{ cifmw_basedir }}/artifacts/parameters" @@ -16,7 +18,8 @@ name: os_net_setup when: not cifmw_skip_os_net_setup | default('false') | bool -- name: Run post_admin_setup hooks - vars: - step: post_admin_setup - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_admin_setup hooks + vars: + step: post_admin_setup + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/08-run-tests.yml b/playbooks/08-run-tests.yml index 31bf5ee818..70fbf9a105 100644 --- a/playbooks/08-run-tests.yml +++ b/playbooks/08-run-tests.yml @@ -1,12 +1,14 @@ -- name: "Run 
pre_tests hooks" - vars: - step: pre_tests - ansible.builtin.import_playbook: ./hooks.yml - +--- - name: "Test playbook" hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_tests hooks + vars: + step: pre_tests + ansible.builtin.import_role: + name: run_hook + # end_play will end only current play, not the main edpm-deploy.yml - name: Early exit if no tests when: @@ -19,7 +21,8 @@ ansible.builtin.import_role: name: "{{ cifmw_run_test_role | default('tempest') }}" -- name: "Run post_tests hooks" - vars: - step: post_tests - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_tests hooks + vars: + step: post_tests + ansible.builtin.import_role: + name: run_hook diff --git a/playbooks/98-pre-end.yml b/playbooks/98-pre-end.yml deleted file mode 100644 index 3c83593695..0000000000 --- a/playbooks/98-pre-end.yml +++ /dev/null @@ -1,4 +0,0 @@ -- name: "Run pre_end hooks" - vars: - step: pre_end - ansible.builtin.import_playbook: ./hooks.yml diff --git a/playbooks/hooks.yml b/playbooks/hooks.yml index 4db245be70..16be300236 100644 --- a/playbooks/hooks.yml +++ b/playbooks/hooks.yml @@ -1,4 +1,7 @@ --- +##### DEPRECATION ##### +# Do not use that playbook. Execute the role directly. 
+####################### - name: Hook playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/playbooks/update.yml b/playbooks/update.yml index ff2bbc0031..631205ee54 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -1,12 +1,14 @@ -- name: Run pre_update hooks - vars: - step: pre_update - ansible.builtin.import_playbook: ./hooks.yml - +--- - name: Add comptatibility support to install_yamls hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Run pre_update hooks + vars: + step: pre_update + ansible.builtin.import_role: + name: run_hook + - name: Comptatibility layer with install_yamls when: - cifmw_architecture_scenario is defined @@ -99,7 +101,8 @@ ansible.builtin.import_role: name: update -- name: Run post_update hooks - vars: - step: post_update - ansible.builtin.import_playbook: ./hooks.yml + - name: Run post_update hooks + vars: + step: post_update + ansible.builtin.import_role: + name: run_hook From 7c731210430cd628f63b3b636cabcccd273a95df Mon Sep 17 00:00:00 2001 From: bshewale Date: Fri, 4 Apr 2025 17:41:15 +0530 Subject: [PATCH 090/480] Use correct ansible module As in the ansible we don't have `var` in ansible.builtin.debug instead it should be `msg` so changed that. 
--- roles/dlrn_promote/tasks/check_for_previous_promotions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/dlrn_promote/tasks/check_for_previous_promotions.yml b/roles/dlrn_promote/tasks/check_for_previous_promotions.yml index 2985801429..3f1c32bb5f 100644 --- a/roles/dlrn_promote/tasks/check_for_previous_promotions.yml +++ b/roles/dlrn_promote/tasks/check_for_previous_promotions.yml @@ -51,4 +51,4 @@ - name: Print the cifmw_dlrn_promote_hash_in_promote_target value ansible.builtin.debug: - var: cifmw_dlrn_promote_hash_in_promote_target + msg: "{{ cifmw_dlrn_promote_hash_in_promote_target }}" From 32a56142e7529bd7e225cbd336d484207e413413 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Mon, 28 Apr 2025 14:58:45 +0200 Subject: [PATCH 091/480] Increase CEPH_TIMEOUT for component tests Signed-off-by: Francesco Pantano --- scenarios/centos-9/ceph_backends.yml | 2 +- scenarios/centos-9/ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/centos-9/ceph_backends.yml b/scenarios/centos-9/ceph_backends.yml index 63bd81967a..b8f1c2cc93 100644 --- a/scenarios/centos-9/ceph_backends.yml +++ b/scenarios/centos-9/ceph_backends.yml @@ -8,7 +8,7 @@ cifmw_install_yamls_vars: cifmw_edpm_prepare_skip_crc_storage_creation: true cifmw_make_ceph_environment: - CEPH_TIMEOUT: 90 + CEPH_TIMEOUT: 120 CEPH_DATASIZE: "10Gi" pre_deploy: diff --git a/scenarios/centos-9/ci.yml b/scenarios/centos-9/ci.yml index 0e5d8e994d..e289b70a47 100644 --- a/scenarios/centos-9/ci.yml +++ b/scenarios/centos-9/ci.yml @@ -38,4 +38,4 @@ cifmw_run_tests: true # to load it and consume the parameters properly # Check hooks/playbooks/ceph-deploy.yml for the whole logic. 
cifmw_make_ceph_environment: - CEPH_TIMEOUT: 90 + CEPH_TIMEOUT: 120 From 38e58336d35e3aba7cd577ea05127a87d15ec2ca Mon Sep 17 00:00:00 2001 From: Milana Levy Date: Tue, 25 Mar 2025 18:12:00 +0200 Subject: [PATCH 092/480] Add reporting for playbook testing Add reporting for playbook testing --- roles/validations/tasks/security/invoke_tlse_playbooks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/validations/tasks/security/invoke_tlse_playbooks.yml b/roles/validations/tasks/security/invoke_tlse_playbooks.yml index 8af1f4c14a..c20e1f84a3 100644 --- a/roles/validations/tasks/security/invoke_tlse_playbooks.yml +++ b/roles/validations/tasks/security/invoke_tlse_playbooks.yml @@ -9,3 +9,4 @@ cd "{{ ansible_user_dir }}/src/gitlab.cee.redhat.com/OSP-DFG-security/automation" ansible-playbook -vv playbooks/renew_internal_cert_outer.yml || echo "renew_internal_cert_outer failed, continuing..." ansible-playbook -vv playbooks/data_plane_cert_testing_with_delete.yml + ansible-playbook -vv playbooks/get_test_results_of_playbook_tests.yml From eb65e023c6cb0cdd9cfd100c8dd4e8dfcda65c92 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Tue, 6 May 2025 09:18:46 +0530 Subject: [PATCH 093/480] Add epoxy release mapping in build_openstack_packages role It will allow us to build rpms from opendev epoxy depends-on in the content provider jobs. 
Signed-off-by: Chandan Kumar (raukadah) --- roles/build_openstack_packages/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/build_openstack_packages/defaults/main.yml b/roles/build_openstack_packages/defaults/main.yml index 43aa1a000a..a8502bf04e 100644 --- a/roles/build_openstack_packages/defaults/main.yml +++ b/roles/build_openstack_packages/defaults/main.yml @@ -70,6 +70,7 @@ cifmw_bop_change_list: [] cifmw_bop_release_mapping: master: master antelope: unmaintained/2023.1 + epoxy: stable/2025.1 cifmw_bop_versions_url: rhos-18.0: "https://trunk.rdoproject.org/centos9-antelope/current-podified/versions.csv" From 6b75dcc9b2c22b6bf606437c40202a9174c3c522 Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Mon, 28 Apr 2025 14:22:29 +0100 Subject: [PATCH 094/480] Update adoption scenario uni06zeta It's empty patch, the configuration is defined in d/s job --- scenarios/adoption/uni06zeta.yml | 100 +------------------------------ 1 file changed, 2 insertions(+), 98 deletions(-) diff --git a/scenarios/adoption/uni06zeta.yml b/scenarios/adoption/uni06zeta.yml index 3e11dd49f8..4e9e5200e7 100644 --- a/scenarios/adoption/uni06zeta.yml +++ b/scenarios/adoption/uni06zeta.yml @@ -1,98 +1,2 @@ ---- -# By default, the OSP VMs will run using a default image. -# In upstream, it's usually latest centos-stream-9 -# For downstream, it's usually rhel-9.4 image, depending on -# the job configuration. -# -# Since OSP infra must use an older RHEL image, you can override it -# by setting "osp_base_img_url" to point to the downstream QCOW2 image, -# and "osp_base_img_sha256" holding the SHA256SUM of the image. -# -# We can't automatically discover the image, the role/module sets the -# value globally, and it would clash with the needs for RHOSO images. - -# Use anchor to avoid repetitions. This block is common to all of OSP nodes. 
-_osp_img_data: &osp_base_conf - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: osp-base.qcow2 - image_url: "{{ osp_base_img_url | default(cifmw_discovered_image_url) }}" - sha256_image_name: >- - {{ osp_base_img_sha256 | default(cifmw_discovered_hash) }} -libvirt_manager_patch_layout: - vms: - # Let's remove the default computes, since we want to adopt the - # OSP ones - compute: - amount: 0 - osp-undercloud: - <<: *osp_base_conf - amount: 1 - memory: 16 - cpus: 8 - disksize: 80 - nets: - - ocpbm - - osp_trunk - osp-controller: - <<: *osp_base_conf - amount: 3 - memory: 16 - cpus: 8 - disksize: 80 - nets: - - ocpbm - - osp_trunk - osp-compute: - <<: *osp_base_conf - amount: 2 - memory: 16 - cpus: 8 - disksize: 120 - extra_disks_num: 2 - extra_disks_size: 30G - nets: - - ocpbm - - osp_trunk - -networking_mapper_definition_patch: - networks: - external: - network: "192.168.32.0/20" - vlan: 99 - mtu: 1496 - group-templates: - computes: - network-template: - # ensure this range does not collide with osp-computes one, even if we - # don't create any vms for the compute group (the computes for - # greenfield jobs) we need to make sure their ip ranges do not overlap - range: - start: 200 - length: 1 - osp-controllers: - network-template: - range: - start: 103 - length: 3 - networks: &osp_nets - ctlplane: {} - external: - trunk-parent: ctlplane - internalapi: - trunk-parent: ctlplane - tenant: - trunk-parent: ctlplane - storage: - trunk-parent: ctlplane - osp-computes: - network-template: - range: - start: 106 - length: 2 - networks: *osp_nets - osp-underclouds: - network-template: - range: - start: 100 - length: 1 - networks: *osp_nets +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From 82b65a0f53293c707ba1064af1e46f06eea83a3c Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 6 May 2025 13:41:34 +0200 Subject: [PATCH 095/480] Simplify files to track by Zuul In many places it was not necessary to add a regex, due the 
whole role/playbook/module should be tracked. In other words, we can live if README file would be changed and there would be a CI job running for veryfing that. Signed-off-by: Daniel Pawlik --- ci/config/molecule.yaml | 28 ++--- ci/templates/molecule.yaml.j2 | 5 +- ci/templates/noop-molecule.yaml.j2 | 5 +- zuul.d/adoption.yaml | 14 +-- zuul.d/architecture-jobs.yaml | 4 +- zuul.d/content_provider.yaml | 10 +- zuul.d/edpm.yaml | 8 +- zuul.d/end-to-end.yaml | 6 +- zuul.d/molecule.yaml | 190 ++++++++++++++--------------- zuul.d/tcib.yaml | 2 +- 10 files changed, 133 insertions(+), 139 deletions(-) diff --git a/ci/config/molecule.yaml b/ci/config/molecule.yaml index 6030afeb10..468705e5c0 100644 --- a/ci/config/molecule.yaml +++ b/ci/config/molecule.yaml @@ -2,9 +2,9 @@ - job: name: cifmw-molecule-libvirt_manager files: - - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/networking_mapper/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/config_drive/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/dnsmasq/.* + - ^roles/networking_mapper/.* + - ^roles/config_drive/.* timeout: 3600 - job: name: cifmw-molecule-openshift_login @@ -54,12 +54,12 @@ nodeset: centos-9-crc-2-48-0-xxl-ibm timeout: 5400 files: - - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/libvirt_manager/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/networking_mapper/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/podman/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/sushy_emulator/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - 
^roles/rhol_crc/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/dnsmasq/.* + - ^roles/libvirt_manager/.* + - ^roles/networking_mapper/.* + - ^roles/podman/.* + - ^roles/sushy_emulator/.* + - ^roles/rhol_crc/.* - job: name: cifmw-molecule-cert_manager nodeset: centos-9-crc-2-48-0-xxl-ibm @@ -69,17 +69,17 @@ - job: name: cifmw_molecule-pkg_build files: - - ^roles/build_openstack_packages/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/build_openstack_packages/.* - job: name: cifmw_molecule-build_containers files: - - ^roles/build_openstack_packages/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/build_openstack_packages/.* + - ^roles/repo_setup/.* - job: name: cifmw-molecule-build_openstack_packages files: - - ^roles/pkg_build/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/pkg_build/.* + - ^roles/repo_setup/.* - job: name: cifmw-molecule-manage_secrets nodeset: centos-9-crc-2-48-0-xl-ibm diff --git a/ci/templates/molecule.yaml.j2 b/ci/templates/molecule.yaml.j2 index 551032c8ab..d82c73afeb 100644 --- a/ci/templates/molecule.yaml.j2 +++ b/ci/templates/molecule.yaml.j2 @@ -1,8 +1,5 @@ # Don't modify this file. 
# If you need apply custom molecule changes, please edit ci/config/molecule.yaml -{% set want_list = ['defaults', 'files', 'handlers', 'library', - 'lookup_plugins', 'module_utils', 'molecule', - 'tasks', 'templates', 'vars'] -%} {% for role_name in role_names | sort %} - job: name: cifmw-molecule-{{ role_name }} @@ -12,7 +9,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/{{ role_name }}/({{ want_list | sort | join('|') }}).* + - ^roles/{{ role_name }}/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* {% endfor %} diff --git a/ci/templates/noop-molecule.yaml.j2 b/ci/templates/noop-molecule.yaml.j2 index f118ccf65c..b7a43895b7 100644 --- a/ci/templates/noop-molecule.yaml.j2 +++ b/ci/templates/noop-molecule.yaml.j2 @@ -1,6 +1,3 @@ -{% set want_list = ['defaults', 'files', 'handlers', 'library', - 'lookup_plugins', 'module_utils', 'molecule', - 'tasks', 'templates', 'vars'] -%} {% for role_name in role_names | sort %} - job: name: cifmw-molecule-{{ role_name }} @@ -8,7 +5,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/{{ role_name }}/{{ want_list | sort | join('|') }}.* + - ^roles/{{ role_name }}/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* {% endfor %} diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index 773c52be6c..f826af9057 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -120,13 +120,13 @@ - ^playbooks/01-bootstrap.yml - ^playbooks/02-infra.yml - ^playbooks/06-deploy-edpm.yml - - ^roles/discover_latest_image/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/edpm_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/install_ca/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/install_yamls/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - 
^roles/openshift_login/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/openshift_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/discover_latest_image/.* + - ^roles/edpm_prepare/.* + - ^roles/install_ca/.* + - ^roles/install_yamls/.* + - ^roles/openshift_login/.* + - ^roles/openshift_setup/.* + - ^roles/repo_setup/.* - ^hooks/playbooks/fetch_compute_facts.yml - ^zuul.d/adoption.yaml # openstack-operator diff --git a/zuul.d/architecture-jobs.yaml b/zuul.d/architecture-jobs.yaml index 5d0bcaf4bf..36875dbb69 100644 --- a/zuul.d/architecture-jobs.yaml +++ b/zuul.d/architecture-jobs.yaml @@ -40,5 +40,5 @@ cifmw_architecture_scenario: hci files: - zuul.d/architecture-jobs.yaml - - ^roles/ci_gen_kustomize_values/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/kustomize_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/ci_gen_kustomize_values/.* + - ^roles/kustomize_deploy/.* diff --git a/zuul.d/content_provider.yaml b/zuul.d/content_provider.yaml index 2790e6203a..764d95c686 100644 --- a/zuul.d/content_provider.yaml +++ b/zuul.d/content_provider.yaml @@ -22,9 +22,9 @@ against ci-framework repo to validate meta content provider changes. 
files: - - ^roles/build_containers/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/build_openstack_packages/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/registry_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/edpm_build_images/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/operator_build/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/build_containers/.* + - ^roles/build_openstack_packages/.* + - ^roles/registry_deploy/.* + - ^roles/edpm_build_images/.* + - ^roles/operator_build/.* - ^ci/playbooks/meta_content_provider/.* diff --git a/zuul.d/edpm.yaml b/zuul.d/edpm.yaml index 2e0a2871db..f325f9c408 100644 --- a/zuul.d/edpm.yaml +++ b/zuul.d/edpm.yaml @@ -35,8 +35,8 @@ parent: cifmw-crc-podified-edpm-deployment files: - ^playbooks/* - - ^roles/edpm_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/edpm_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/edpm_prepare/.* + - ^roles/edpm_deploy/.* - ^roles/artifacts/tasks/edpm.yml - ^deploy-edpm.yml - ^scenarios/centos-9/edpm_ci.yml @@ -46,7 +46,7 @@ parent: cifmw-crc-podified-galera-deployment files: - ^playbooks/* - - ^roles/edpm_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/edpm_prepare/.* - ^deploy-edpm.yml - ^scenarios/centos-9/edpm_ci.yml @@ -55,7 +55,7 @@ parent: cifmw-crc-podified-edpm-baremetal files: - ^playbooks/* - - ^roles/edpm_deploy_baremetal/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/edpm_deploy_baremetal/.* - ^roles/artifacts/tasks/edpm.yml - ^ci/playbooks/edpm_baremetal_deployment/run.yml - ^deploy-edpm.yml diff --git a/zuul.d/end-to-end.yaml 
b/zuul.d/end-to-end.yaml index a92c576f85..5c0f1babdf 100644 --- a/zuul.d/end-to-end.yaml +++ b/zuul.d/end-to-end.yaml @@ -23,9 +23,9 @@ name: cifmw-end-to-end parent: cifmw-end-to-end-base files: - - ^roles/.*_build/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/build.*/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/openshift_.*/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/.*_build/.* + - ^roles/build.*/.* + - ^roles/openshift_.*/.* - ^playbooks/.*build.*.yml irrelevant-files: - ^.*/*.md diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 04b2d43469..3578dd32b1 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -2,7 +2,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/artifacts/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/artifacts/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-artifacts @@ -13,7 +13,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/build_containers/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/build_containers/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-build_containers @@ -24,11 +24,11 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/build_openstack_packages/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/build_openstack_packages/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* - - ^roles/pkg_build/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/pkg_build/.* + - ^roles/repo_setup/.* name: 
cifmw-molecule-build_openstack_packages parent: cifmw-molecule-base vars: @@ -37,7 +37,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/build_push_container/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/build_push_container/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-build_push_container @@ -48,7 +48,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cert_manager/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cert_manager/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cert_manager @@ -60,7 +60,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_gen_kustomize_values/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_gen_kustomize_values/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_gen_kustomize_values @@ -73,7 +73,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_local_storage/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_local_storage/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_local_storage @@ -85,7 +85,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_multus/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_multus/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_multus @@ -96,7 +96,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_network/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_network/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_network @@ -107,7 +107,7 @@ files: - 
^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_nmstate/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_nmstate/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_nmstate @@ -118,7 +118,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ci_setup/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_setup @@ -129,7 +129,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_block_device/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_block_device/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_block_device @@ -140,7 +140,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_ceph_client/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_ceph_client/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_ceph_client @@ -151,7 +151,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_ceph_spec/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_ceph_spec/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_ceph_spec @@ -162,7 +162,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_cephadm/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_cephadm/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_cephadm @@ -173,7 +173,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - 
^roles/cifmw_create_admin/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_create_admin/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_create_admin @@ -184,7 +184,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_ntp/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_ntp/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_ntp @@ -195,7 +195,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_test_role/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/cifmw_test_role/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_test_role @@ -206,7 +206,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/compliance/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/compliance/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-compliance @@ -217,7 +217,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/config_drive/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/config_drive/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-config_drive @@ -228,7 +228,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/copy_container/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/copy_container/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-copy_container @@ -239,7 +239,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/deploy_bmh/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - 
^roles/deploy_bmh/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-deploy_bmh @@ -250,7 +250,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/devscripts/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/devscripts/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-devscripts @@ -261,7 +261,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/discover_latest_image/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/discover_latest_image/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-discover_latest_image @@ -272,7 +272,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/dlrn_promote/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/dlrn_promote/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-dlrn_promote @@ -283,7 +283,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/dlrn_report/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/dlrn_report/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-dlrn_report @@ -294,7 +294,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/dnsmasq/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-dnsmasq @@ -305,7 +305,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/edpm_build_images/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/edpm_build_images/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-edpm_build_images @@ -316,7 +316,7 @@ files: - 
^common-requirements.txt - ^test-requirements.txt - - ^roles/edpm_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/edpm_deploy/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-edpm_deploy @@ -327,7 +327,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/edpm_deploy_baremetal/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/edpm_deploy_baremetal/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-edpm_deploy_baremetal @@ -338,7 +338,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/edpm_kustomize/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/edpm_kustomize/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-edpm_kustomize @@ -349,7 +349,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/edpm_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/edpm_prepare/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-edpm_prepare @@ -360,7 +360,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/env_op_images/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/env_op_images/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-env_op_images @@ -372,7 +372,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/hci_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/hci_prepare/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-hci_prepare @@ -383,7 +383,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - 
^roles/hive/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/hive/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-hive @@ -394,7 +394,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/idrac_configuration/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/idrac_configuration/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-idrac_configuration @@ -405,7 +405,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/install_ca/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/install_ca/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-install_ca @@ -418,7 +418,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/install_openstack_ca/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/install_openstack_ca/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-install_openstack_ca @@ -431,7 +431,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/install_yamls/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/install_yamls/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-install_yamls @@ -442,7 +442,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/kustomize_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/kustomize_deploy/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-kustomize_deploy @@ -455,12 +455,12 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/libvirt_manager/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + 
- ^roles/libvirt_manager/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* - - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/networking_mapper/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/config_drive/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/dnsmasq/.* + - ^roles/networking_mapper/.* + - ^roles/config_drive/.* name: cifmw-molecule-libvirt_manager parent: cifmw-molecule-base timeout: 3600 @@ -470,7 +470,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/manage_secrets/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/manage_secrets/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-manage_secrets @@ -482,7 +482,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/mirror_registry/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/mirror_registry/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-mirror_registry @@ -493,7 +493,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/nat64_appliance/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/nat64_appliance/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-nat64_appliance @@ -504,7 +504,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/networking_mapper/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/networking_mapper/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-networking_mapper @@ -516,7 +516,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - 
^roles/openshift_login/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/openshift_login/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_login @@ -528,7 +528,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/openshift_obs/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/openshift_obs/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_obs @@ -540,7 +540,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/openshift_provisioner_node/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/openshift_provisioner_node/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_provisioner_node @@ -552,7 +552,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/openshift_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/openshift_setup/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_setup @@ -564,7 +564,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/operator_build/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/operator_build/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-operator_build @@ -575,7 +575,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/operator_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/operator_deploy/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-operator_deploy @@ -587,7 +587,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - 
^roles/os_must_gather/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/os_must_gather/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-os_must_gather @@ -598,7 +598,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/os_net_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/os_net_setup/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-os_net_setup @@ -609,7 +609,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/pkg_build/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/pkg_build/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-pkg_build @@ -620,7 +620,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/podman/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/podman/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-podman @@ -631,7 +631,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/registry_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/registry_deploy/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-registry_deploy @@ -642,7 +642,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/repo_setup/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/repo_setup/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-repo_setup @@ -653,7 +653,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/reportportal/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/reportportal/.* - ^ci/playbooks/molecule.* - 
^.config/molecule/.* name: cifmw-molecule-reportportal @@ -664,15 +664,15 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/reproducer/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/reproducer/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* - - ^roles/dnsmasq/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/libvirt_manager/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/networking_mapper/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/podman/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/sushy_emulator/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - - ^roles/rhol_crc/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/dnsmasq/.* + - ^roles/libvirt_manager/.* + - ^roles/networking_mapper/.* + - ^roles/podman/.* + - ^roles/sushy_emulator/.* + - ^roles/rhol_crc/.* name: cifmw-molecule-reproducer nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base @@ -683,7 +683,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/rhol_crc/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/rhol_crc/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-rhol_crc @@ -696,7 +696,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/run_hook/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/run_hook/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-run_hook @@ -707,7 +707,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - 
^roles/set_openstack_containers/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/set_openstack_containers/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-set_openstack_containers @@ -718,7 +718,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/shiftstack/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/shiftstack/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-shiftstack @@ -730,7 +730,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ssh_jumper/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/ssh_jumper/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ssh_jumper @@ -741,7 +741,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/sushy_emulator/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/sushy_emulator/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-sushy_emulator @@ -753,7 +753,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/tempest/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/tempest/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-tempest @@ -764,7 +764,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/test_deps/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/test_deps/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-test_deps @@ -775,7 +775,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/test_operator/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/test_operator/.* - 
^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-test_operator @@ -786,7 +786,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/tofu/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/tofu/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* - ^ci_framework/playbooks/run_tofu.yml @@ -799,7 +799,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/update/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/update/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-update @@ -810,7 +810,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/update_containers/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/update_containers/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-update_containers @@ -821,7 +821,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/validations/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/validations/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-validations @@ -832,7 +832,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/virtualbmc/(defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars).* + - ^roles/virtualbmc/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-virtualbmc @@ -843,7 +843,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/adoption_osp_deploy/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/adoption_osp_deploy/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-adoption_osp_deploy @@ -852,7 +852,7 @@ files: - ^common-requirements.txt - 
^test-requirements.txt - - ^roles/ci_dcn_site/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/ci_dcn_site/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_dcn_site @@ -861,7 +861,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ci_lvms_storage/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/ci_lvms_storage/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ci_lvms_storage @@ -870,7 +870,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/cifmw_external_dns/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/cifmw_external_dns/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-cifmw_external_dns @@ -879,7 +879,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/federation/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/federation/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-federation @@ -888,7 +888,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/krb_request/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/krb_request/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-krb_request @@ -897,7 +897,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/openshift_adm/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/openshift_adm/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-openshift_adm @@ -906,7 +906,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/ovirt/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - 
^roles/ovirt/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-ovirt @@ -915,7 +915,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/polarion/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/polarion/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-polarion @@ -924,7 +924,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/recognize_ssh_keypair/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/recognize_ssh_keypair/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-recognize_ssh_keypair @@ -933,7 +933,7 @@ files: - ^common-requirements.txt - ^test-requirements.txt - - ^roles/switch_config/defaults|files|handlers|library|lookup_plugins|module_utils|molecule|tasks|templates|vars.* + - ^roles/switch_config/.* - ^ci/playbooks/molecule.* - ^.config/molecule/.* name: cifmw-molecule-switch_config diff --git a/zuul.d/tcib.yaml b/zuul.d/tcib.yaml index 94ba621c2e..f90fc3d9a3 100644 --- a/zuul.d/tcib.yaml +++ b/zuul.d/tcib.yaml @@ -25,6 +25,6 @@ name: cifmw-tcib parent: cifmw-tcib-base files: - - ^roles/build_containers/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* + - ^roles/build_containers/.* - ^scenarios/centos-9/tcib.yml - ^ci/playbooks/tcib From 146a0682ab7d502dc58cf5f4efba615742b7969d Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Wed, 30 Apr 2025 11:13:09 -0400 Subject: [PATCH 096/480] Retry OCP CSR approval --- roles/openshift_adm/tasks/wait_for_cluster.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/roles/openshift_adm/tasks/wait_for_cluster.yml b/roles/openshift_adm/tasks/wait_for_cluster.yml index 5d3c92be28..3148877b81 100644 --- a/roles/openshift_adm/tasks/wait_for_cluster.yml +++ b/roles/openshift_adm/tasks/wait_for_cluster.yml @@ -49,8 +49,15 @@ - name: Check for pending certificate 
approval. when: - _openshift_adm_check_cert_approve | default(false) | bool + register: _approve_csr approve_csr: k8s_config: "{{ cifmw_openshift_kubeconfig }}" + retries: 30 + delay: 10 + until: + - _approve_csr is defined + - _approve_csr.rc is defined + - _approve_csr.rc == 0 - name: Wait until the OpenShift cluster is stable. environment: From 3cc19802840ddc4deef62eb6d15a89b500d595cc Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 6 May 2025 10:36:19 +0200 Subject: [PATCH 097/480] Update Ansible requirements to latest version It was not done for a while. Signed-off-by: Daniel Pawlik --- requirements.yml | 22 +++++++++++----------- zuul.d/base.yaml | 5 +++++ zuul.d/kuttl_multinode.yaml | 1 + 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/requirements.yml b/requirements.yml index 22fece6f58..aa6dac7ffb 100644 --- a/requirements.yml +++ b/requirements.yml @@ -17,42 +17,42 @@ collections: - name: https://github.com/ansible-collections/ansible.posix type: git - version: "1.6.2" + version: "v2.0.0" - name: https://github.com/ansible-collections/ansible.utils type: git - version: "v5.1.2" + version: "v6.0.0" - name: https://github.com/ansible-collections/community.general type: git - version: "10.0.1" + version: "10.6.0" - name: https://github.com/ansible-collections/community.crypto type: git - version: "2.22.3" + version: "2.26.1" - name: https://github.com/containers/ansible-podman-collections type: git - version: "1.16.2" + version: "1.16.3" - name: https://github.com/ansible-collections/community.libvirt type: git - version: "1.3.0" + version: "1.3.1" - name: https://github.com/ansible-collections/kubernetes.core type: git - version: "5.0.0" + version: "5.2.0" - name: https://github.com/ansible-collections/ansible.netcommon type: git - version: "v7.1.0" + version: "v8.0.0" - name: https://github.com/openstack/ansible-config_template type: git version: "2.1.1" - name: https://github.com/ansible-collections/junipernetworks.junos type: git - 
version: "v9.1.0" + version: "v10.0.0" - name: https://github.com/ansible-collections/cisco.ios type: git - version: "v9.0.3" + version: "v10.0.0" - name: https://github.com/ansible-collections/mellanox.onyx type: git - name: https://github.com/openshift/community.okd type: git - version: "4.0.0" + version: "4.0.1" - name: https://github.com/ovirt/ovirt-ansible-collection type: git version: "3.2.0-1" diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 25e92d2fc8..4397c2b66f 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -73,6 +73,8 @@ # openstack-ansibleee-operator - examples - mkdocs.yml + files: &files + - ^requirements.yml required-projects: - opendev.org/zuul/zuul-jobs - openstack-k8s-operators/barbican-operator @@ -127,6 +129,7 @@ attempts: 1 nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files + files: *files required-projects: &multinode_edpm_rp - openstack-k8s-operators/ci-framework - openstack-k8s-operators/install_yamls @@ -214,6 +217,7 @@ attempts: 1 nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files + files: *files required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles pre-run: @@ -268,6 +272,7 @@ timeout: 10800 abstract: true irrelevant-files: *ir_files + files: *files required-projects: - openstack-k8s-operators/ci-framework - openstack-k8s-operators/install_yamls diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index fa5fb7f700..e45498d190 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -58,6 +58,7 @@ - ^ci/playbooks/kuttl/.* - ^scenarios/centos-9/kuttl.yml - ^zuul.d/kuttl.yaml + - ^requirements.yml vars: cifmw_extras: - '@scenarios/centos-9/kuttl_multinode.yml' From a6bdcb67e76e7657234b9a956f05856dbc8ff685 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 7 May 2025 08:45:41 +0200 Subject: [PATCH 098/480] Drop tracking requiements.yml file from Zuul CI base jobs We should track the files not in base 
job, but in "child" jobs. Signed-off-by: Daniel Pawlik --- zuul.d/base.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 4397c2b66f..25e92d2fc8 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -73,8 +73,6 @@ # openstack-ansibleee-operator - examples - mkdocs.yml - files: &files - - ^requirements.yml required-projects: - opendev.org/zuul/zuul-jobs - openstack-k8s-operators/barbican-operator @@ -129,7 +127,6 @@ attempts: 1 nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files - files: *files required-projects: &multinode_edpm_rp - openstack-k8s-operators/ci-framework - openstack-k8s-operators/install_yamls @@ -217,7 +214,6 @@ attempts: 1 nodeset: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl irrelevant-files: *ir_files - files: *files required-projects: *multinode_edpm_rp roles: *multinode_edpm_roles pre-run: @@ -272,7 +268,6 @@ timeout: 10800 abstract: true irrelevant-files: *ir_files - files: *files required-projects: - openstack-k8s-operators/ci-framework - openstack-k8s-operators/install_yamls From d8f86c86284c1447f66ae3a0d8354d9bfea03ca3 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Wed, 30 Apr 2025 10:42:25 -0400 Subject: [PATCH 099/480] Allow setting concurrency in 'stages' loop Currently, it is not possible to set concurrency using the 'stages' functionality. It is because the retyping to string is present and concurrency is supposed to be type integer. This patch fixes this bug and lets users to set the value of the parameter. 
--- ansible.cfg | 1 + roles/test_operator/README.md | 2 +- roles/test_operator/defaults/main.yml | 3 +-- roles/test_operator/tasks/stages.yml | 6 +++++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index 1a19201b31..9228777d09 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -18,5 +18,6 @@ fact_caching_timeout = 0 inventory = inventory.yml pipelining = True any_errors_fatal = True +jinja2_native = True [ssh_connection] ssh_args = -o ControlMaster=auto -o ControlPersist=300 diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index ab1222f96b..48e91505ef 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -9,7 +9,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_bundle`: (String) Full name of container image with bundle that contains the test-operator. Default value: `""` * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. Default value: `3600` * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` -* `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. As of now this value can not be specified inside `test_vars`. Default value: `8` +* `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. Default value: `8` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` * `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. * `cifmw_test_operator_default_groups`: (List) List of groups in the include list to search for tests to be executed. 
Default value: `[ 'default' ]` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index d7b533aad4..64e4c9accb 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -29,7 +29,6 @@ cifmw_test_operator_controller_namespace: openstack-operators cifmw_test_operator_bundle: "" cifmw_test_operator_timeout: 3600 cifmw_test_operator_logs_image: quay.io/quay/busybox -cifmw_test_operator_concurrency: 8 cifmw_test_operator_cleanup: false cifmw_test_operator_dry_run: false cifmw_test_operator_default_groups: @@ -140,7 +139,7 @@ cifmw_test_operator_tempest_config: {{ stage_vars_dict.cifmw_test_operator_tempest_exclude_list | default('') }} expectedFailuresList: | {{ stage_vars_dict.cifmw_test_operator_tempest_expected_failures_list | default('') }} - concurrency: "{{ cifmw_test_operator_concurrency }}" + concurrency: "{{ stage_vars_dict.cifmw_test_operator_concurrency | default(8) }}" externalPlugin: "{{ stage_vars_dict.cifmw_test_operator_tempest_external_plugin | default([]) }}" extraRPMs: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" diff --git a/roles/test_operator/tasks/stages.yml b/roles/test_operator/tasks/stages.yml index 72c460fcdf..3fcddb5ebc 100644 --- a/roles/test_operator/tasks/stages.yml +++ b/roles/test_operator/tasks/stages.yml @@ -35,7 +35,11 @@ start_with: cifmw_test_operator_{{ _stage_vars.type }} when: item.key.startswith(start_with) ansible.builtin.set_fact: - stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit))} ) }}" + stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit)) }) }}" + +- name: Overwrite concurrency in global_vars + ansible.builtin.set_fact: + stage_vars_dict: "{{ stage_vars_dict 
| combine({'cifmw_test_operator_concurrency': _stage_test_vars.cifmw_test_operator_concurrency | default(lookup('vars', 'concurrency', default=omit)) }) }}" - name: Override specific type config vars: From 0f46d1838e38ec988cd831127f23e4431cbd3b19 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Thu, 1 May 2025 15:45:05 +0530 Subject: [PATCH 100/480] Add cifmw_build_containers_force flag to build tcib containers always in meta content provider Sometimes we want to build tcib content provider irrespective of gating repo. In order to do that there is no way to do that. By adding cifmw_build_containers_force flag allows to build the containers in meta content provider. Signed-off-by: Chandan Kumar (raukadah) --- ci/playbooks/meta_content_provider/meta_content_provider.yml | 4 +++- roles/build_containers/README.md | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ci/playbooks/meta_content_provider/meta_content_provider.yml b/ci/playbooks/meta_content_provider/meta_content_provider.yml index 22f2b7dda5..eea0995e66 100644 --- a/ci/playbooks/meta_content_provider/meta_content_provider.yml +++ b/ci/playbooks/meta_content_provider/meta_content_provider.yml @@ -47,7 +47,9 @@ - name: Build openstack services container when gating repo exists when: - "'os-net-config' not in zuul_change_list" - - _gating_repo.stat.exists + # Note: cifmw_build_containers_force var is used to run build_containers + # role in the meta content provider irrespective of gating repo. 
+ - _gating_repo.stat.exists or cifmw_build_containers_force | default('false') block: # It is needed to install built python-tcib package on the controller - name: Populate gating repo in /etc/yum.repos.d diff --git a/roles/build_containers/README.md b/roles/build_containers/README.md index 84feb3fec6..967774e7c5 100644 --- a/roles/build_containers/README.md +++ b/roles/build_containers/README.md @@ -35,3 +35,7 @@ become - Required to install and execute tcib * `cifmw_build_containers_hotfix_tag`: (String) The tag of the container image. * `cifmw_build_containers_run_hotfix`: (boolean) conditional variable for executing build_containers. * `cifmw_build_containers_install_from_source`: (boolean) Install tcib from RPM. + +### Parameters used in meta-content-provider + +* `cifmw_build_containers_force`: (Boolean) Force run build_containers role irrespective of gating repo. Default to `false`. From d471c0c04c40008c5ea660e7d55bf143af98841d Mon Sep 17 00:00:00 2001 From: Ronelle Landy Date: Fri, 25 Apr 2025 12:49:04 -0400 Subject: [PATCH 101/480] Add conditional for CentOS 10 - nodepool mirror --- .../templates/build_containers.sh.j2 | 2 +- roles/repo_setup/tasks/ci_mirror.yml | 28 ++++++++++++++----- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/roles/build_containers/templates/build_containers.sh.j2 b/roles/build_containers/templates/build_containers.sh.j2 index 9237b46cc1..86eb3952d0 100644 --- a/roles/build_containers/templates/build_containers.sh.j2 +++ b/roles/build_containers/templates/build_containers.sh.j2 @@ -49,7 +49,7 @@ openstack tcib container image build \ {% endfor %} {% endif %} --tcib-extra tcib_release={{ ansible_distribution_major_version }} \ - --tcib-extra tcib_python_version={{ (ansible_distribution_major_version is version('9', '<')) | ternary ('3.6', '3.9') }} \ + --tcib-extra tcib_python_version={{ (ansible_distribution_major_version is version('10', '<')) | ternary ('3.9', '3.12') }} \ {% if 
cifmw_build_containers_install_from_source | bool %} --tcib-extra tcib_package= \ {% endif %} diff --git a/roles/repo_setup/tasks/ci_mirror.yml b/roles/repo_setup/tasks/ci_mirror.yml index 0b016c7224..dae3377660 100644 --- a/roles/repo_setup/tasks/ci_mirror.yml +++ b/roles/repo_setup/tasks/ci_mirror.yml @@ -8,10 +8,24 @@ become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" when: - mirror_path.stat.exists - ansible.builtin.shell: | - set -o pipefail - source /etc/ci/mirror_info.sh - sed -i -e "s|https://trunk.rdoproject.org|$NODEPOOL_RDO_PROXY|g" *.repo - sed -i -e "s|http://mirror.stream.centos.org|$NODEPOOL_CENTOS_MIRROR|g" *.repo - args: - chdir: "{{ cifmw_repo_setup_output }}" + block: + - name: Use RDO proxy mirrors + ansible.builtin.shell: | + set -o pipefail + source /etc/ci/mirror_info.sh + sed -i -e "s|https://trunk.rdoproject.org|$NODEPOOL_RDO_PROXY|g" *.repo + args: + chdir: "{{ cifmw_repo_setup_output }}" + + # TODO(rlandy) remove when CentOS 10 mirrors exist on Nodepool Hosts + # mirror ref: http://mirror.regionone.vexxhost-nodepool-sf.rdoproject.org/centos-stream/ + - name: Use RDO CentOS mirrors (remove CentOS 10 conditional when Nodepool mirrors exist) + when: + - ansible_distribution | lower == 'centos' + - ansible_distribution_major_version is not version('10', '==') + ansible.builtin.shell: | + set -o pipefail + source /etc/ci/mirror_info.sh + sed -i -e "s|http://mirror.stream.centos.org|$NODEPOOL_CENTOS_MIRROR|g" *.repo + args: + chdir: "{{ cifmw_repo_setup_output }}" From 06bfb9dc816745fa5770fed3c055526c396ac892 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 9 May 2025 10:13:13 +0200 Subject: [PATCH 102/480] Fix setting fact for cifmw_test_operator_concurrency Some part of previous commit [1] was not needed and it breaks overwriting the values (it always was uing 8 value for concurrency). This commit partially reverts commit proposed in PR [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2938 Signed-off-by: Daniel Pawlik --- roles/test_operator/defaults/main.yml | 3 ++- roles/test_operator/tasks/stages.yml | 6 +----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 64e4c9accb..d7b533aad4 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -29,6 +29,7 @@ cifmw_test_operator_controller_namespace: openstack-operators cifmw_test_operator_bundle: "" cifmw_test_operator_timeout: 3600 cifmw_test_operator_logs_image: quay.io/quay/busybox +cifmw_test_operator_concurrency: 8 cifmw_test_operator_cleanup: false cifmw_test_operator_dry_run: false cifmw_test_operator_default_groups: @@ -139,7 +140,7 @@ cifmw_test_operator_tempest_config: {{ stage_vars_dict.cifmw_test_operator_tempest_exclude_list | default('') }} expectedFailuresList: | {{ stage_vars_dict.cifmw_test_operator_tempest_expected_failures_list | default('') }} - concurrency: "{{ stage_vars_dict.cifmw_test_operator_concurrency | default(8) }}" + concurrency: "{{ cifmw_test_operator_concurrency }}" externalPlugin: "{{ stage_vars_dict.cifmw_test_operator_tempest_external_plugin | default([]) }}" extraRPMs: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" diff --git a/roles/test_operator/tasks/stages.yml b/roles/test_operator/tasks/stages.yml index 3fcddb5ebc..72c460fcdf 100644 --- a/roles/test_operator/tasks/stages.yml +++ b/roles/test_operator/tasks/stages.yml @@ -35,11 +35,7 @@ start_with: cifmw_test_operator_{{ _stage_vars.type }} when: item.key.startswith(start_with) ansible.builtin.set_fact: - stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit)) }) }}" - -- name: Overwrite concurrency in 
global_vars - ansible.builtin.set_fact: - stage_vars_dict: "{{ stage_vars_dict | combine({'cifmw_test_operator_concurrency': _stage_test_vars.cifmw_test_operator_concurrency | default(lookup('vars', 'concurrency', default=omit)) }) }}" + stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit))} ) }}" - name: Override specific type config vars: From 58bc72099dfcc8fb9440b3bd8c50e91c5d1b33fd Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Sat, 10 May 2025 02:06:30 +0200 Subject: [PATCH 103/480] dnsmasq: Fix start_v[4|6] is None The task[1] has started to fail recently on following error: "object of type 'NoneType' has no len(). object of type 'NoneType' has no len()" I can see that cifmw_dnsmasq_network_definition in my case is: name: osp_trunk original_name: cifmw-osp_trunk ranges: - label: osp_trunk options: - option:dns-server,192.168.122.1 - option:router prefix_length_v4: 24 prefix_length_v6: null start_v4: 192.168.122.2 start_v6: null Which means that start_v6 is None and that's defined but of NoneType so length filter can't be used on it. The filter does not need to be used tho It's enough to condition, i.e. "if variable" (instead of "If variable | length > 0") which would be false if values is None, false, 0, empty string, dictionary or list. 
[1] https://github.com/openstack-k8s-operators/ci-framework/blob/main/roles/dnsmasq/tasks/manage_network.yml#L35 --- roles/config_drive/tasks/main.yml | 4 ++-- roles/dnsmasq/templates/network.conf.j2 | 10 +++++----- .../libvirt_manager/tasks/generate_networking_data.yml | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/config_drive/tasks/main.yml b/roles/config_drive/tasks/main.yml index 2954d689b4..67b38d8c59 100644 --- a/roles/config_drive/tasks/main.yml +++ b/roles/config_drive/tasks/main.yml @@ -63,7 +63,7 @@ register: _net_data_change when: - cifmw_config_drive_networkconfig is defined - - cifmw_config_drive_networkconfig | length > 0 + - cifmw_config_drive_networkconfig ansible.builtin.template: backup: true src: "network-config.j2" @@ -101,6 +101,6 @@ -joliet -rock user-data meta-data {% if cifmw_config_drive_networkconfig is defined and - cifmw_config_drive_networkconfig | length > 0 -%} + cifmw_config_drive_networkconfig -%} network-config {%- endif -%} diff --git a/roles/dnsmasq/templates/network.conf.j2 b/roles/dnsmasq/templates/network.conf.j2 index cb5ce35958..a948c591ef 100644 --- a/roles/dnsmasq/templates/network.conf.j2 +++ b/roles/dnsmasq/templates/network.conf.j2 @@ -1,21 +1,21 @@ # Managed by ci-framework/dnsmasq {% if cifmw_dnsmasq_network_definition.ranges | selectattr('start_v6', 'defined') | - rejectattr('start_v6', 'match', '^$') -%} + rejectattr('start_v6', 'none') | rejectattr('start_v6', 'match', '^$') -%} enable-ra {% endif -%} {% for range in cifmw_dnsmasq_network_definition['ranges'] -%} -{% if range.start_v4 is defined and range.start_v4 | length > 0 -%} +{% if range.start_v4 is defined and range.start_v4 -%} dhcp-range=set:{{ range.label }},{{ range.start_v4 }},static,{{ (range.start_v4 + "/" + range.prefix_length_v4 | default(24) | string) | ansible.utils.ipaddr('netmask') }},{{ range.ttl | default('1h') }} -{% if range.domain is defined and range.domain | length > 0 -%} +{% if range.domain is defined and 
range.domain -%} {% set range_v4_allowed = (range.start_v4 ~ "/" ~ range.prefix_length_v4 | default('24')) | ansible.utils.ipaddr('range_usable') | replace("-",",") %} domain={{ range.domain }},{{ range_v4_allowed }},local {% endif %} {% endif %} -{% if range.start_v6 is defined and range.start_v6 | length > 0 -%} +{% if range.start_v6 is defined and range.start_v6 -%} dhcp-range=set:{{ range.label }},{{ range.start_v6 }},static,{{ range.prefix_length_v6 | default('64') }},{{ range.ttl | default('1h') }} -{% if range.domain is defined and range.domain | length > 0 -%} +{% if range.domain is defined and range.domain -%} {% set range_v6_allowed = (range.start_v6 ~ "/" ~ range.prefix_length_v6 | default('64')) | ansible.utils.ipaddr('range_usable') | replace("-",",") %} domain={{ range.domain }},{{ range_v6_allowed }},local diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 5d614d8ad2..45539656bb 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -180,11 +180,11 @@ {% set ns = namespace(ip_start=30) %} networks: {{ _lnet_data.name | replace('cifmw_', '') }}: - {% if _lnet_data.ranges[0].start_v4 is defined and _lnet_data.ranges[0].start_v4 | length > 0 %} + {% if _lnet_data.ranges[0].start_v4 is defined and _lnet_data.ranges[0].start_v4 %} {% set net_4 = _lnet_data.ranges[0].start_v4 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v4) %} network-v4: {{ net_4}} {% endif %} - {% if _lnet_data.ranges[0].start_v6 is defined and _lnet_data.ranges[0].start_v6 | length > 0 %} + {% if _lnet_data.ranges[0].start_v6 is defined and _lnet_data.ranges[0].start_v6 %} {% set net_6 = _lnet_data.ranges[0].start_v6 | ansible.utils.ipsubnet(_lnet_data.ranges[0].prefix_length_v6) %} network-v6: {{ net_6 }} {% endif %} From c4d37b0793a857110885d2964ed44fec878854d5 Mon Sep 17 00:00:00 2001 From: 
openstack-k8s-ci-robot Date: Mon, 5 May 2025 11:22:18 +0200 Subject: [PATCH 104/480] The workflow needs -f flag to rebase and push on a branch --- .github/workflows/sync_branches_reusable_workflow.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 5c258d47ed..9e88e5889c 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -37,4 +37,4 @@ jobs: git fetch origin ${{ inputs.source-branch }} git checkout ${{ inputs.target-branch }} git rebase ${{ inputs.source-branch }} - git push origin ${{ inputs.target-branch }} + git push --force origin ${{ inputs.target-branch }} From 7f14cb220d38f936387534397263a34fe916bfaf Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Thu, 8 May 2025 12:03:33 +0200 Subject: [PATCH 105/480] Add for each worker journalctl kubelet logs This is going to help to identify the reason of some errors related to unstable cluster. 
--- ci/playbooks/collect-logs.yml | 13 +++++++++++++ ci/playbooks/e2e-collect-logs.yml | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/ci/playbooks/collect-logs.yml b/ci/playbooks/collect-logs.yml index d6e5b83a17..aeabecdf1a 100644 --- a/ci/playbooks/collect-logs.yml +++ b/ci/playbooks/collect-logs.yml @@ -157,3 +157,16 @@ url: "report.html" metadata: type: html_report + +- name: "Run ci/playbooks/collect-logs.yml on CRC host" + hosts: crc + gather_facts: false + tasks: + - name: Get kubelet journalctl logs + ignore_errors: true # noqa: ignore-errors + become: true + ansible.builtin.shell: | + journalctl -u kubelet > kubelet.log + no_log: true + args: + chdir: "{{ ansible_user_dir }}/zuul-output/logs/" diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 1c3ef44785..b7733626a9 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -26,3 +26,16 @@ cmd: >- ansible-playbook playbooks/99-logs.yml -e @scenarios/centos-9/base.yml + +- name: "Run ci/playbooks/collect-logs.yml on CRC host" + hosts: crc + gather_facts: false + tasks: + - name: Get kubelet journalctl logs + ignore_errors: true # noqa: ignore-errors + become: true + ansible.builtin.shell: | + journalctl -u kubelet > kubelet.log + no_log: true + args: + chdir: "{{ ansible_user_dir }}/zuul-output/logs/" From b54e797dfe3249410ae65336214a098af9d59ed8 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Wed, 16 Apr 2025 10:06:47 -0400 Subject: [PATCH 106/480] Update CRDs when changing test-operator version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch updated the CRDs, if the user wants to patch the test operator version. Until now the CRDs stayed unchanged even after version change, so testing new test-operator parameters was not possible. It is important to point out, that the update will not be triggered in 90% of test-operator use cases. 
It is primarily used in two situations: 1. When running checks for new PRs in the test-operator repository. 2. When updating the test-operator version in jobs to prevent failures. The approach isn’t the cleanest, but alternative solutions are significantly more complex than the benefits they offer. --- roles/test_operator/README.md | 3 ++- roles/test_operator/tasks/main.yml | 39 ++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 48e91505ef..a1473789fb 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -5,8 +5,9 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ ## Parameters * `cifmw_test_operator_artifacts_basedir`: (String) Directory where we will have all test-operator related files. Default value: `{{ cifmw_basedir }}/tests/test_operator` which defaults to `~/ci-framework-data/tests/test_operator` * `cifmw_test_operator_namespace`: (String) Namespace inside which all the resources are created. Default value: `openstack` -* `cifmw_test_operator_controller_namespace`: (String) Namespace inside which the test-operator-controller-manager is created. Default value: `openstack-opearators` +* `cifmw_test_operator_controller_namespace`: (String) Namespace inside which the test-operator-controller-manager is created. Default value: `openstack-operators` * `cifmw_test_operator_bundle`: (String) Full name of container image with bundle that contains the test-operator. Default value: `""` +* `cifmw_test_operator_version`: (String) The commit hash corresponding to the version of test-operator the user wants to use. This parameter is only used when `cifmw_test_operator_bundle` is also set. * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. 
Default value: `3600` * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` * `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. Default value: `8` diff --git a/roles/test_operator/tasks/main.yml b/roles/test_operator/tasks/main.yml index 1515b70836..4e5b8952b9 100644 --- a/roles/test_operator/tasks/main.yml +++ b/roles/test_operator/tasks/main.yml @@ -118,6 +118,45 @@ ) }} + - name: Update existing CRDs + when: cifmw_test_operator_version is defined + block: + - name: Delete CRDs created by test-operator + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + kind: CustomResourceDefinition + state: absent + api_version: v1 + name: "{{ item }}" + namespace: "{{ cifmw_test_operator_namespace }}" + wait: true + wait_timeout: 600 + loop: + - "{{ cifmw_test_operator_tempest_crd_name }}" + - "{{ cifmw_test_operator_tobiko_crd_name }}" + - "{{ cifmw_test_operator_ansibletest_crd_name }}" + - "{{ cifmw_test_operator_horizontest_crd_name }}" + + - name: Clone test-operator repository and checkout into specified version + ansible.builtin.git: + repo: "https://github.com/openstack-k8s-operators/test-operator.git" + dest: /tmp/test-operator + refspec: '+refs/pull/*:refs/heads/*' + version: "{{ cifmw_test_operator_version }}" + force: true + + - name: Run make generate manifests install from /tmp/test-operator dir + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.shell: >- + set -o pipefail; + make generate manifests install + args: + chdir: /tmp/test-operator + - name: Call test stages loop when: not cifmw_test_operator_dry_run | bool ansible.builtin.include_tasks: stages.yml From 712ef7db06d53a0db78f365aa612e672b65f0588 Mon 
Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 28 Apr 2025 08:59:32 +0200 Subject: [PATCH 107/480] Use role instead of playbooks - 01-bootstrap.yml Most of the playbooks which are executed inside the nested Ansible (Ansible inside the Ansible) does not need to be in playbook, it can be in a simple role. The previous CI execution for 01-bootstrap.yml file was: Zuul CI -> Ansible command -> Ansible playbook -> import playbook -> import playbook With this change would be one layer less: Zuul CI -> Ansible command -> Ansible playbook -> cifmw_setup role Signed-off-by: Daniel Pawlik --- .../content_provider/content_provider.yml | 18 +++-- .../edpm_build_images/edpm_image_builder.yml | 18 +++-- ci/playbooks/kuttl/deploy-deps.yaml | 10 ++- ci/playbooks/kuttl/e2e-kuttl.yml | 18 +++-- .../meta_content_provider.yml | 16 ++--- ci/playbooks/tcib/tcib.yml | 18 +++-- deploy-edpm.yml | 6 +- hooks/playbooks/ceph-deploy.yml | 1 + playbooks/01-bootstrap.yml | 2 + roles/cifmw_setup/defaults/main.yml | 2 + roles/cifmw_setup/tasks/bootstrap.yml | 67 +++++++++++++++++++ roles/reproducer/tasks/main.yml | 17 ++--- scenarios/centos-9/ci.yml | 3 +- zuul.d/adoption.yaml | 2 +- zuul.d/molecule.yaml | 9 +++ zuul.d/projects.yaml | 1 + 16 files changed, 141 insertions(+), 67 deletions(-) create mode 100644 roles/cifmw_setup/defaults/main.yml create mode 100644 roles/cifmw_setup/tasks/bootstrap.yml diff --git a/ci/playbooks/content_provider/content_provider.yml b/ci/playbooks/content_provider/content_provider.yml index 488e6a0f17..3bd34b4189 100644 --- a/ci/playbooks/content_provider/content_provider.yml +++ b/ci/playbooks/content_provider/content_provider.yml @@ -1,14 +1,12 @@ --- -- name: Bootstrap step - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '01-bootstrap.yml' - ] | ansible.builtin.path_join - }} +- name: Bootstrap playbook + hosts: "{{ cifmw_target_host | 
default('localhost') }}" + gather_facts: true + tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: "Run ci/playbooks/content_provider/content_provider.yml" hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/ci/playbooks/edpm_build_images/edpm_image_builder.yml b/ci/playbooks/edpm_build_images/edpm_image_builder.yml index c2c13771ba..38eeaee90d 100644 --- a/ci/playbooks/edpm_build_images/edpm_image_builder.yml +++ b/ci/playbooks/edpm_build_images/edpm_image_builder.yml @@ -1,14 +1,12 @@ --- -- name: Boostrap node - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '01-bootstrap.yml' - ] | ansible.builtin.path_join - }} +- name: Bootstrap playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: "Run ci/playbooks/edpm_build_images/edpm_image_builder.yml" hosts: "{{ cifmw_zuul_target_host | default('localhost') }}" diff --git a/ci/playbooks/kuttl/deploy-deps.yaml b/ci/playbooks/kuttl/deploy-deps.yaml index dcf097de5a..97f754b027 100644 --- a/ci/playbooks/kuttl/deploy-deps.yaml +++ b/ci/playbooks/kuttl/deploy-deps.yaml @@ -1,6 +1,12 @@ --- -- name: Run ci_framework bootstrap playbook - ansible.builtin.import_playbook: "../../../playbooks/01-bootstrap.yml" +- name: Bootstrap playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - hosts: "{{ cifmw_target_host | default('localhost') }}" name: Install dev tools diff --git a/ci/playbooks/kuttl/e2e-kuttl.yml b/ci/playbooks/kuttl/e2e-kuttl.yml index 4cd9dd4c93..20addb7061 100644 --- a/ci/playbooks/kuttl/e2e-kuttl.yml +++ 
b/ci/playbooks/kuttl/e2e-kuttl.yml @@ -1,14 +1,12 @@ --- -- name: Bootstrap step - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '01-bootstrap.yml' - ] | ansible.builtin.path_join - }} +- name: Bootstrap playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: Install deps and prepare for KUTTL run hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/ci/playbooks/meta_content_provider/meta_content_provider.yml b/ci/playbooks/meta_content_provider/meta_content_provider.yml index eea0995e66..533e663eb1 100644 --- a/ci/playbooks/meta_content_provider/meta_content_provider.yml +++ b/ci/playbooks/meta_content_provider/meta_content_provider.yml @@ -1,19 +1,13 @@ --- -- name: Bootstrap step - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '01-bootstrap.yml' - ] | ansible.builtin.path_join - }} - - name: Run ci/playbooks/meta_content_provider/meta_content_provider.yml hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml + - name: Install necessary dependencies ansible.builtin.include_role: name: 'install_yamls_makes' diff --git a/ci/playbooks/tcib/tcib.yml b/ci/playbooks/tcib/tcib.yml index 0edcd33a35..ca1f6380e0 100644 --- a/ci/playbooks/tcib/tcib.yml +++ b/ci/playbooks/tcib/tcib.yml @@ -1,14 +1,12 @@ --- -- name: Bootstrap step - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '01-bootstrap.yml' - ] | ansible.builtin.path_join - }} +- name: Bootstrap 
playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + tasks: + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: "Run ci/playbooks/tcib/tcib.yml" hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 79d5065d26..0e45ee3e49 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -18,8 +18,10 @@ ansible.builtin.include_tasks: file: "ci/playbooks/tasks/inherit_parent_scenario.yml" -- name: Bootstrap step - ansible.builtin.import_playbook: playbooks/01-bootstrap.yml + - name: Run bootstrap + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: Import infra entrypoint playbook ansible.builtin.import_playbook: playbooks/02-infra.yml diff --git a/hooks/playbooks/ceph-deploy.yml b/hooks/playbooks/ceph-deploy.yml index 11c11917b9..3356d85356 100644 --- a/hooks/playbooks/ceph-deploy.yml +++ b/hooks/playbooks/ceph-deploy.yml @@ -14,6 +14,7 @@ # Since the hook injects the ansible.cfg in the ansible-playbook command, # we therefore should know where to look for the install_yamls_makes role. # For the records, this role is generated in the 01-bootstrap.yml playbook + # (migrated to: roles/cifmw_setup/tasks/bootstrap.yml) # by leveraging the install_yamls role and related modules, especially # the generate_make_tasks. # And we can pass down the cifmw_make_ceph_environment set in the diff --git a/playbooks/01-bootstrap.yml b/playbooks/01-bootstrap.yml index 3e24f171f5..f81ae4f41a 100644 --- a/playbooks/01-bootstrap.yml +++ b/playbooks/01-bootstrap.yml @@ -1,3 +1,5 @@ +--- +# NOTE: Playbook migrated to: cifmw_setup/tasks/bootstrap.yml. 
- name: Bootstrap playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true diff --git a/roles/cifmw_setup/defaults/main.yml b/roles/cifmw_setup/defaults/main.yml new file mode 100644 index 0000000000..159eaa9a78 --- /dev/null +++ b/roles/cifmw_setup/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ansible_user_dir: "{{ lookup('env', 'HOME') }}" diff --git a/roles/cifmw_setup/tasks/bootstrap.yml b/roles/cifmw_setup/tasks/bootstrap.yml new file mode 100644 index 0000000000..0f92eb2b5e --- /dev/null +++ b/roles/cifmw_setup/tasks/bootstrap.yml @@ -0,0 +1,67 @@ +--- +- name: Set custom cifmw PATH reusable fact + tags: + - always + when: + - cifmw_path is not defined + ansible.builtin.set_fact: + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + cacheable: true + +- name: Get customized parameters + tags: + - always + ansible.builtin.set_fact: + ci_framework_params: >- + {{ + hostvars[inventory_hostname] | + dict2items | + selectattr("key", "match", + "^(cifmw|pre|post)_(?!install_yamls|openshift_token|openshift_login|openshift_kubeconfig).*") | + list | items2dict + }} + +- name: Install custom CAs as soon as possible + tags: + - bootstrap + - packages + ansible.builtin.import_role: + name: install_ca + +- name: Run repo_setup + tags: + - bootstrap + - packages + ansible.builtin.import_role: + name: repo_setup + +- name: Run ci_setup role + tags: + - bootstrap + ansible.builtin.import_role: + role: ci_setup + +- name: Prepare install_yamls make targets + when: + - cifmw_architecture_scenario is undefined + tags: + - bootstrap + ansible.builtin.include_role: + name: install_yamls + apply: + tags: + - bootstrap + +- name: Get latest image for future reference + tags: + - bootstrap + ansible.builtin.import_role: + role: discover_latest_image + +- name: Create artifacts with custom params + tags: + - always + ansible.builtin.copy: + mode: "0644" + dest: "{{ 
cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/parameters/custom-params.yml" + content: "{{ ci_framework_params | to_nice_yaml }}" diff --git a/roles/reproducer/tasks/main.yml b/roles/reproducer/tasks/main.yml index b31962fbf8..31167eee19 100644 --- a/roles/reproducer/tasks/main.yml +++ b/roles/reproducer/tasks/main.yml @@ -284,17 +284,14 @@ - "/home/zuul/ansible-bootstrap.log" - name: Bootstrap environment on controller-0 - environment: - ANSIBLE_LOG_PATH: "~/ansible-bootstrap.log" + vars: + # NOTE: need to overwrite parent vars: + # ./roles/reproducer/molecule/crc_layout/converge.yml + cifmw_basedir: "{{ ansible_user_dir ~ '/ci-framework-data' }}" no_log: "{{ cifmw_nolog | default(true) | bool }}" - ansible.builtin.command: - chdir: "{{ _cifmw_reproducer_framework_location }}" - cmd: >- - ansible-playbook -i ~/ci-framework-data/artifacts/zuul_inventory.yml - -e @~/ci-framework-data/parameters/reproducer-variables.yml - -e @scenarios/reproducers/networking-definition.yml - playbooks/01-bootstrap.yml - creates: "/home/zuul/ansible-bootstrap.log" + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: bootstrap.yml - name: Install dev tools from install_yamls on controller-0 environment: diff --git a/scenarios/centos-9/ci.yml b/scenarios/centos-9/ci.yml index e289b70a47..77114ebcf1 100644 --- a/scenarios/centos-9/ci.yml +++ b/scenarios/centos-9/ci.yml @@ -34,7 +34,8 @@ cifmw_run_tests: true # The actual ceph_make task understands "make_ceph_environment". # But since we're calling it via hook, in order to expose it properly, we # have to prefix it with "cifmw_". It will then end in the generated file from -# 01-bootstrap.yml playbook (custom-params.yml), and the hook will be able +# 01-bootstrap.yml playbook (custom-params.yml; migrated to +# roles/cifmw_setup/tasks/bootstrap.yml), and the hook will be able # to load it and consume the parameters properly # Check hooks/playbooks/ceph-deploy.yml for the whole logic. 
cifmw_make_ceph_environment: diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index f826af9057..ef537ac056 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -117,7 +117,6 @@ parent: adoption-standalone-to-crc-ceph files: # ci-framework - - ^playbooks/01-bootstrap.yml - ^playbooks/02-infra.yml - ^playbooks/06-deploy-edpm.yml - ^roles/discover_latest_image/.* @@ -127,6 +126,7 @@ - ^roles/openshift_login/.* - ^roles/openshift_setup/.* - ^roles/repo_setup/.* + - ^roles/cifmw_setup/.* - ^hooks/playbooks/fetch_compute_facts.yml - ^zuul.d/adoption.yaml # openstack-operator diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 3578dd32b1..7796925872 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -875,6 +875,15 @@ - ^.config/molecule/.* name: cifmw-molecule-cifmw_external_dns parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_setup/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_setup + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 0c0e3a90ef..ee6b054032 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -34,6 +34,7 @@ - cifmw-molecule-cifmw_create_admin - cifmw-molecule-cifmw_external_dns - cifmw-molecule-cifmw_ntp + - cifmw-molecule-cifmw_setup - cifmw-molecule-cifmw_test_role - cifmw-molecule-compliance - cifmw-molecule-config_drive From f1882e4e57cf58cf78ded49c74bf5ce8e5bb47c8 Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Wed, 16 Apr 2025 09:25:31 +0100 Subject: [PATCH 108/480] Add tobiko_cleanup in cifmw --- roles/test_operator/README.md | 2 + roles/test_operator/defaults/main.yml | 2 + .../tasks/runners/tobiko_runner.yml | 57 +++++++++++++++++++ 3 files changed, 61 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index a1473789fb..c22bd80a4b 100644 --- 
a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -107,6 +107,7 @@ Default value: {} * `cifmw_test_operator_tobiko_name`: (String) Value used in the `Tobiko.Metadata.Name` field. The value specifies the name of some resources spawned by the test-operator role. Default value: `tobiko-tests` * `cifmw_test_operator_tobiko_registry`: (String) The registry where to pull tobiko container. Default value: `{{ cifmw_test_operator_default_registry }}` * `cifmw_test_operator_tobiko_namespace`: (String) Registry's namespace where to pull tobiko container. Default value: `{{ cifmw_test_operator_default_namespace }}` +* `cifmw_test_operator_tobiko_cleanup`: (Boolean) Cleanup all resources created by tobiko. Default value: `false` * `cifmw_test_operator_tobiko_container`: (String) Name of the tobiko container. Default value: `openstack-tobiko` * `cifmw_test_operator_tobiko_image`: (String) Tobiko image to be used. Default value: `{{ cifmw_test_operator_tobiko_registry }}/{{ cifmw_test_operator_tobiko_namespace }}/{{ cifmw_test_operator_tobiko_container }}` * `cifmw_test_operator_tobiko_image_tag`: (String) Tag for the `cifmw_test_operator_tobiko_image`. Default value: `{{ cifmw_test_operator_default_image_tag }}` @@ -117,6 +118,7 @@ Default value: {} * `cifmw_test_operator_tobiko_num_processes`: (Integer) Sets the value of the env variable `TOX_NUM_PROCESSES` that is used to run pytest with `--numprocesses $TOX_NUM_PROCESSES`. Defaults to `null`. In case of `null` value, `TOX_NUM_PROCESSES` is not set (tobiko internally uses the value `auto`, see pytest documentation about the `--numprocesses` option). * `cifmw_test_operator_tobiko_advanced_image_url`: (String) Tobiko will download images from this URL that will be used to create advance VM instances. By default, the provided image will include all the customizations required by the tobiko tests. Defaults to `https://softwarefactory-project.io/ubuntu-minimal-customized-enp3s0`. 
* `cifmw_test_operator_tobiko_kubeconfig_secret`: (String) Name of the Openshift Secret required to use Openshift Client from the Tobiko pod. Default value: `tobiko-secret` +* `cifmw_test_operator_tobiko_openstack_cmd`: (String) Openstack command is used by tobiko to cleanup resources. Default value: `oc -n openstack exec openstackclient -- openstack` * `cifmw_test_operator_tobiko_override_conf`: (Dict) Overrides the default configuration from `cifmw_test_operator_tobiko_default_conf` that is used to generate the tobiko.conf file. Default value: empty dictionary * `cifmw_test_operator_tobiko_ssh_keytype`: (String) Type of ssh key that tobiko will use to connect to the VM instances it creates. Defaults to `cifmw_ssh_keytype` which default to `ecdsa`. * `cifmw_test_operator_tobiko_ssh_keysize`: (Integer) Size of ssh key that tobiko will use to connect to the VM instances it creates. Defaults to `cifmw_ssh_keysize` which defaults to 521. diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index d7b533aad4..66b8e5fcf7 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -164,6 +164,8 @@ cifmw_test_operator_tobiko_num_processes: null cifmw_test_operator_tobiko_advanced_image_url: "https://softwarefactory-project.io/ubuntu-minimal-customized-enp3s0" cifmw_test_operator_tobiko_override_conf: {} cifmw_test_operator_tobiko_kubeconfig_secret: tobiko-secret +cifmw_test_operator_tobiko_openstack_cmd: 'oc -n openstack exec openstackclient -- openstack' +cifmw_test_operator_tobiko_cleanup: false cifmw_test_operator_tobiko_ssh_keytype: "{{ cifmw_ssh_keytype | default('ecdsa') }}" cifmw_test_operator_tobiko_ssh_keysize: "{{ cifmw_ssh_keysize | default(521) }}" cifmw_test_operator_tobiko_debug: false diff --git a/roles/test_operator/tasks/runners/tobiko_runner.yml b/roles/test_operator/tasks/runners/tobiko_runner.yml index da663290be..418ef5e1a3 100644 --- 
a/roles/test_operator/tasks/runners/tobiko_runner.yml +++ b/roles/test_operator/tasks/runners/tobiko_runner.yml @@ -10,3 +10,60 @@ test_operator_workflow: "{{ stage_vars_dict.cifmw_test_operator_tobiko_workflow }}" test_operator_config_playbook: tobiko-tests.yml ansible.builtin.include_tasks: run-test-operator-job.yml + +- name: Cleanup tobiko workloads + when: cifmw_test_operator_tobiko_cleanup | bool + block: + - name: Cleanup Loadbalancers created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + for lb in $({{ cifmw_test_operator_tobiko_openstack_cmd }} loadbalancer list | \ + grep "tobiko" | awk -F '|' '{print $2}') + do + {{ cifmw_test_operator_tobiko_openstack_cmd }} loadbalancer delete --cascade --wait $lb + done + failed_when: false + + - name: Cleanup Heat stacks created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + {{ cifmw_test_operator_tobiko_openstack_cmd }} stack list | \ + grep "tobiko" | awk -F '|' '{print $2}' | \ + xargs -r timeout 180 {{ cifmw_test_operator_tobiko_openstack_cmd }} stack delete -y --wait + register: result + retries: 5 + delay: 5 + until: result.rc == 0 + failed_when: false + + - name: Cleanup subnet pools created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + {{ cifmw_test_operator_tobiko_openstack_cmd }} subnet pool list | \ + grep "tobiko" | awk -F '|' '{print $2}' | \ + xargs -r {{ cifmw_test_operator_tobiko_openstack_cmd }} subnet pool delete + failed_when: false + + - name: Cleanup Security Groups created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + {{ cifmw_test_operator_tobiko_openstack_cmd }} security group list | \ + grep "tobiko" | awk -F '|' '{print $2}' | \ + xargs -r {{ cifmw_test_operator_tobiko_openstack_cmd }} security group delete + failed_when: false + + - name: Cleanup Glance images created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + {{ cifmw_test_operator_tobiko_openstack_cmd }} image list | \ + grep 
"tobiko" | awk -F '|' '{print $2}' | \ + xargs -r {{ cifmw_test_operator_tobiko_openstack_cmd }} image delete + failed_when: false + + - name: Cleanup Manila shares created by Tobiko tests + ansible.builtin.shell: | + set -o pipefail && \ + {{ cifmw_test_operator_tobiko_openstack_cmd }} share list | \ + grep "tobiko" | awk -F '|' '{print $2}' | \ + xargs -r {{ cifmw_test_operator_tobiko_openstack_cmd }} share delete --force + failed_when: false From 3ddebbe3c6b394534f45fc034db532b911a9f9bf Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Wed, 7 May 2025 03:19:19 +0200 Subject: [PATCH 109/480] dt-dcn: Make VM's names of DCN compute groups not to be substring of each other. The reason is to work around a bug in neutron --- playbooks/dcn.yml | 4 ++-- roles/reproducer/tasks/libvirt_layout.yml | 4 ++-- scenarios/reproducers/dt-dcn.yml | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index 761cfd68e9..a9b247c6da 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -68,8 +68,8 @@ ansible.builtin.set_fact: az_to_group_map: az0: computes - az1: dcn1-computes - az2: dcn2-computes + az1: dcn1-compute-az1s + az2: dcn2-compute-az2s - name: Scaledown the DCN site vars: diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index 43e1d9a7ee..e0883565b1 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -99,8 +99,8 @@ (compute.key in (groups['computes'] | default([]))) or (compute.key in (groups['cephs'] | default([]))) or (compute.key in (groups['networkers'] | default([]))) or - (compute.key in (groups['dcn1-computes'] | default([]))) or - (compute.key in (groups['dcn2-computes'] | default([]))) or + (compute.key in (groups['dcn1-compute-az1s'] | default([]))) or + (compute.key in (groups['dcn2-compute-az2s'] | default([]))) or (compute.key is match('^r[0-9]-compute-.*')) or (compute.key is 
match('^r[0-9]-networker-.*')) vars: diff --git a/scenarios/reproducers/dt-dcn.yml b/scenarios/reproducers/dt-dcn.yml index 7f81a718e7..fdfa6b8ea4 100644 --- a/scenarios/reproducers/dt-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -142,7 +142,7 @@ cifmw_libvirt_manager_configuration: nets: - ocpbm - osp_trunk - dcn1-compute: + dcn1-compute-az1: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" @@ -158,7 +158,7 @@ cifmw_libvirt_manager_configuration: nets: - dcn1_pb - dcn1_tr - dcn2-compute: + dcn2-compute-az2: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" @@ -470,7 +470,7 @@ cifmw_networking_definition: trunk-parent: ctlplane storagemgmt: trunk-parent: ctlplane - dcn1-computes: + dcn1-compute-az1s: network-template: range: start: 111 @@ -485,7 +485,7 @@ cifmw_networking_definition: trunk-parent: ctlplanedcn1 storagemgmtdcn1: trunk-parent: ctlplanedcn1 - dcn2-computes: + dcn2-compute-az2s: network-template: range: start: 121 From 647d7d79b54d746ec794b058dcc18e92981b5fb6 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Mon, 21 Apr 2025 13:03:21 +0000 Subject: [PATCH 110/480] [OSPRH-15434] Automate deploy of multi-namespace RHOSO --- .../multi-namespace/ns2_osdp_services.yaml | 30 ++ .../multi-namespace/ns2_osp_networks.yaml | 40 ++ playbooks/multi-namespace/ns2_validation.yaml | 9 + .../edpm-nodeset-values/values.yaml.j2 | 71 +++ .../edpm-nodeset2-values/values.yaml.j2 | 73 ++++ .../network-values2/values.yaml.j2 | 130 ++++++ roles/os_net_setup/README.md | 1 + roles/os_net_setup/defaults/main.yml | 1 + roles/os_net_setup/tasks/main.yml | 12 +- .../os_net_setup/templates/network_command.j2 | 2 +- .../os_net_setup/templates/subnet_command.j2 | 2 +- .../templates/subnet_pool_command.j2 | 2 +- roles/reproducer/tasks/libvirt_layout.yml | 3 +- .../tasks/run-test-operator-job.yml | 3 
+- scenarios/reproducers/va-multi.yml | 405 ++++++++++++++++++ 15 files changed, 773 insertions(+), 11 deletions(-) create mode 100644 playbooks/multi-namespace/ns2_osdp_services.yaml create mode 100644 playbooks/multi-namespace/ns2_osp_networks.yaml create mode 100644 playbooks/multi-namespace/ns2_validation.yaml create mode 100644 roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 create mode 100644 scenarios/reproducers/va-multi.yml diff --git a/playbooks/multi-namespace/ns2_osdp_services.yaml b/playbooks/multi-namespace/ns2_osdp_services.yaml new file mode 100644 index 0000000000..c2196bc929 --- /dev/null +++ b/playbooks/multi-namespace/ns2_osdp_services.yaml @@ -0,0 +1,30 @@ +--- +- name: Acquire previously-deployed OpenStackDataPlaneServices for openstack2 namespace + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Fetch the already deployed services for further usage + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc get osdps + --namespace openstack2 + --no-headers + -o custom-columns=":metadata.name" + changed_when: false + register: _ci_gen_kustomize_deployed_services_stdout + + - name: Expose the deployed services as a fact + ansible.builtin.set_fact: + ci_gen_kustomize_edpm_nodeset_predeployed_services2: >- + {{ + _ci_gen_kustomize_deployed_services_stdout.stdout_lines | default ([]) + }} + + - name: Feed generated content to main play + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/pre_stage_8_run_get_openstackdataplaneservices.yml" + content: | + ci_gen_kustomize_edpm_nodeset_predeployed_services2: {{ ci_gen_kustomize_edpm_nodeset_predeployed_services2 }} + mode: "0644" diff 
--git a/playbooks/multi-namespace/ns2_osp_networks.yaml b/playbooks/multi-namespace/ns2_osp_networks.yaml new file mode 100644 index 0000000000..e006a0d8db --- /dev/null +++ b/playbooks/multi-namespace/ns2_osp_networks.yaml @@ -0,0 +1,40 @@ +--- +- name: Post-deployment admin setup steps for namespace {{ cifmw_os_net_setup_namespace }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Create openstack network elements + vars: + cifmw_os_net_setup_config: + - name: public + external: true + shared: false + is_default: true + provider_network_type: flat + provider_physical_network: datacentre + availability_zone_hints: [] + subnets: + - name: public_subnet + cidr: "{{ cifmw_os_net_setup_public_cidr }}" + allocation_pool_start: "{{ cifmw_os_net_setup_public_start }}" + allocation_pool_end: "{{ cifmw_os_net_setup_public_end }}" + gateway_ip: "{{ cifmw_os_net_setup_public_gateway }}" + enable_dhcp: true + cifmw_os_net_subnetpool_config: + - name: shared-pool-ipv4 + default_prefix_length: 26 + prefixes: '10.1.0.0/20' + is_default: true + is_shared: true + - name: shared-pool-ipv6 + default_prefix_length: 64 + prefixes: 'fdfe:391f:8400::/56' + is_default: true + is_shared: true + ansible.builtin.import_role: + name: os_net_setup + when: not cifmw_skip_os_net_setup | default('false') | bool diff --git a/playbooks/multi-namespace/ns2_validation.yaml b/playbooks/multi-namespace/ns2_validation.yaml new file mode 100644 index 0000000000..f5f28158bc --- /dev/null +++ b/playbooks/multi-namespace/ns2_validation.yaml @@ -0,0 +1,9 @@ +--- +- name: Validation for namespace {{ cifmw_test_operator_namespace }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: "Run tests for namespace {{ cifmw_test_operator_namespace }}" + tags: + - tests + ansible.builtin.import_role: + name: "{{ 
cifmw_run_test_role | default('tempest') }}" diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..b9a163bac6 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset-values/values.yaml.j2 @@ -0,0 +1,71 @@ +--- +# source: multi-namespace/edpm-nodeset-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith(_vm_type ~ "-") %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in instances_names %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length 
> 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in instances_names %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} + +{% if _vm_type.startswith('compute') %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 new file mode 100644 index 0000000000..8014989476 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/edpm-nodeset2-values/values.yaml.j2 @@ -0,0 +1,73 @@ +--- +# source: multi-namespace/edpm-nodeset2-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% set _vm_type = (_original_nodes.keys() | 
first).split('-')[1] %} +{{ '#vmtype: ' ~ _vm_type }} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith(_vm_type ~ "2-") %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{{ '#' ~ _inst }} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in instances_names %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane2.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in instances_names %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane2[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net | replace('2', '') }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane2') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in 
ci_gen_kustomize_edpm_nodeset_predeployed_services2 | default([])) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} + +{% if _vm_type.startswith('compute') %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 new file mode 100644 index 0000000000..a9ad9c29dc --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 @@ -0,0 +1,130 @@ +--- +# source: multi-namespace/network-values2/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} +{% if "2" in network.network_name %} + {{ network.network_name | replace("2", "") }}_ip: {{ network[_ipv.ip_vX] }} +{% endif %} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() if "2" in network %} +{% set ns.lb_tools = {} %} + {{ network.network_name | replace("2", "") }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in 
network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} + - allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network[_ipv.network_vX] }} +{% if network[_ipv.gw_vX] is defined %} + gateway: {{ network[_ipv.gw_vX] }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% elif network.network_name == "ctlplane" %} + "master": "ospbr", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] 
}}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane2[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane2[_ipv.gw_vX] }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi2 + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi2'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi2 + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi2'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} diff --git a/roles/os_net_setup/README.md b/roles/os_net_setup/README.md index a947c884d2..fd70e0b4f4 100644 --- a/roles/os_net_setup/README.md +++ b/roles/os_net_setup/README.md @@ -16,6 +16,7 @@ That is provided by `openshift_login` role. * `cifmw_os_net_subnetpool_config`: (list) It contains the definitions for subnet pools. See an example in roles/os_net_setup/defaults/main.yml * `cifmw_os_net_setup_dry_run`: (bool) Disable the generation of the commands. +* `cifmw_os_net_setup_namespace`: (str) Namespace in which to access the OSP cloud. Defaults to `openstack`. 
## Molecule diff --git a/roles/os_net_setup/defaults/main.yml b/roles/os_net_setup/defaults/main.yml index 397b9ba529..068a53dc55 100644 --- a/roles/os_net_setup/defaults/main.yml +++ b/roles/os_net_setup/defaults/main.yml @@ -35,3 +35,4 @@ cifmw_os_net_subnetpool_config: is_shared: true cifmw_os_net_setup_dry_run: false +cifmw_os_net_setup_namespace: openstack diff --git a/roles/os_net_setup/tasks/main.yml b/roles/os_net_setup/tasks/main.yml index b3cd35d720..fece2bb39c 100644 --- a/roles/os_net_setup/tasks/main.yml +++ b/roles/os_net_setup/tasks/main.yml @@ -9,10 +9,10 @@ - name: Delete existing subnets ansible.builtin.shell: | set -euxo pipefail - if [ $(oc exec -n openstack openstackclient -- \ + if [ $(oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack subnet list --network {{ item.0.name }} -c Name -f value | \ grep -c {{ item.1.name }}) != 0 ];then - oc exec -n openstack openstackclient -- \ + oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack subnet delete {{ item.1.name }} fi loop: >- @@ -23,20 +23,20 @@ - name: Delete existing subnet pools ansible.builtin.shell: | set -euxo pipefail - if [ $(oc exec -n openstack openstackclient -- \ + if [ $(oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack subnet pool list -c Name -f value | \ grep -c {{ item.name }}) != 0 ];then - oc exec -n openstack openstackclient -- \ + oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack subnet pool delete {{ item.name }} fi loop: "{{ cifmw_os_net_subnetpool_config }}" - name: Delete existing networks ansible.builtin.shell: | set -euxo pipefail - if [ $(oc exec -n openstack openstackclient -- \ + if [ $(oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack network list -c Name -f value | \ grep -c {{ item.name }}) != 0 ];then - oc exec -n openstack openstackclient -- \ + oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- \ openstack network 
delete {{ item.name }} fi loop: "{{ cifmw_os_net_setup_config }}" diff --git a/roles/os_net_setup/templates/network_command.j2 b/roles/os_net_setup/templates/network_command.j2 index d2f638a57c..cceace2f39 100644 --- a/roles/os_net_setup/templates/network_command.j2 +++ b/roles/os_net_setup/templates/network_command.j2 @@ -1,6 +1,6 @@ set -euo pipefail {% for net_args in cifmw_os_net_setup_config %} -oc exec -n openstack openstackclient -- openstack network create \ +oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- openstack network create \ {% if net_args.dns_domain is defined %} --dns-domain {{ keydns_domain }} \ {% endif %} diff --git a/roles/os_net_setup/templates/subnet_command.j2 b/roles/os_net_setup/templates/subnet_command.j2 index 1484dfa6e0..731cbeac5a 100644 --- a/roles/os_net_setup/templates/subnet_command.j2 +++ b/roles/os_net_setup/templates/subnet_command.j2 @@ -7,7 +7,7 @@ set -euo pipefail {% for net_args in cifmw_os_net_setup_config %} {% if net_args.subnets is defined %} {% for subnet_args in net_args.subnets %} -oc exec -n openstack openstackclient -- openstack subnet create \ +oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- openstack subnet create \ {% if subnet_args.allocation_pool_start is defined and subnet_args.allocation_pool_end is defined %} --allocation-pool start={{ subnet_args.allocation_pool_start }},end={{ subnet_args.allocation_pool_end }} \ diff --git a/roles/os_net_setup/templates/subnet_pool_command.j2 b/roles/os_net_setup/templates/subnet_pool_command.j2 index 59011519b5..4389d90a41 100644 --- a/roles/os_net_setup/templates/subnet_pool_command.j2 +++ b/roles/os_net_setup/templates/subnet_pool_command.j2 @@ -1,6 +1,6 @@ set -euo pipefail {% for subnet_pool_args in cifmw_os_net_subnetpool_config %} -oc exec -n openstack openstackclient -- openstack subnet pool create \ +oc exec -n {{ cifmw_os_net_setup_namespace }} openstackclient -- openstack subnet pool create \ {% if 
subnet_pool_args.default_prefix_length is defined %} --default-prefix-length {{ subnet_pool_args.default_prefix_length }} \ {% endif %} diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index e0883565b1..56da59c775 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -102,7 +102,8 @@ (compute.key in (groups['dcn1-compute-az1s'] | default([]))) or (compute.key in (groups['dcn2-compute-az2s'] | default([]))) or (compute.key is match('^r[0-9]-compute-.*')) or - (compute.key is match('^r[0-9]-networker-.*')) + (compute.key is match('^r[0-9]-networker-.*')) or + (compute.key is match('^compute2-.*')) vars: _host: "{{ compute.key }}" _prefix: >- diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 1e3a0c95dc..72663c54f1 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -77,6 +77,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit)}}" + namespace: "{{ cifmw_test_operator_namespace }}" kind: PersistentVolumeClaim label_selectors: - "instanceName={{ test_operator_instance_name }}" @@ -150,7 +151,7 @@ pod_path: mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} ansible.builtin.shell: > oc cp -n {{ cifmw_test_operator_namespace }} - openstack/test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} + test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} {{ cifmw_test_operator_artifacts_basedir }} loop: "{{ logsPVCs.resources }}" loop_control: diff --git a/scenarios/reproducers/va-multi.yml b/scenarios/reproducers/va-multi.yml new file mode 100644 index 0000000000..467b3ef0d1 --- /dev/null +++ b/scenarios/reproducers/va-multi.yml @@ -0,0 +1,405 @@ 
+--- +cifmw_architecture_scenario: multi-namespace + +# HERE if you want to override kustomization, you can uncomment this parameter +# and push the data structure you want to apply. +# cifmw_architecture_user_kustomize: +# stage_0: +# 'network-values': +# data: +# starwars: Obiwan + +# HERE, if you want to stop the deployment loop at any stage, you can uncomment +# the following parameter and update the value to match the stage you want to +# reach. Known stages are: +# pre_kustomize_stage_INDEX +# pre_apply_stage_INDEX +# post_apply_stage_INDEX +# +# cifmw_deploy_architecture_stopper: + +cifmw_arch_automation_file: multi-namespace.yaml +cifmw_os_must_gather_additional_namespaces: kuttl,openshift-storage,sushy-emulator,openstack2 +cifmw_reproducer_validate_network_host: "192.168.122.1" +cifmw_libvirt_manager_default_gw_nets: + - ocpbm + - osptrunk2 +cifmw_networking_mapper_interfaces_info_translations: + osp_trunk: + - controlplane + - ctlplane + osptrunk2: + - ctlplane2 + +cifmw_libvirt_manager_configuration: + networks: + osp_trunk: | + + osp_trunk + + + + + + + osptrunk2: | + + osptrunk2 + + + + + + + ocpbm: | + + ocpbm + + + + + + + ocppr: | + + ocppr + + + + vms: + ocp: + amount: 3 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "100" + extra_disks_num: 3 + extra_disks_size: "50G" + cpus: 16 + memory: 32 + root_part_id: 4 + uefi: true + nets: + - ocppr + - ocpbm + - osp_trunk + - osptrunk2 + compute: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" + memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" + cpus: "{{ 
[cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" + extra_disks_num: 3 + extra_disks_size: 30G + nets: + - ocpbm + - osp_trunk + compute2: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" + memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" + cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" + extra_disks_num: 3 + extra_disks_size: 30G + nets: + - ocpbm + - osptrunk2 + controller: + uefi: "{{ cifmw_use_uefi }}" + root_part_id: "{{ cifmw_root_partition_id }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + - osptrunk2 + +## devscript support for OCP deploy +cifmw_devscripts_config_overrides: + fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" + +# Set Logical Volume Manager Storage by default for local storage +cifmw_use_lvms: true +cifmw_lvms_disk_list: + - /dev/vda + - /dev/vdb + - /dev/vdc + +cifmw_networking_definition: + networks: + ctlplane: + network: "192.168.122.0/24" + gateway: "192.168.122.1" + dns: + - "192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 120 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + ctlplane2: + network: "192.168.133.0/24" + gateway: "192.168.133.1" + dns: + - "192.168.133.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 120 + - start: 150 + end: 170 + metallb: + ranges: + - 
start: 80 + end: 90 + internalapi: + network: "172.17.0.0/24" + vlan: 20 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + internalapi2: + network: "172.17.10.0/24" + vlan: 30 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + storage: + network: "172.18.0.0/24" + vlan: 21 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + storage2: + network: "172.18.10.0/24" + vlan: 31 + mtu: 1496 + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + tenant: + network: "172.19.0.0/24" + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 22 + mtu: 1496 + tenant2: + network: "172.19.10.0/24" + tools: + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + multus: + ranges: + - start: 30 + end: 70 + vlan: 32 + mtu: 1496 + external: + network: "10.0.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + vlan: 22 + mtu: 1500 + external2: + network: "10.10.0.0/24" + tools: + netconfig: + ranges: + - start: 100 + end: 250 + vlan: 32 + mtu: 1500 + + group-templates: + ocps: + network-template: + range: + start: 10 + length: 10 + networks: &ocps_nets + ctlplane: {} + internalapi: + trunk-parent: ctlplane + tenant: + trunk-parent: ctlplane + storage: + trunk-parent: ctlplane + ctlplane2: {} + internalapi2: + trunk-parent: ctlplane2 + tenant2: + trunk-parent: ctlplane2 + storage2: + trunk-parent: ctlplane2 + ocp_workers: + network-template: + range: + start: 20 + length: 10 + networks: *ocps_nets + computes: + 
network-template: + range: + start: 100 + length: 21 + networks: + ctlplane: {} + internalapi: + trunk-parent: ctlplane + tenant: + trunk-parent: ctlplane + storage: + trunk-parent: ctlplane + compute2s: + network-template: + range: + start: 200 + length: 21 + networks: + ctlplane2: {} + internalapi2: + trunk-parent: ctlplane2 + tenant2: + trunk-parent: ctlplane2 + storage2: + trunk-parent: ctlplane2 + instances: + controller-0: + networks: + ctlplane: + ip: "192.168.122.9" + ctlplane2: + ip: "192.168.133.9" + +# Hooks +post_deploy: + - name: Discover hypervisors for openstack2 namespace + type: playbook + source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/hooks/playbooks/nova_manage_discover_hosts.yml" + extra_vars: + namespace: openstack2 + _cell_conductors: nova-cell0-conductor-0 + +pre_admin_setup: + - name: Prepare OSP networks in openstack2 namespace + type: playbook + source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_osp_networks.yaml" + extra_vars: + cifmw_os_net_setup_namespace: openstack2 + cifmw_os_net_setup_public_cidr: "192.168.133.0/24" + cifmw_os_net_setup_public_start: "192.168.133.230" + cifmw_os_net_setup_public_end: "192.168.133.250" + cifmw_os_net_setup_public_gateway: "192.168.133.1" + +post_tests: + - name: Run tempest against openstack2 namespace + type: playbook + source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_validation.yaml" + extra_vars: + cifmw_test_operator_tempest_name: tempest-tests2 + cifmw_test_operator_namespace: openstack2 From 932c948d9160bedfb140c6f2758edb5d1174520e Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Wed, 23 Apr 2025 13:49:05 +0300 Subject: [PATCH 111/480] Reuse OCP cluster This patch creates playbooks and roles to allow re-using OCP cluster in zuul CI jobs --- clean_openstack_deployment.yaml | 9 +- deploy-edpm-reuse.yaml | 102 +++++++ roles/kustomize_deploy/tasks/cleanup.yml | 9 + 
roles/reproducer/tasks/configure_cleanup.yaml | 56 ++++ roles/reproducer/tasks/reuse_main.yaml | 274 ++++++++++++++++++ 5 files changed, 449 insertions(+), 1 deletion(-) create mode 100644 deploy-edpm-reuse.yaml create mode 100644 roles/reproducer/tasks/configure_cleanup.yaml create mode 100644 roles/reproducer/tasks/reuse_main.yaml diff --git a/clean_openstack_deployment.yaml b/clean_openstack_deployment.yaml index e70aacad66..da7f57f9b0 100644 --- a/clean_openstack_deployment.yaml +++ b/clean_openstack_deployment.yaml @@ -1,6 +1,5 @@ - name: Clean OpenStack deployment hosts: "{{ target_host | default('localhost') }}" - gather_facts: false tasks: - name: Clean up OpenStack operators vars: @@ -8,3 +7,11 @@ ansible.builtin.include_role: name: kustomize_deploy tasks_from: cleanup + + - name: Remove logs and tests directories + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - "/home/zuul/ci-framework-data/logs" + - "/home/zuul/ci-framework-data/tests" diff --git a/deploy-edpm-reuse.yaml b/deploy-edpm-reuse.yaml new file mode 100644 index 0000000000..1677af3572 --- /dev/null +++ b/deploy-edpm-reuse.yaml @@ -0,0 +1,102 @@ +--- +- name: Manage unique ID + ansible.builtin.import_playbook: playbooks/unique-id.yml + +- name: Reproducer prepare play + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + pre_tasks: + - name: Prepare cleanup script + ansible.builtin.include_role: + name: reproducer + tasks_from: configure_cleanup.yaml + + - name: Run Openstack cleanup + no_log: "{{ cifmw_nolog | default(true) | bool }}" + async: "{{ 7200 + cifmw_test_operator_timeout | default(3600) }}" # 2h should be enough to deploy EDPM and rest for tests. 
+ poll: 20 + delegate_to: controller-0 + ansible.builtin.command: + cmd: "/home/zuul/cleanup-architecture.sh" + + - name: Inherit from parent scenarios if needed + ansible.builtin.include_tasks: + file: "ci/playbooks/tasks/inherit_parent_scenario.yml" + + - name: Include common architecture parameter file + when: + - cifmw_architecture_scenario is defined + - cifmw_architecture_scenario | length > 0 + ansible.builtin.include_vars: + file: "scenarios/reproducers/va-common.yml" + + - name: Run reproducer validations + ansible.builtin.import_role: + name: reproducer + tasks_from: validations + + - name: Gather OS facts + ansible.builtin.setup: + gather_subset: + - "!all" + - "!min" + - "distribution" + + - name: Tweak dnf configuration + become: true + community.general.ini_file: + no_extra_spaces: true + option: "{{ config.option }}" + path: "/etc/dnf/dnf.conf" + section: "{{ config.section | default('main') }}" + state: "{{ config.state | default(omit) }}" + value: "{{ config.value | default(omit) }}" + mode: "0644" + loop: "{{ cifmw_reproducer_dnf_tweaks }}" + loop_control: + label: "{{ config.option }}" + loop_var: 'config' + + - name: Install custom CA if needed + ansible.builtin.import_role: + name: install_ca + + - name: Setup repositories via rhos-release if needed + tags: + - packages + when: + - ansible_facts['distribution'] == 'RedHat' + - cifmw_reproducer_hp_rhos_release | bool + vars: + cifmw_repo_setup_output: /etc/yum.repos.d + cifmw_repo_setup_rhos_release_args: "rhel" + ansible.builtin.import_role: + name: repo_setup + tasks_from: rhos_release + + roles: + - role: ci_setup + +- name: Prepare switches + vars: + cifmw_configure_switches: "{{ 'switches' in groups }}" + ansible.builtin.import_playbook: playbooks/switches_config.yml + +- name: Reproducer reuse run + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run reproducer reuse playbook + ansible.builtin.include_role: + name: reproducer + tasks_from: 
reuse_main + + - name: Run deployment if instructed to + when: + - cifmw_deploy_architecture | default(false) | bool + no_log: "{{ cifmw_nolog | default(true) | bool }}" + async: "{{ 7200 + cifmw_test_operator_timeout | default(3600) }}" # 2h should be enough to deploy EDPM and rest for tests. + poll: 20 + delegate_to: controller-0 + ansible.builtin.command: + cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" diff --git a/roles/kustomize_deploy/tasks/cleanup.yml b/roles/kustomize_deploy/tasks/cleanup.yml index 0b3d6b4320..b42abc0b09 100644 --- a/roles/kustomize_deploy/tasks/cleanup.yml +++ b/roles/kustomize_deploy/tasks/cleanup.yml @@ -34,6 +34,7 @@ reverse | selectattr('build_output', 'defined') | map(attribute='build_output') | + map('basename') | list }} _stages_crs_path: >- @@ -48,10 +49,14 @@ - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" - "{{ cifmw_kustomize_deploy_kustomizations_dest_dir }}/openstack.yaml" - "{{ cifmw_kustomize_deploy_olm_dest_file }}" + _external_dns_crs: + - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml + - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml register: _cifmw_kustomize_files ansible.builtin.set_fact: cifmw_kustomize_deploy_crs_to_delete: >- {{ + _external_dns_crs + _stages_crs_path + _operators_crs }} @@ -72,6 +77,10 @@ wait: true wait_timeout: 600 loop: "{{ _cifmw_kustomize_files.results }}" + register: _cleanup_results + until: "_cleanup_results is success" + retries: 3 + delay: 120 when: - item.stat.exists - not cifmw_kustomize_deploy_generate_crs_only diff --git a/roles/reproducer/tasks/configure_cleanup.yaml b/roles/reproducer/tasks/configure_cleanup.yaml new file mode 100644 index 0000000000..b14294c834 --- /dev/null +++ b/roles/reproducer/tasks/configure_cleanup.yaml @@ -0,0 +1,56 @@ +--- +- name: Configure cleanup + delegate_to: controller-0 + block: + - name: Discover and expose CI Framework 
path on remote node + tags: + - always + vars: + default_path: >- + {{ + cifmw_reproducer_default_repositories | + selectattr('src', 'match', '^.*/ci[_\-]framework$') | + map(attribute='dest') | first + }} + custom_path: >- + {{ + cifmw_reproducer_repositories | + selectattr('src', 'match', '^.*/ci-framework$') | + map(attribute='dest') + }} + _path: >- + {{ + (custom_path | length > 0) | + ternary(custom_path | first, default_path) + }} + ansible.builtin.set_fact: + _cifmw_reproducer_framework_location: >- + {{ + (_path is match('.*/ci-framework/?$')) | + ternary(_path, [_path, 'ci-framework'] | path_join) + }} + + - name: Push cleanup script + vars: + run_directory: "{{ _cifmw_reproducer_framework_location }}" + exports: + ANSIBLE_LOG_PATH: "~/ansible-cleanup-architecture.log" + default_extravars: + - "@~/ci-framework-data/parameters/reproducer-variables.yml" + - "@~/ci-framework-data/parameters/openshift-environment.yml" + - "@~/ci-framework-data/artifacts/parameters/openshift-login-params.yml" + extravars: "{{ cifmw_reproducer_play_extravars }}" + playbook: "clean_openstack_deployment.yaml" + ansible.builtin.template: + dest: "/home/zuul/cleanup-architecture.sh" + src: "script.sh.j2" + mode: "0755" + owner: "zuul" + group: "zuul" + + - name: Rotate some logs + tags: + - always + ansible.builtin.include_tasks: rotate_log.yml + loop: + - ansible-cleanup-architecture.log diff --git a/roles/reproducer/tasks/reuse_main.yaml b/roles/reproducer/tasks/reuse_main.yaml new file mode 100644 index 0000000000..2d4c4ea8cb --- /dev/null +++ b/roles/reproducer/tasks/reuse_main.yaml @@ -0,0 +1,274 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Load CI job environment + tags: + - bootstrap_layout + when: + - cifmw_job_uri is defined + ansible.builtin.include_tasks: + file: ci_data.yml + apply: + tags: + - bootstrap_layout + +- name: Discover and expose CI Framework path on remote node + tags: + - always + vars: + default_path: >- + {{ + cifmw_reproducer_default_repositories | + selectattr('src', 'match', '^.*/ci[_\-]framework$') | + map(attribute='dest') | first + }} + custom_path: >- + {{ + cifmw_reproducer_repositories | + selectattr('src', 'match', '^.*/ci-framework$') | + map(attribute='dest') + }} + _path: >- + {{ + (custom_path | length > 0) | + ternary(custom_path | first, default_path) + }} + ansible.builtin.set_fact: + _cifmw_reproducer_framework_location: >- + {{ + (_path is match('.*/ci-framework/?$')) | + ternary(_path, [_path, 'ci-framework'] | path_join) + }} + +- name: Set _use_crc based on actual layout + tags: + - always + vars: + _use_crc: >- + {{ + _cifmw_libvirt_manager_layout.vms.crc is defined and + ( + (_cifmw_libvirt_manager_layout.vms.crc.amount is defined and + _cifmw_libvirt_manager_layout.vms.crc.amount|int > 0) or + _cifmw_libvirt_manager_layout.vms.crc.amount is undefined) + }} + _use_ocp: >- + {{ + _cifmw_libvirt_manager_layout.vms.ocp is defined and + (_cifmw_libvirt_manager_layout.vms.ocp.amount is defined and + _cifmw_libvirt_manager_layout.vms.ocp.amount|int > 0) + }} + ansible.builtin.set_fact: + _use_crc: "{{ _use_crc }}" + _use_ocp: "{{ _use_ocp }}" + _has_openshift: "{{ _use_ocp or _use_crc }}" + +- name: Ensure directories are present + tags: + - 
always + ansible.builtin.file: + path: "{{ cifmw_reproducer_basedir }}/{{ item }}" + state: directory + mode: "0755" + loop: + - artifacts + - logs + +- name: Load the architecture local kustomize patches + when: + - cifmw_architecture_scenario is defined + ansible.builtin.include_role: + name: kustomize_deploy + tasks_from: generate_base64_patches_from_tree.yml + +- name: Run only on hypervisor with controller-0 + block: + - name: Push local code + ansible.builtin.include_tasks: push_code.yml + + - name: Group tasks on controller-0 + delegate_to: controller-0 + block: + - name: Inject CI Framework motd + become: true + ansible.builtin.template: + dest: "/etc/motd.d/cifmw.motd" + src: "motd.j2" + mode: "0644" + + - name: Rotate ansible-bootstrap logs + tags: + - always + ansible.builtin.include_tasks: rotate_log.yml + loop: + - "/home/zuul/ansible-bootstrap.log" + + - name: Bootstrap environment on controller-0 + environment: + ANSIBLE_LOG_PATH: "~/ansible-bootstrap.log" + no_log: "{{ cifmw_nolog | default(true) | bool }}" + ansible.builtin.command: + chdir: "{{ _cifmw_reproducer_framework_location }}" + cmd: >- + ansible-playbook -i ~/ci-framework-data/artifacts/zuul_inventory.yml + -e @~/ci-framework-data/parameters/reproducer-variables.yml + -e @scenarios/reproducers/networking-definition.yml + playbooks/01-bootstrap.yml + creates: "/home/zuul/ansible-bootstrap.log" + + - name: Install dev tools from install_yamls on controller-0 + environment: + ANSIBLE_LOG_PATH: "~/ansible-bootstrap.log" + vars: + _devsetup_path: >- + {{ + ( + cifmw_install_yamls_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'), + 'devsetup' + ) | ansible.builtin.path_join + }} + no_log: "{{ cifmw_nolog | default(true) | bool }}" + ansible.builtin.command: + chdir: "{{ _devsetup_path }}" + cmd: >- + ansible-playbook -i ~/ci-framework-data/artifacts/zuul_inventory.yml + download_tools.yaml --tags kustomize,kubectl + creates: "/home/zuul/bin/kubectl" + +# Run 
from the hypervisor +- name: Ensure OCP cluster is stable + when: + - _wait_ocp_cluster is defined + - _wait_ocp_cluster | bool + tags: + - bootstrap + - bootstrap_layout + vars: + _auth_path: >- + {{ + ( + cifmw_devscripts_repo_dir, + 'ocp', + cifmw_devscripts_config.cluster_name, + 'auth' + ) | ansible.builtin.path_join + }} + cifmw_openshift_adm_op: "stable" + cifmw_openshift_kubeconfig: >- + {{ (_auth_path, 'kubeconfig') | ansible.builtin.path_join }} + ansible.builtin.include_role: + name: openshift_adm + +- name: Run from controller-0 + delegate_to: controller-0 + block: + - name: Emulate CI job + when: + - cifmw_job_uri is defined + ansible.builtin.include_tasks: ci_job.yml + + - name: Prepare VA deployment + when: + - cifmw_architecture_scenario is defined + - cifmw_job_uri is undefined + tags: + - deploy_architecture + ansible.builtin.include_tasks: + file: configure_architecture.yml + apply: + tags: + - deploy_architecture + + - name: Set facts related to the reproducer + ansible.builtin.set_fact: + _ctl_reproducer_basedir: >- + {{ + ( + '/home/zuul', + 'ci-framework-data', + ) | path_join + }} + + - name: Ensure directories exist + ansible.builtin.file: + path: "{{ _ctl_reproducer_basedir }}/{{ item }}" + state: directory + mode: "0755" + loop: + - parameters + - artifacts + + - name: Inject most of the cifmw_ parameters passed to the reproducer run + tags: + - bootstrap_env + vars: + _filtered_vars: >- + {{ + hostvars[inventory_hostname] | default({}) | + dict2items | + selectattr('key', 'match', + '^(pre|post|cifmw)_(?!install_yamls|devscripts).*') | + rejectattr('key', 'equalto', 'cifmw_target_host') | + rejectattr('key', 'equalto', 'cifmw_basedir') | + rejectattr('key', 'equalto', 'cifmw_path') | + rejectattr('key', 'equalto', 'cifmw_extras') | + rejectattr('key', 'equalto', 'cifmw_openshift_kubeconfig') | + rejectattr('key', 'equalto', 'cifmw_openshift_token') | + rejectattr('key', 'equalto', 'cifmw_networking_env_definition') | + rejectattr('key', 
'match', '^cifmw_use_(?!lvms).*') | + rejectattr('key', 'match', '^cifmw_reproducer.*') | + rejectattr('key', 'match', '^cifmw_rhol.*') | + rejectattr('key', 'match', '^cifmw_discover.*') | + rejectattr('key', 'match', '^cifmw_libvirt_manager.*') | + rejectattr('key', 'match', '^cifmw_manage_secrets_(pullsecret|citoken).*') | + items2dict + }} + ansible.builtin.copy: + mode: "0644" + dest: "/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + content: "{{ _filtered_vars | to_nice_yaml }}" + + - name: Create reproducer-variables.yml symlink to old location + ansible.builtin.file: + dest: "/home/zuul/reproducer-variables.yml" + src: "/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + state: link + + - name: Slurp kubeadmin password + ansible.builtin.slurp: + src: /home/zuul/.kube/kubeadmin-password + register: _kubeadmin_password + + - name: Prepare ci-like EDPM deploy + when: + - cifmw_job_uri is undefined + delegate_to: controller-0 + vars: + run_directory: "{{ _cifmw_reproducer_framework_location }}" + exports: + ANSIBLE_LOG_PATH: "~/ansible-deploy-edpm.log" + default_extravars: + - "@scenarios/centos-9/base.yml" + - "@scenarios/centos-9/edpm_ci.yml" + - "cifmw_openshift_password='{{ _kubeadmin_password.content | b64decode }}'" + extravars: "{{ cifmw_reproducer_play_extravars }}" + playbook: "deploy-edpm.yml" + ansible.builtin.template: + dest: "/home/zuul/deploy-edpm.sh" + src: "script.sh.j2" + mode: "0755" + owner: "zuul" + group: "zuul" From 80cfa59e78b0123af338af3a040237e7bd6398f0 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 13 May 2025 13:01:34 +0300 Subject: [PATCH 112/480] Add become for log delete In certain usecases some log files are generated by other tools and might have unexpected permissions set to them added become to avoid issues related in permissions --- clean_openstack_deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/clean_openstack_deployment.yaml b/clean_openstack_deployment.yaml index 
da7f57f9b0..1bbc61b03b 100644
--- a/clean_openstack_deployment.yaml
+++ b/clean_openstack_deployment.yaml
@@ -15,3 +15,4 @@
       loop:
         - "/home/zuul/ci-framework-data/logs"
         - "/home/zuul/ci-framework-data/tests"
+      become: true

From 2e645a0190f5d6c922a0edf63830bf1fb8dc55df Mon Sep 17 00:00:00 2001
From: eshulman2
Date: Tue, 13 May 2025 11:32:37 +0300
Subject: [PATCH 113/480] Allow skipping edpm log gathering

Add a var to allow skipping compute/edpm logs which might be too big in
certain cases and cause failures with zuul

---
 roles/artifacts/README.md | 1 +
 roles/artifacts/tasks/main.yml | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/roles/artifacts/README.md b/roles/artifacts/README.md
index fa5d84df7b..1c1ed67278 100644
--- a/roles/artifacts/README.md
+++ b/roles/artifacts/README.md
@@ -12,6 +12,7 @@ None - writes happen only in the user home.
 * `cifmw_artifacts_crc_sshkey`: (String) Path to the private SSH key to connect to CRC. Defaults to `~/.crc/machines/crc/id_ecdsa`.
 * `cifmw_artifacts_crc_sshkey_ed25519`: (String) Path to the private SSH key to connect to CRC (newer CRC images). Defaults to `~/.crc/machines/crc/id_ed25519`.
 * `cifmw_artifacts_gather_logs`: (Boolean) Enables must-gather logs fetching. Defaults to `true`
+* `cifmw_artifacts_gather_edpm_logs`: (Boolean) Enables edpm logs fetching. 
Defaults to `true`

 ## Examples
 Usually we'll import the role as-is at the very start of the playbook, and
diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml
index d34474677a..818a524ecd 100644
--- a/roles/artifacts/tasks/main.yml
+++ b/roles/artifacts/tasks/main.yml
@@ -72,6 +72,8 @@
   ansible.builtin.import_tasks: crc.yml

 - name: Get EDPM logs
+  when:
+    - cifmw_artifacts_gather_edpm_logs | default(true) | bool
   ignore_errors: true # noqa: ignore-errors
   ansible.builtin.import_tasks: edpm.yml

From ff07546354071b589f969ad46dba0e2da6c7c108 Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Tue, 13 May 2025 10:11:48 +0200
Subject: [PATCH 114/480] Force dump admin password as a string

If the password is just based on numbers, after we enabled
jinja2_native [1], it is parsed to an integer not a string.

[1] https://github.com/openstack-k8s-operators/ci-framework/commit/d8f86c86284c1447f66ae3a0d8354d9bfea03ca3

---
 roles/test_operator/defaults/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml
index 66b8e5fcf7..55a8b17910 100644
--- a/roles/test_operator/defaults/main.yml
+++ b/roles/test_operator/defaults/main.yml
@@ -284,7 +284,7 @@ cifmw_test_operator_horizontest_config:
   privileged: "{{ cifmw_test_operator_privileged }}"
   containerImage: "{{ stage_vars_dict.cifmw_test_operator_horizontest_image }}:{{ stage_vars_dict.cifmw_test_operator_horizontest_image_tag }}"
   adminUsername: "{{ stage_vars_dict.cifmw_test_operator_horizontest_admin_username }}"
-  adminPassword: "{{ stage_vars_dict.cifmw_test_operator_horizontest_admin_password }}"
+  adminPassword: "{{ stage_vars_dict.cifmw_test_operator_horizontest_admin_password | string }}"
   dashboardUrl: "{{ stage_vars_dict.cifmw_test_operator_horizontest_dashboard_url }}"
   authUrl: "{{ stage_vars_dict.cifmw_test_operator_horizontest_auth_url }}"
   repoUrl: "{{ stage_vars_dict.cifmw_test_operator_horizontest_repo_url 
}}" From a7d8032a33ae70a4a605de06a076d4bbb3f7d55b Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Fri, 16 May 2025 15:35:47 +0200 Subject: [PATCH 115/480] Add variable to turn off artifact masking --- roles/artifacts/README.md | 1 + roles/artifacts/defaults/main.yml | 1 + roles/artifacts/tasks/main.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/roles/artifacts/README.md b/roles/artifacts/README.md index 1c1ed67278..3eb25bb62a 100644 --- a/roles/artifacts/README.md +++ b/roles/artifacts/README.md @@ -13,6 +13,7 @@ None - writes happen only in the user home. * `cifmw_artifacts_crc_sshkey_ed25519`: (String) Path to the private SSH key to connect to CRC (newer CRC images). Defaults to `~/.crc/machines/crc/id_ed25519`. * `cifmw_artifacts_gather_logs`: (Boolean) Enables must-gather logs fetching. Defaults to `true` * `cifmw_artifacts_gather_edpm_logs`: (Boolean) Enables edpm logs fetching. Defaults to `true` +* `cifmw_artifacts_mask_logs`: (Boolean) Enables artifacts and logs masking. 
Defaults to `true` ## Examples Usually we'll import the role as-is at the very start of the playbook, and diff --git a/roles/artifacts/defaults/main.yml b/roles/artifacts/defaults/main.yml index 572093fc15..eabd9427a6 100644 --- a/roles/artifacts/defaults/main.yml +++ b/roles/artifacts/defaults/main.yml @@ -23,3 +23,4 @@ cifmw_artifacts_crc_user: "core" cifmw_artifacts_crc_sshkey: "~/.crc/machines/crc/id_ecdsa" cifmw_artifacts_crc_sshkey_ed25519: "~/.crc/machines/crc/id_ed25519" cifmw_artifacts_gather_logs: true +cifmw_artifacts_mask_logs: true diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index 818a524ecd..e7210bb9ca 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -91,6 +91,7 @@ find {{ cifmw_artifacts_basedir }}/artifacts -type d -exec chmod 0755 '{}' \; - name: Mask secrets in yaml log files + when: cifmw_artifacts_mask_logs |bool ignore_errors: true # noqa: ignore-errors timeout: 3600 crawl_n_mask: From dfb2ab9650b32a43be04808d6b92d4f3e850c917 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Mon, 12 May 2025 09:20:52 -0400 Subject: [PATCH 116/480] Fix spaces to increase readability --- roles/test_operator/tasks/stages.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/test_operator/tasks/stages.yml b/roles/test_operator/tasks/stages.yml index 72c460fcdf..b67e948fcf 100644 --- a/roles/test_operator/tasks/stages.yml +++ b/roles/test_operator/tasks/stages.yml @@ -35,13 +35,13 @@ start_with: cifmw_test_operator_{{ _stage_vars.type }} when: item.key.startswith(start_with) ansible.builtin.set_fact: - stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit))} ) }}" + stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit)) }) }}" - name: Override specific type config vars: _stage_config: 'cifmw_test_operator_{{ 
_stage_vars.type }}_config' ansible.builtin.set_fact: - stage_vars_dict: "{{ stage_vars_dict | combine({_stage_config: _stage_test_vars[_stage_config] | default(lookup('vars', _stage_config, default=omit))} ) }}" + stage_vars_dict: "{{ stage_vars_dict | combine({_stage_config: _stage_test_vars[_stage_config] | default(lookup('vars', _stage_config, default=omit)) }) }}" - name: "Call runner {{ _stage_vars.type }}" ansible.builtin.include_tasks: "runners/{{ _stage_vars.type }}_runner.yml" From e0351dcbd81e01e3436780eb353917cbb9037950 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Mon, 12 May 2025 09:22:25 -0400 Subject: [PATCH 117/480] Change concurrency to tempest specific parameter This patch introduces the cifmw_test_operator_tempest_concurrency parameter, as current concurrency parameter is used only in Tempest tests. It will also allow the stages loop to include concurrency, so now users can set different concurrencies for different tempest runs. --- roles/test_operator/README.md | 7 ++++--- roles/test_operator/defaults/main.yml | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index c22bd80a4b..0f648e218b 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -10,7 +10,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_version`: (String) The commit hash corresponding to the version of test-operator the user wants to use. This parameter is only used when `cifmw_test_operator_bundle` is also set. * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. Default value: `3600` * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` -* `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. 
Default value: `8` +* `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. NOTE: This parameter is deprecated, please use `cifmw_test_operator_tempest_concurrency` instead. Default value: `8` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` * `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. * `cifmw_test_operator_default_groups`: (List) List of groups in the include list to search for tests to be executed. Default value: `[ 'default' ]` @@ -34,7 +34,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `type`: (String) The framework name you would like to call, currently the options are: tempest, ansibletest, horizontest, tobiko. * `test_vars_file`: (String) Path to the file used for testing, this file should contain the testing params for this stage. Only parameters specific for the controller can be used (Tempest, Ansibletest, Horizontest and Tobiko). * `test_vars`: (String) Testing parameters for this specific stage if a `test_vars` is used the specified parameters would override the ones in the `test_vars_file`. Only parameters specific for the controller can be used (Tempest, Ansibletest, Horizontest and Tobiko). - > Important note! Only variables with the following structure can be used to override inside a stage: `cifmw_test_operator_[test-operator CR name]_[parameter name]`. For example, these variables cannot be overridden per stage: `cifmw_test_operator_concurrency`, `cifmw_test_operator_default_registry`, `cifmw_test_operator_default_namespace`, `cifmw_test_operator_default_image_tag`. + > Important note! Only variables with the following structure can be used to override inside a stage: `cifmw_test_operator_[test-operator CR name]_[parameter name]`. 
For example, these variables cannot be overridden per stage: `cifmw_test_operator_default_registry`, `cifmw_test_operator_default_namespace`, `cifmw_test_operator_default_image_tag`. * `pre_test_stage_hooks`: (List) List of pre hooks to run as described [hooks README](https://github.com/openstack-k8s-operators/ci-framework/tree/main/roles/run_hook#hooks-expected-format). * `post_test_stage_hooks`: (List) List of post hooks to run as described [hooks README](https://github.com/openstack-k8s-operators/ci-framework/tree/main/roles/run_hook#hooks-expected-format). Default value: @@ -52,6 +52,7 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_container`: (String) Name of the tempest container. Default value: `openstack-tempest` * `cifmw_test_operator_tempest_image`: (String) Tempest image to be used. Default value: `{{ cifmw_test_operator_tempest_registry }}/{{ cifmw_test_operator_tempest_namespace }}/{{ cifmw_test_operator_tempest_container }}` * `cifmw_test_operator_tempest_image_tag`: (String) Tag for the `cifmw_test_operator_tempest_image`. Default value: `{{ cifmw_test_operator_default_image_tag }}` +* `cifmw_test_operator_tempest_concurrency`: (Integer) The number of worker processes running tests concurrently. Default value: `8` * `cifmw_test_operator_tempest_include_list`: (String) List of tests to be executed. Setting this will not use the `list_allowed` plugin. Default value: `''` * `cifmw_test_operator_tempest_exclude_list`: (String) List of tests to be skipped. Setting this will not use the `list_skipped` plugin. Default value: `''` * `cifmw_test_operator_tempest_expected_failures_list`: (String) List of tests for which failures will be ignored. 
Default value: `''` @@ -95,7 +96,7 @@ Default value: {} {{ cifmw_test_operator_tempest_include_list | default('') }} excludeList: | {{ cifmw_test_operator_tempest_exclude_list | default('') }} - concurrency: "{{ cifmw_test_operator_concurrency }}" + concurrency: "{{ cifmw_test_operator_tempest_concurrency | default(8) }}" externalPlugin: "{{ cifmw_test_operator_tempest_external_plugin | default([]) }}" extraRPMs: "{{ cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ cifmw_test_operator_tempest_extra_images | default([]) }}" diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 55a8b17910..fa66b163fb 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -140,7 +140,8 @@ cifmw_test_operator_tempest_config: {{ stage_vars_dict.cifmw_test_operator_tempest_exclude_list | default('') }} expectedFailuresList: | {{ stage_vars_dict.cifmw_test_operator_tempest_expected_failures_list | default('') }} - concurrency: "{{ cifmw_test_operator_concurrency }}" + # NOTE: cifmw_test_operator_concurrency is deprecated, use cifmw_test_operator_tempest_concurrency instead + concurrency: "{{ stage_vars_dict.cifmw_test_operator_tempest_concurrency | default(cifmw_test_operator_concurrency) }}" externalPlugin: "{{ stage_vars_dict.cifmw_test_operator_tempest_external_plugin | default([]) }}" extraRPMs: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" From 3441fc9f0a8f7f1195a6468c77ccadbc56222e24 Mon Sep 17 00:00:00 2001 From: Jeremy Agee Date: Fri, 4 Apr 2025 18:02:30 -0400 Subject: [PATCH 118/480] Add support for federation in the horizon UI This adds a openidc dropbox to the horizon login screen so a user can select openidc as a login type. 
The horzion UI will the be redirected to the keycloak server for the user authentication and then be passed back to the horizon dashboard as the federated user. --- .../federation-controlplane-config.yml | 22 ++++++-- ...federation-horizon-controlplane-config.yml | 50 +++++++++++++++++++ hooks/playbooks/federation-post-deploy.yml | 2 + hooks/playbooks/federation-pre-deploy.yml | 2 + .../tasks/run_keycloak_realm_setup.yml | 7 ++- 5 files changed, 77 insertions(+), 6 deletions(-) create mode 100644 hooks/playbooks/federation-horizon-controlplane-config.yml diff --git a/hooks/playbooks/federation-controlplane-config.yml b/hooks/playbooks/federation-controlplane-config.yml index 845d3958de..afaad2c767 100644 --- a/hooks/playbooks/federation-controlplane-config.yml +++ b/hooks/playbooks/federation-controlplane-config.yml @@ -2,6 +2,20 @@ - name: Create kustomization to update Keystone to use Federation hosts: "{{ cifmw_target_hook_host | default('localhost') }}" tasks: + - name: Set urls for install type uni + ansible.builtin.set_fact: + cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' + cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + when: cifmw_federation_deploy_type == "uni" + + - name: Set urls for install type crc + ansible.builtin.set_fact: + cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' + cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + when: cifmw_federation_deploy_type == "crc" + - name: Create file to customize keystone for Federation resources deployed in the control plane ansible.builtin.copy: dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_federation.yaml" @@ -32,7 +46,8 @@ insecure_debug=true debug=true [federation] - 
trusted_dashboard={{ '{{ .KeystoneEndpointPublic }}' }}/dashboard/auth/websso/ + trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ + sso_callback_template=/etc/keystone/sso_callback_template.html [openid] remote_id_attribute=HTTP_OIDC_ISS [auth] @@ -77,8 +92,6 @@ OIDCClaimDelimiter "{{ cifmw_keystone_OIDC_ClaimDelimiter }}" OIDCPassUserInfoAs "{{ cifmw_keystone_OIDC_PassUserInfoAs }}" OIDCPassClaimsAs "{{ cifmw_keystone_OIDC_PassClaimsAs }}" - OIDCCacheType "{{ cifmw_keystone_OIDC_CacheType }}" - OIDCMemCacheServers "{{ '{{ .MemcachedServers }}' }}" OIDCProviderMetadataURL "{{ cifmw_keystone_OIDC_ProviderMetadataURL }}" OIDCClientID "{{ cifmw_keystone_OIDC_ClientID }}" OIDCClientSecret "{{ cifmw_keystone_OIDC_ClientSecret }}" @@ -86,7 +99,8 @@ OIDCOAuthClientID "{{ cifmw_keystone_OIDC_OAuthClientID }}" OIDCOAuthClientSecret "{{ cifmw_keystone_OIDC_OAuthClientSecret }}" OIDCOAuthIntrospectionEndpoint "{{ cifmw_keystone_OIDC_OAuthIntrospectionEndpoint }}" - OIDCRedirectURI "{{ '{{ .KeystoneEndpointPublic }}' }}/v3/auth/OS-FEDERATION/identity_providers/{{ cifmw_keystone_OIDC_provider_name }}/protocols/openid/websso" + OIDCRedirectURI "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/{{ cifmw_keystone_OIDC_provider_name }}/protocols/openid/websso/" + LogLevel debug AuthType "openid-connect" diff --git a/hooks/playbooks/federation-horizon-controlplane-config.yml b/hooks/playbooks/federation-horizon-controlplane-config.yml new file mode 100644 index 0000000000..f363fb21e2 --- /dev/null +++ b/hooks/playbooks/federation-horizon-controlplane-config.yml @@ -0,0 +1,50 @@ +--- +- name: Create kustomization to update Horizon to use Federation + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Set urls for install type uni + ansible.builtin.set_fact: + cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' + cifmw_federation_keystone_url: 
'https://keystone-public-openstack.apps.ocp.openstack.lab' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + when: cifmw_federation_deploy_type == "uni" + + - name: Set urls for install type crc + ansible.builtin.set_fact: + cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' + cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + when: cifmw_federation_deploy_type == "crc" + + - name: Create file to customize horizon for Federation resources deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/horizon_federation.yaml" + mode: preserve + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/horizon/enabled + value: true + - op: add + path: /spec/horizon/template/memcachedInstance + value: memcached + - op: add + path: /spec/horizon/template/customServiceConfig + value: | + OPENSTACK_KEYSTONE_URL = "{{ cifmw_federation_keystone_url }}/v3" + WEBSSO_ENABLED = True + WEBSSO_CHOICES = ( + ("credentials", _("Keystone Credentials")), + ("OIDC", _("OpenID Connect")), + ) + WEBSSO_IDP_MAPPING = { + "OIDC": ("{{ cifmw_keystone_OIDC_provider_name }}", "openid"), + } diff --git a/hooks/playbooks/federation-post-deploy.yml b/hooks/playbooks/federation-post-deploy.yml index bb2ad638df..bcd45e7754 100644 --- a/hooks/playbooks/federation-post-deploy.yml +++ b/hooks/playbooks/federation-post-deploy.yml @@ -22,12 +22,14 @@ ansible.builtin.set_fact: cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' + cifmw_federation_horizon_url: 
'https://horizon-openstack.apps.ocp.openstack.lab' when: cifmw_federation_deploy_type == "uni" - name: Set urls for install type crc ansible.builtin.set_fact: cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' when: cifmw_federation_deploy_type == "crc" - name: Run federation setup on OSP diff --git a/hooks/playbooks/federation-pre-deploy.yml b/hooks/playbooks/federation-pre-deploy.yml index 3b974b390a..791c48624c 100644 --- a/hooks/playbooks/federation-pre-deploy.yml +++ b/hooks/playbooks/federation-pre-deploy.yml @@ -22,12 +22,14 @@ ansible.builtin.set_fact: cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' when: cifmw_federation_deploy_type == "uni" - name: Set urls for install type crc ansible.builtin.set_fact: cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' + cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' when: cifmw_federation_deploy_type == "crc" - name: Run SSO pod setup on Openshift diff --git a/roles/federation/tasks/run_keycloak_realm_setup.yml b/roles/federation/tasks/run_keycloak_realm_setup.yml index cdd840be0a..b001e5ebff 100644 --- a/roles/federation/tasks/run_keycloak_realm_setup.yml +++ b/roles/federation/tasks/run_keycloak_realm_setup.yml @@ -43,17 +43,20 @@ description: 'RHOSO client for keystone federation' root_url: "{{ cifmw_federation_keystone_url }}" admin_url: "{{ cifmw_federation_keystone_url }}" - base_url: '/projects/dashboard' + base_url: '/dashboard/project' enabled: true client_authenticator_type: 
client-secret
       secret: "{{ cifmw_federation_keycloak_client_secret }}"
       redirect_uris:
-        - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/kcIDP/protocols/openid/websso"
+        - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/kcIDP/protocols/openid/websso/"
         - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/websso/openid"
+        - "{{ cifmw_federation_horizon_url }}/dashboard/auth/websso/"
       web_origins:
         - "{{ cifmw_federation_keystone_url }}"
+        - "{{ cifmw_federation_horizon_url }}"
       bearer_only: false
       public_client: false
+      implicit_flow_enabled: true
       protocol: openid-connect

 - name: Create a Keycloak group1

From 2d3aed4e1ac225adddfc32415dd6a13535084195 Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Fri, 16 May 2025 12:17:08 +0200
Subject: [PATCH 119/480] Use modules for enabling rhel subscription

We should start using Ansible modules instead of executing commands via
command/shell module. Dedicated modules might handle that operation in a
better way. 
Signed-off-by: Daniel Pawlik --- roles/adoption_osp_deploy/tasks/login_registries.yml | 10 ++++++---- roles/adoption_osp_deploy/tasks/prepare_overcloud.yml | 5 +++-- roles/adoption_osp_deploy/tasks/prepare_undercloud.yml | 5 +++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/login_registries.yml b/roles/adoption_osp_deploy/tasks/login_registries.yml index bc9e21b545..1430ef85d7 100644 --- a/roles/adoption_osp_deploy/tasks/login_registries.yml +++ b/roles/adoption_osp_deploy/tasks/login_registries.yml @@ -20,10 +20,12 @@ - cifmw_adoption_osp_deploy_rhsm_key is defined become: true no_log: true - ansible.builtin.command: >- - subscription-manager register --force - --org "{{ cifmw_adoption_osp_deploy_rhsm_org }}" - --activationkey "{{ cifmw_adoption_osp_deploy_rhsm_key }}" + community.general.redhat_subscription: + activationkey: "{{ cifmw_adoption_osp_deploy_rhsm_key }}" + org_id: "{{ cifmw_adoption_osp_deploy_rhsm_org }}" + release: "{{ ansible_distribution_version }}" + force_register: true + state: present - name: Login in container registry when: diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 71571a13e5..96b7faccae 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -113,8 +113,9 @@ - name: Ensure repos are setup in overcloud nodes delegate_to: "{{ _vm }}" become: true - ansible.builtin.command: - cmd: "subscription-manager repos --enable {{ cifmw_adoption_osp_deploy_repos | join(' --enable ') }}" + community.general.rhsm_repository: + name: "{{ cifmw_adoption_osp_deploy_repos }}" + state: enabled loop: "{{ _tripleo_nodes_stack }}" loop_control: loop_var: _vm diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index 74b411aa08..bf5871a059 100644 --- 
a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -25,8 +25,9 @@ - name: Ensure repos are setup become: true - ansible.builtin.command: - cmd: "subscription-manager repos --enable {{ cifmw_adoption_osp_deploy_repos | join(' --enable ') }}" + community.general.rhsm_repository: + name: "{{ cifmw_adoption_osp_deploy_repos }}" + state: enabled - name: Install director packages become: true From e27fac3b7cf70c870e8c4de5b2a051ac77e7724c Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Fri, 9 May 2025 17:30:13 +0530 Subject: [PATCH 120/480] Drop PyYAML package for bindep PyYAML package is not available on RHEL distro and is replaced with python-pyyaml, which is already there. This pr will drops PyYAML from bindep. Signed-off-by: Chandan Kumar (raukadah) --- bindep.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/bindep.txt b/bindep.txt index d8bf1edbc2..7d5f68785f 100644 --- a/bindep.txt +++ b/bindep.txt @@ -22,7 +22,6 @@ podman [platform:rpm] python3-devel [platform:rpm !platform:rhel-7 !platform:centos-7] python3-libvirt [platform:rpm] python3-lxml [platform:rpm] -PyYAML [platform:rpm !platform:rhel-8 !platform:centos-8 !platform:rhel-9 !platform:centos-9 !platform:fedora] python3-pyyaml [platform:rpm !platform:rhel-7 !platform:centos-7] python3-dnf [platform:rpm !platform:rhel-7 !platform:centos-7] From 4c9be83af9fa24aac768b399ac0cd093abc1c0d8 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Mon, 19 May 2025 15:57:38 +0300 Subject: [PATCH 121/480] Use sync module instead of rsync command Change repo sync to use ansible.posix.synchronize to allow more flexability. 
--- roles/reproducer/tasks/push_code.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/roles/reproducer/tasks/push_code.yml b/roles/reproducer/tasks/push_code.yml index 826320b34b..98b7b4e28d 100644 --- a/roles/reproducer/tasks/push_code.yml +++ b/roles/reproducer/tasks/push_code.yml @@ -124,8 +124,12 @@ delegate_to: localhost when: - item.src is abs or item.src is not match('.*:.*') - ansible.builtin.command: # noqa: command-instead-of-module - cmd: "rsync -ar {{ item.src }} zuul@controller-0:{{ item.dest }}" + ansible.posix.synchronize: + src: "{{ item.src }}" + dest: "zuul@controller-0:{{ item.dest }}" + archive: true + recursive: true + delete: true loop: "{{ _cifmw_reproducer_all_repositories }}" loop_control: label: "{{ item.src | basename }}" From b2cb7a26f5755293548fdf727347b10dbc0ecbed Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 13 May 2025 15:15:35 +0300 Subject: [PATCH 122/480] Reboot controller-0 to avoid running deployments Because deploy architecture runs detached from the zuul job and might run other playbooks in a detached way we need to make sure there are no leftover running deployments that might break the cleanup (trying to create additional resources while the cleanup is running). Added a reboot for controller-0 before cleanup to avoid such cases. 
--- deploy-edpm-reuse.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/deploy-edpm-reuse.yaml b/deploy-edpm-reuse.yaml index 1677af3572..fdbb0442bd 100644 --- a/deploy-edpm-reuse.yaml +++ b/deploy-edpm-reuse.yaml @@ -2,6 +2,20 @@ - name: Manage unique ID ansible.builtin.import_playbook: playbooks/unique-id.yml +- name: Reboot controller-0 to make sure there are no running deployments + hosts: controller-0 + gather_facts: false + tasks: + - name: Reboot controller-0 + ansible.builtin.reboot: + reboot_timeout: 600 + become: true + + - name: Wait for controller-0 to come back online + ansible.builtin.wait_for_connection: + timeout: 600 + delay: 10 + - name: Reproducer prepare play hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true From aae88585d199be33a12e2735f83077040abad746 Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Tue, 20 May 2025 13:41:27 +0200 Subject: [PATCH 123/480] ci_dcn_site: Set cross_az_attach at nova-api We set the cross_az_attack to False at the nova compute level and we should set it at the nova api level too. 
--- roles/ci_dcn_site/templates/service-values.yaml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ci_dcn_site/templates/service-values.yaml.j2 b/roles/ci_dcn_site/templates/service-values.yaml.j2 index 3d8965ccce..75a430776e 100644 --- a/roles/ci_dcn_site/templates/service-values.yaml.j2 +++ b/roles/ci_dcn_site/templates/service-values.yaml.j2 @@ -163,6 +163,8 @@ data: customServiceConfig: | [DEFAULT] default_schedule_zone=az0 + [cinder] + cross_az_attach=False metadataServiceTemplate: enabled: false cellTemplates: From 72c4374784018716368ef1f357bd022ef741cc8d Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Thu, 8 May 2025 15:43:14 +0200 Subject: [PATCH 124/480] Optionally configure glance with cinder_volume_type=multiattach With this patch, the hook that creates a multiattach volume type can also configure that volume type as the `cinder_volume_type`, if parameter `configure_cinder_volume_type` is set to true. This is needed for octavia, because the amphora image that it creates needs to be based on a multiattach volume. Otherwise, it fails to create amphora VMs on different computes simultaneously, which is something that happens when octavia is configured with ACTIVE_STANDBY. 
OSPRH-16089 OSPCIX-768 --- .../cinder_multiattach_volume_type.yml | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/hooks/playbooks/cinder_multiattach_volume_type.yml b/hooks/playbooks/cinder_multiattach_volume_type.yml index 4bb451f25b..69c5832a50 100644 --- a/hooks/playbooks/cinder_multiattach_volume_type.yml +++ b/hooks/playbooks/cinder_multiattach_volume_type.yml @@ -24,3 +24,50 @@ oc rsh openstackclient \ openstack volume type set --property multiattach=" True" \ {{ cifmw_volume_multiattach_type }} + + # This block is needed for octavia because the Amphora image needs to be created on a multiattach volume + - name: Block to configure cinder_volume_type when needed + when: configure_cinder_volume_type | default('false') | bool + block: + - name: Create tempfile + ansible.builtin.tempfile: + state: file + prefix: glance_custom_service_config + register: _glance_custom_service_config_file + + - name: Write current glance customServiceConfig to tempfile + ansible.builtin.shell: | + set -xe -o pipefail + crname=$(oc get openstackcontrolplane -o name -n {{ namespace }}) + oc -n {{ namespace }} get ${crname} -o jsonpath={.spec.glance.template.customServiceConfig} > {{ _glance_custom_service_config_file.path }} + changed_when: false + + - name: Ensure cinder_volume_type is configured with proper value in tempfile + community.general.ini_file: + path: "{{ _glance_custom_service_config_file.path }}" + section: "{{ default_backend | default('default_backend') }}" + option: cinder_volume_type + value: "{{ cifmw_volume_multiattach_type }}" + mode: "0644" + register: _glance_ini_file + + - name: Slurp tempfile # noqa: no-handler + ansible.builtin.slurp: + path: "{{ _glance_custom_service_config_file.path }}" + register: _glance_ini_content + when: _glance_ini_file.changed + + - name: Apply patched glance customServiceConfig # noqa: no-handler + vars: + _yaml_patch: + spec: + glance: + template: + customServiceConfig: "{{ _glance_ini_content.content | 
b64decode }}" + ansible.builtin.shell: | + set -xe -o pipefail + crname=$(oc get openstackcontrolplane -o name -n {{ namespace }}) + oc -n {{ namespace }} patch ${crname} --type=merge --patch "{{ _yaml_patch | to_nice_yaml }}" + oc -n {{ namespace }} wait ${crname} --for condition=Ready --timeout=10m + changed_when: _glance_ini_file.changed + when: _glance_ini_file.changed From efef0e1632f50435758a8d673b5da3116828ee7a Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Mon, 5 May 2025 11:32:07 +0100 Subject: [PATCH 125/480] Add scenario unidelta_ipv6 adoption It's empty patch, the config will be defined in the config job. but this file is required for the adoption_osp_deploy role to run. --- scenarios/adoption/uni04delta-ipv6.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 scenarios/adoption/uni04delta-ipv6.yml diff --git a/scenarios/adoption/uni04delta-ipv6.yml b/scenarios/adoption/uni04delta-ipv6.yml new file mode 100644 index 0000000000..4e9e5200e7 --- /dev/null +++ b/scenarios/adoption/uni04delta-ipv6.yml @@ -0,0 +1,2 @@ +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From 725977a475bb0c5c7119004bee09048b437ba6c0 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 13 May 2025 11:06:54 +0200 Subject: [PATCH 126/480] Use role instead of playbooks - 02-infra.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 Signed-off-by: Daniel Pawlik --- ci/playbooks/kuttl/deploy-deps.yaml | 28 ++++- deploy-edpm.yml | 35 +++++- playbooks/01-bootstrap.yml | 3 + playbooks/02-infra.yml | 4 + .../cifmw_setup/tasks/host_virtualization.yml | 18 +++ roles/cifmw_setup/tasks/infra.yml | 107 ++++++++++++++++++ 6 files changed, 189 insertions(+), 6 deletions(-) create mode 100644 roles/cifmw_setup/tasks/host_virtualization.yml create mode 100644 roles/cifmw_setup/tasks/infra.yml diff --git a/ci/playbooks/kuttl/deploy-deps.yaml b/ci/playbooks/kuttl/deploy-deps.yaml index 97f754b027..4b5109db9c 100644 --- a/ci/playbooks/kuttl/deploy-deps.yaml +++ b/ci/playbooks/kuttl/deploy-deps.yaml @@ -21,14 +21,38 @@ name: 'install_yamls_makes' tasks_from: 'make_download_tools' -- name: Run ci_framework infra playbook - ansible.builtin.import_playbook: "../../../playbooks/02-infra.yml" + - name: Run pre_infra hooks + vars: + step: pre_infra + ansible.builtin.import_role: + name: run_hook + +- name: Prepare host virtualization + hosts: "{{ ('virthosts' in groups) | ternary('virthosts', cifmw_target_host | default('localhost') ) }}" + tasks: + - name: Run prepare host virtualization + vars: + step: pre_infra + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: host_virtualization.yml + tags: + - infra - name: Build dataset hook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false connection: local tasks: + - name: Prepare the platform + vars: + step: pre_infra + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: infra.yml + tags: + - infra + - name: Load parameters ansible.builtin.include_vars: dir: "{{ item }}" diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 0e45ee3e49..f113724694 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -23,10 +23,37 @@ name: cifmw_setup tasks_from: bootstrap.yml -- name: Import infra entrypoint playbook - ansible.builtin.import_playbook: 
playbooks/02-infra.yml - tags: - - infra + - name: Run pre_infra hooks + vars: + step: pre_infra + ansible.builtin.import_role: + name: run_hook + tags: + - infra + +- name: Prepare host virtualization + hosts: "{{ ('virthosts' in groups) | ternary('virthosts', cifmw_target_host | default('localhost') ) }}" + tasks: + - name: Run prepare host virtualization + vars: + step: pre_infra + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: host_virtualization.yml + tags: + - infra + +- name: Run cifmw_setup infra.yml + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Prepare the platform + vars: + step: pre_infra + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: infra.yml + tags: + - infra - name: Import package build playbook ansible.builtin.import_playbook: playbooks/03-build-packages.yml diff --git a/playbooks/01-bootstrap.yml b/playbooks/01-bootstrap.yml index f81ae4f41a..42697c2ceb 100644 --- a/playbooks/01-bootstrap.yml +++ b/playbooks/01-bootstrap.yml @@ -1,5 +1,8 @@ --- +# # NOTE: Playbook migrated to: cifmw_setup/tasks/bootstrap.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# - name: Bootstrap playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true diff --git a/playbooks/02-infra.yml b/playbooks/02-infra.yml index 06d2ce30cf..61b66abff1 100644 --- a/playbooks/02-infra.yml +++ b/playbooks/02-infra.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/infra.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# - name: Run pre_infra hooks hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/host_virtualization.yml b/roles/cifmw_setup/tasks/host_virtualization.yml new file mode 100644 index 0000000000..a2da1b0de1 --- /dev/null +++ b/roles/cifmw_setup/tasks/host_virtualization.yml @@ -0,0 +1,18 @@ +--- +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Ensure libvirt is present/configured + when: + - cifmw_use_libvirt is defined + - cifmw_use_libvirt | bool + ansible.builtin.include_role: + name: libvirt_manager + +- name: Perpare OpenShift provisioner node + when: + - cifmw_use_opn is defined + - cifmw_use_opn | bool + ansible.builtin.include_role: + name: openshift_provisioner_node diff --git a/roles/cifmw_setup/tasks/infra.yml b/roles/cifmw_setup/tasks/infra.yml new file mode 100644 index 0000000000..7639b90e0a --- /dev/null +++ b/roles/cifmw_setup/tasks/infra.yml @@ -0,0 +1,107 @@ +--- +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Load Networking Environment Definition + vars: + cifmw_networking_mapper_assert_env_load: false + ansible.builtin.import_role: + name: networking_mapper + tasks_from: load_env_definition.yml + +- name: Deploy OCP using Hive + when: + - cifmw_use_hive is defined + - cifmw_use_hive | bool + ansible.builtin.include_role: + name: hive + +- name: Prepare CRC + when: + - cifmw_use_crc is defined + - cifmw_use_crc | bool + ansible.builtin.include_role: + name: rhol_crc + +- name: Deploy OpenShift cluster using dev-scripts + when: + - cifmw_use_devscripts is defined + - cifmw_use_devscripts | bool + ansible.builtin.include_role: + name: devscripts + +- name: Login into Openshift cluster + tags: + - always + vars: + cifmw_openshift_login_force_refresh: true + ansible.builtin.import_role: + name: openshift_login + +- name: Setup Openshift 
cluster + ansible.builtin.import_role: + name: openshift_setup + +- name: Deploy Observability operator. + when: + - cifmw_deploy_obs is defined + - cifmw_deploy_obs | bool + ansible.builtin.include_role: + name: openshift_obs + +- name: Deploy Metal3 BMHs + when: + - cifmw_config_bmh is defined + - cifmw_config_bmh | bool + ansible.builtin.include_role: + name: deploy_bmh + +- name: Install certmanager operator role + when: + - cifmw_config_certmanager is defined + - cifmw_config_certmanager | bool + ansible.builtin.include_role: + name: cert_manager + +- name: Configure hosts networking using nmstate + when: + - cifmw_config_nmstate is defined + - cifmw_config_nmstate | bool + ansible.builtin.include_role: + name: ci_nmstate + +- name: Configure multus networks + when: + - cifmw_config_multus | default(false) | bool + ansible.builtin.include_role: + name: ci_multus + +- name: Deploy Sushy Emulator and configure controller as hypervisor + when: + - cifmw_enable_virtual_baremetal_support | default(false) | bool + block: + - name: Deploy Sushy Emulator service pod + vars: + cifmw_sushy_emulator_hypervisor_address: "{{ hostvars['controller'].ansible_host }}" + cifmw_sushy_emulator_hypervisor_target: controller + cifmw_sushy_emulator_install_type: ocp + ansible.builtin.include_role: + name: sushy_emulator + + - name: Setup Libvirt on controller + ansible.builtin.include_role: + name: libvirt_manager + +- name: Prepare container package builder + when: + - cifmw_pkg_build_list is defined + - cifmw_pkg_build_list | length > 0 + ansible.builtin.include_role: + name: pkg_build + +- name: Run post_infra hooks + vars: + step: post_infra + ansible.builtin.import_role: + name: run_hook From e836ea006334c515d208f7f8decd4098097df0a3 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 23 Apr 2025 17:16:09 +0200 Subject: [PATCH 127/480] Add OpenStack Operator Initialization We have introduced new tasks for initializing the OpenStack operator. 
This is required for versions prior to v1.0.7, where deployment didn't include the initialization. This action only takes place if we are using an OLM-based update. Additionally, we block the process until the OpenStack deployment is successful. This applies to all starting versions, as the operator should be ready before proceeding. Closes: [OSPRH-15799](https://issues.redhat.com/browse/OSPRH-15799) --- roles/update/defaults/main.yml | 1 + roles/update/tasks/main.yml | 45 ++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/roles/update/defaults/main.yml b/roles/update/defaults/main.yml index c1bef2225b..27f8805331 100644 --- a/roles/update/defaults/main.yml +++ b/roles/update/defaults/main.yml @@ -24,6 +24,7 @@ cifmw_update_openstack_update_run_containers_namespace: "podified-antelope-cento cifmw_update_openstack_update_run_containers_target_tag: "current-podified" cifmw_update_openstack_update_run_timeout: "600s" +# Avoid certain tasks during molecule run cifmw_update_run_dryrun: false ### Test related variables diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index 800aab5209..183733fef2 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -44,6 +44,51 @@ - cifmw_ci_gen_kustomize_values_installplan_approval is defined - cifmw_ci_gen_kustomize_values_installplan_approval | lower == 'manual' +- name: Initialize the openstack operator if needed + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + definition: "{{ _openstack_init_resource }}" + state: present + vars: + _openstack_init_resource: + apiVersion: operator.openstack.org/v1beta1 + kind: OpenStack + metadata: + name: openstack + namespace: openstack-operators + when: + - cifmw_ci_gen_kustomize_values_deployment_version is defined + - cifmw_ci_gen_kustomize_values_deployment_version is in ['v1.0.3', 
'v1.0.6'] + +- name: Ensure OpenStack deployment is successful and block until it is done + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: operator.openstack.org/v1beta1 + kind: OpenStack + namespace: openstack-operators + register: _cifmw_update_openstack_info + until: > + _cifmw_update_openstack_info.resources[0].status.conditions is defined + and + ( + _cifmw_update_openstack_info.resources[0].status.conditions | + selectattr('type', 'equalto', 'Ready') | + map(attribute='status') | first | default('False') == 'True' + ) + and + ( + _cifmw_update_openstack_info.resources[0].status.conditions | + selectattr('type', 'equalto', 'OpenStackOperatorReadyCondition') | + map(attribute='status') | first | default('False') == 'True' + ) + retries: 20 + delay: 15 + when: not (cifmw_update_run_dryrun | bool) + # Get the next available version available when using OLM - name: Handle the next version when using OLM when: From e9bba528904a886e5fbfbf075aef84e0a983f98c Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Wed, 21 May 2025 17:02:50 -0400 Subject: [PATCH 128/480] Revert "Add reporting for playbook testing" This reverts commit 38e58336d35e3aba7cd577ea05127a87d15ec2ca. which is causing a CIX failure. 
Jira: OSPCIX-861 --- roles/validations/tasks/security/invoke_tlse_playbooks.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/validations/tasks/security/invoke_tlse_playbooks.yml b/roles/validations/tasks/security/invoke_tlse_playbooks.yml index c20e1f84a3..8af1f4c14a 100644 --- a/roles/validations/tasks/security/invoke_tlse_playbooks.yml +++ b/roles/validations/tasks/security/invoke_tlse_playbooks.yml @@ -9,4 +9,3 @@ cd "{{ ansible_user_dir }}/src/gitlab.cee.redhat.com/OSP-DFG-security/automation" ansible-playbook -vv playbooks/renew_internal_cert_outer.yml || echo "renew_internal_cert_outer failed, continuing..." ansible-playbook -vv playbooks/data_plane_cert_testing_with_delete.yml - ansible-playbook -vv playbooks/get_test_results_of_playbook_tests.yml From d6459d53df148bb2952b725c35f4bcbced9d7ab1 Mon Sep 17 00:00:00 2001 From: Ronelle Landy Date: Thu, 15 May 2025 16:13:06 -0400 Subject: [PATCH 129/480] Add a containers tag role --- roles/build_containers/README.md | 3 + roles/build_containers/defaults/main.yml | 3 + roles/build_containers/tasks/main.yml | 9 +++ roles/build_containers/tasks/tag.yml | 99 ++++++++++++++++++++++++ 4 files changed, 114 insertions(+) create mode 100644 roles/build_containers/tasks/tag.yml diff --git a/roles/build_containers/README.md b/roles/build_containers/README.md index 967774e7c5..56cb592a3f 100644 --- a/roles/build_containers/README.md +++ b/roles/build_containers/README.md @@ -35,6 +35,9 @@ become - Required to install and execute tcib * `cifmw_build_containers_hotfix_tag`: (String) The tag of the container image. * `cifmw_build_containers_run_hotfix`: (boolean) conditional variable for executing build_containers. * `cifmw_build_containers_install_from_source`: (boolean) Install tcib from RPM. +* `cifmw_build_containers_tag_string`: (String) Human readable string to tag containers +* `cifmw_build_containers_retag_images`: (Boolean) Whether to tag images again after pushing with hash tag. 
Defaults to `false`
-0,0 +1,99 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Ensure directories are present + ansible.builtin.file: + path: "{{ cifmw_build_containers_basedir }}/{{ item }}" + state: directory + mode: "0755" + loop: + - tmp + - artifacts + - logs + +- name: Make sure authfile exists + when: + - cifmw_build_containers_authfile_path != None + - cifmw_build_containers_push_containers | bool + block: + - name: Check for authfile + ansible.builtin.stat: + path: '{{ cifmw_build_containers_authfile_path }}' + register: authfile_exist + + - name: Make sure authfile exists + ansible.builtin.assert: + that: + - authfile_exist.stat.exists | bool + +- name: Retrieve the log file from container build job + ansible.builtin.get_url: + url: "{{ containers_built_artifacts_url }}/ci-framework-data/logs/containers-built.log" + dest: "{{ cifmw_build_containers_basedir }}/logs/containers-built.log" + mode: "0644" + force: true + register: result + until: + - result.status_code is defined + - result.status_code == 200 + retries: 6 + delay: 50 + +- name: Get built_images from the log file + ansible.builtin.shell: + cmd: >- + set -o pipefail; + cat {{ cifmw_build_containers_basedir }}/logs/containers-built.log | + grep {{ cifmw_build_containers_container_name_prefix }} | + awk '{ print $1 }' + register: built_images_from_file + +- name: Get the hash tag from the log file + ansible.builtin.shell: + cmd: >- + set -o pipefail; + 
cat {{ cifmw_build_containers_basedir }}/logs/containers-built.log | + grep {{ cifmw_build_containers_container_name_prefix }} | + awk '{ print $2 }' | head -n 1 + register: images_tag_from_file + +- name: Set variables for looping + ansible.builtin.set_fact: + built_images: "{{ built_images_from_file.stdout_lines }}" + images_tag: "{{ images_tag_from_file.stdout_lines[0] }}" + +- name: Pull images returned in built_images + containers.podman.podman_image: + name: "{{ item }}" + tag: "{{ images_tag }}" + loop: "{{ built_images }}" + +- name: Retag the images with new tag + containers.podman.podman_tag: + image: "{{ item }}:{{ images_tag }}" + target_names: + - "{{ item }}:{{ cifmw_build_containers_tag_string }}" + loop: "{{ built_images }}" + +- name: Push images to registry with new tag + containers.podman.podman_image: + name: "{{ item }}" + push_args: + dest: "{{ cifmw_build_containers_push_registry }}/{{ cifmw_build_containers_registry_namespace }}" + tag: "{{ cifmw_build_containers_tag_string }}" + pull: false + push: true + loop: "{{ built_images }}" From 22adb5aa1ebf57c8a92fbdd6b1bdcbd803466f12 Mon Sep 17 00:00:00 2001 From: Jenkins Date: Wed, 21 May 2025 17:10:23 +0200 Subject: [PATCH 130/480] Ensure that namespace is created By default baremetals are created in openshift-machine-api. But we need to use openstack namespace instead. See the following jira: https://issues.redhat.com/browse/OSPRH-16805 There is an issue when using openstack namespace, it is not created when baremetals playbooks are executed. 
With this commit I ensure that namespace is created before it is needed --- roles/deploy_bmh/tasks/create_templated_resource.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/roles/deploy_bmh/tasks/create_templated_resource.yml b/roles/deploy_bmh/tasks/create_templated_resource.yml index c611405256..9745f9e3ed 100644 --- a/roles/deploy_bmh/tasks/create_templated_resource.yml +++ b/roles/deploy_bmh/tasks/create_templated_resource.yml @@ -20,6 +20,18 @@ dest: "{{ _manifest_file }}" mode: "0644" + - name: Ensure cifmw_deploy_bmh_namespace namespace exists + when: + - cifmw_deploy_bmh_apply_cr + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ cifmw_deploy_bmh_namespace }}" + - name: Apply the generated CRs when: - cifmw_deploy_bmh_apply_cr From 5f17bc55a373df911fc8017b3ddefcb4317dcb23 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Fri, 11 Apr 2025 23:40:12 +0200 Subject: [PATCH 131/480] Fix continuous control plane testing during update Create a new container run by podman so that the update process won't interfere with the commands triggered on the OpenStack plateform. 
Closes: [OSPRH-16001](https://issues.redhat.com/browse/OSPRH-16001) --- .../tasks/collect_openstackclient_config.yml | 19 +++ .../tasks/create_local_openstackclient.yml | 109 ++++++++++++++++++ roles/update/tasks/main.yml | 6 + .../templates/workload_launch_k8s.sh.j2 | 55 +-------- 4 files changed, 139 insertions(+), 50 deletions(-) create mode 100644 roles/update/tasks/collect_openstackclient_config.yml create mode 100644 roles/update/tasks/create_local_openstackclient.yml diff --git a/roles/update/tasks/collect_openstackclient_config.yml b/roles/update/tasks/collect_openstackclient_config.yml new file mode 100644 index 0000000000..84466b7f0d --- /dev/null +++ b/roles/update/tasks/collect_openstackclient_config.yml @@ -0,0 +1,19 @@ +--- +- name: Collect file from openstackclient container + kubernetes.core.k8s_exec: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + namespace: "openstack" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + pod: "openstackclient" + container: "openstackclient" + command: "/usr/bin/cat /home/cloud-admin/.config/openstack/{{ item }}" + register: file_content + changed_when: false + +- name: Save file locally + ansible.builtin.copy: + content: "{{ file_content.stdout }}" + dest: "{{ cifmw_update_artifacts_basedir }}/{{ item }}" + mode: '0644' + changed_when: false diff --git a/roles/update/tasks/create_local_openstackclient.yml b/roles/update/tasks/create_local_openstackclient.yml new file mode 100644 index 0000000000..74557fba37 --- /dev/null +++ b/roles/update/tasks/create_local_openstackclient.yml @@ -0,0 +1,109 @@ +--- +- name: Gather NodeSet resource information + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + namespace: "openstack" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: "OpenStackDataPlaneNodeSet" + api_version: "dataplane.openstack.org/v1beta1" + 
register: _cifmw_update_osdpns_all_info + +- name: Fail if no OSDPNS resources are found + ansible.builtin.fail: + msg: "No OSDPNS resources found in the 'openstack' namespace!" + when: _cifmw_update_osdpns_all_info.resources | length == 0 + +- name: Choose the first OSDPNS resource + ansible.builtin.set_fact: + _cifmw_update_osdpns_info: "{{ _cifmw_update_osdpns_all_info.resources[0] }}" + +- name: Display which osdpns we're using + ansible.builtin.debug: + msg: "Found OSDPNS named: '{{ _cifmw_update_osdpns_info.metadata.name }}'" + +- name: Determine registry + ansible.builtin.set_fact: + cifmw_update_local_registry: >- + {{ + (cifmw_ci_gen_kustomize_values_ooi_image.split('/')[0]) + if cifmw_ci_gen_kustomize_values_ooi_image is defined + else 'quay.io' + | trim + }} + +- name: Check if credentials exist + ansible.builtin.set_fact: + brew_username: "{{ login_username }}" + brew_password: "{{ login_dict[login_username] }}" + vars: + login_dict: >- + {{ + _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars. 
+ edpm_container_registry_logins[cifmw_update_local_registry] + }} + login_username: "{{ login_dict.keys()|list|first }}" + when: + - _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins is defined + - login_dict is defined + - login_dict|length > 0 + - cifmw_update_local_registry != 'quay.io' + +- name: Log in to registry when needed + containers.podman.podman_login: + # Hardcoded for now + registry: "registry.redhat.io" + username: "{{ brew_username }}" + password: "{{ brew_password }}" + when: + - brew_username is defined + - brew_password is defined + - cifmw_update_local_registry != 'quay.io' + +- name: Retrieve the openstackclient Pod + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + namespace: "openstack" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: "Pod" + name: "openstackclient" + register: _cifmw_update_openstackclient_pod + +- name: Fail if openstackclient Pod is not found + ansible.builtin.fail: + msg: "No openstackclient Pod found in the openstack namespace!" 
+ when: _cifmw_update_openstackclient_pod.resources | length == 0 + +- name: Set the openstackclient image fact + ansible.builtin.set_fact: + openstackclient_image: "{{ _cifmw_update_openstackclient_pod.resources[0].spec.containers[0].image }}" + +- name: Collect and save OpenStack config files + ansible.builtin.include_tasks: collect_openstackclient_config.yml + loop: + - 'clouds.yaml' + - 'secure.yaml' + loop_control: + label: "{{ item }}" + +- name: Create local openstack wrapper script + ansible.builtin.copy: + dest: "{{ cifmw_update_artifacts_basedir }}/openstack" + mode: '0755' + content: | + #!/usr/bin/env bash + set -euo pipefail + OS_CLOUD=default /usr/bin/openstack --insecure "$@" + +- name: Ensure lopenstackclient container is running + containers.podman.podman_container: + name: lopenstackclient + image: "{{ openstackclient_image }}" + state: started + net: host + volumes: + - "{{ cifmw_update_artifacts_basedir }}/clouds.yaml:/home/cloud-admin/.config/openstack/clouds.yaml:ro,Z" + - "{{ cifmw_update_artifacts_basedir }}/secure.yaml:/home/cloud-admin/.config/openstack/secure.yaml:ro,Z" + - "{{ cifmw_update_artifacts_basedir }}/openstack:/home/cloud-admin/.local/bin/openstack:ro,Z" + command: ['/usr/bin/sleep', 'infinity'] diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index 183733fef2..c51b085748 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -29,6 +29,12 @@ - name: Start ping test ansible.builtin.include_tasks: l3_agent_connectivity_check_start.yml +- name: Create local openstackclient + when: + - cifmw_update_control_plane_check | bool + - not cifmw_update_run_dryrun | bool + ansible.builtin.include_tasks: create_local_openstackclient.yml + - name: Trigger the continuous control plane test when: - cifmw_update_control_plane_check | bool diff --git a/roles/update/templates/workload_launch_k8s.sh.j2 b/roles/update/templates/workload_launch_k8s.sh.j2 index 53bf6ee186..7b533477e9 100644 --- 
a/roles/update/templates/workload_launch_k8s.sh.j2 +++ b/roles/update/templates/workload_launch_k8s.sh.j2 @@ -1,53 +1,8 @@ #!/usr/bin/bash -set +x -export KUBECONFIG="{{ cifmw_openshift_kubeconfig }}" -export PATH="{{ cifmw_path }}" - -OS_POD_TIMEOUT={{ cifmw_update_openstackclient_pod_timeout }} -WAIT=0 - -# Temporary file where to put the error message, if any. -ERROR_FILE=/tmp/cifmw_update_ctl_testing_current_ouput.txt -rm -f "${ERROR_FILE}" - -while [ $((WAIT++)) -lt ${OS_POD_TIMEOUT} ]; do - set -o pipefail # Make sure we get the failure, as tee +set -e +set -o pipefail # Make sure we get the failure, as tee # will always succeed. - cat "{{ cifmw_update_artifacts_basedir }}/workload_launch.sh" | \ - oc rsh -n openstack openstackclient env WKL_MODE=sanityfast bash 2>&1 | tee "${ERROR_FILE}" - RC=$? - set +o pipefail - if [ "${RC}" -eq 137 ]; then - # When the command is interrupted by the restart of the - # OSclient, we have this returns code. We just retry. - sleep 1 - continue - fi - # If there's an error and the error file was created we check for - # the error message. - if [ "${RC}" -ne 0 ]; then - if [ ! -e "${ERROR_FILE}" ]; then - # no error file, rethrow the error. - exit $RC - fi - # Fragile as it depends on the exact output message. - if grep -F 'error: unable to upgrade connection: container not found' \ - "${ERROR_FILE}"; then - # Openstackclient was not able to start as it's being - # restarted, retry. - sleep 1 - continue - fi - # Error is not related to the the openstackclient not being - # available. We rethrow it. - exit ${RC} - fi - # No error. - exit 0 -done - -# We only reach this code if we reach timeout while retrying to -# trigger the openstackclient. 
-echo "OpenstackClient Pod unavalaible, giving up after ${OS_POD_TIMEOUT} seconds" >&2 -exit 127 +cat "{{ cifmw_update_artifacts_basedir }}/workload_launch.sh" | \ + podman exec -i lopenstackclient \ + env WKL_MODE=sanityfast bash -i 2>&1 From a1c645ba514db52d9793c01aa2efccc71ec4227f Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 26 May 2025 16:42:37 +0200 Subject: [PATCH 132/480] Increase timeout for integration tests On some infras, sometimes there is a proxy error and the pods can not start properly because it can not pull images, so the CI job fails and in the log file, there is information: oc wait openstack/openstack -n openstack-operators --for condition=Ready --timeout=300s error: timed out waiting for the condition on openstacks/openstack make[1]: *** [Makefile:760: openstack_init] Error 1 Increase timeout twice to avoid potential errors and give some time for proxy and kubernetes to recover and retry pull image. Signed-off-by: Daniel Pawlik --- .../make/files/get_makefiles_env/expected_variables_values.yml | 2 +- .../targets/make/files/get_makefiles_env/makefiles/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/targets/make/files/get_makefiles_env/expected_variables_values.yml b/tests/integration/targets/make/files/get_makefiles_env/expected_variables_values.yml index 944b06b352..2a15d7397e 100644 --- a/tests/integration/targets/make/files/get_makefiles_env/expected_variables_values.yml +++ b/tests/integration/targets/make/files/get_makefiles_env/expected_variables_values.yml @@ -259,4 +259,4 @@ variables: TELEMETRY_CR: "/home/test-user/out/operator/telemetry-operator/config/samples/telemetry_v1beta1_telemetry.yaml" TELEMETRY_IMG: "quay.io/openstack-k8s-operators/telemetry-operator-index:latest" TELEMETRY_REPO: "https://github.com/openstack-k8s-operators/telemetry-operator.git" - TIMEOUT: "300s" + TIMEOUT: "600s" diff --git a/tests/integration/targets/make/files/get_makefiles_env/makefiles/Makefile 
b/tests/integration/targets/make/files/get_makefiles_env/makefiles/Makefile index 6f7dc53062..2e322559f4 100644 --- a/tests/integration/targets/make/files/get_makefiles_env/makefiles/Makefile +++ b/tests/integration/targets/make/files/get_makefiles_env/makefiles/Makefile @@ -6,7 +6,7 @@ NAMESPACE ?= openstack PASSWORD ?= 12345678 SECRET ?= osp-secret OUT ?= ${PWD}/out -TIMEOUT ?= 300s +TIMEOUT ?= 600s DBSERVICE ?= galera ifeq ($(DBSERVICE), galera) DBSERVICE_CONTAINER = openstack-galera-0 From fdf83562db83096aa51c603c63ed13e4af9f7c64 Mon Sep 17 00:00:00 2001 From: Sergii Golovatiuk Date: Mon, 26 May 2025 22:35:29 +0200 Subject: [PATCH 133/480] Fix ansible.posix requirements There is no branch v2.0.0. This patch changes to actual branch which is 2.0.0 --- requirements.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.yml b/requirements.yml index aa6dac7ffb..c2d393bafe 100644 --- a/requirements.yml +++ b/requirements.yml @@ -17,7 +17,7 @@ collections: - name: https://github.com/ansible-collections/ansible.posix type: git - version: "v2.0.0" + version: "2.0.0" - name: https://github.com/ansible-collections/ansible.utils type: git version: "v6.0.0" From f6938bdc5fe3aafc0e67c86d3c8ba482199a4251 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 22 May 2025 14:07:41 +0200 Subject: [PATCH 134/480] Correct log path for Run DLRN in build_openstack_packages The path for pushing logs was set to: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/logs/dlrn.log but the directory is not pushed to the logs server (assuming by checking job result [1] - cifmw-molecule-build_openstack_packages job). 
[1] https://softwarefactory-project.io/zuul/t/rdoproject.org/build/5806364f9a4c45b68e4cc8b03413d281 Signed-off-by: Daniel Pawlik --- roles/build_openstack_packages/defaults/main.yml | 1 + roles/build_openstack_packages/tasks/run_dlrn.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/build_openstack_packages/defaults/main.yml b/roles/build_openstack_packages/defaults/main.yml index a8502bf04e..debd11f844 100644 --- a/roles/build_openstack_packages/defaults/main.yml +++ b/roles/build_openstack_packages/defaults/main.yml @@ -32,6 +32,7 @@ cifmw_bop_dlrn_deps: - python3-libselinux cifmw_bop_build_repo_dir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}/logs" +cifmw_bop_artifacts_basedir: "{{ ansible_user_dir ~ '/ci-framework-data' }}" cifmw_bop_dlrn_repo_url: "https://github.com/openstack-packages/DLRN.git" cifmw_bop_dlrn_from_source: false cifmw_bop_dlrn_venv: "{{ ansible_user_dir }}/dlrn_venv" diff --git a/roles/build_openstack_packages/tasks/run_dlrn.yml b/roles/build_openstack_packages/tasks/run_dlrn.yml index 6ba082dec2..8174e098aa 100644 --- a/roles/build_openstack_packages/tasks/run_dlrn.yml +++ b/roles/build_openstack_packages/tasks/run_dlrn.yml @@ -178,5 +178,5 @@ ansible.builtin.shell: cmd: > set -o pipefail && - {{ cifmw_bop_build_repo_dir }}/run_dlrn.sh 2>&1 {{ cifmw_bop_timestamper_cmd }} >> {{ cifmw_bop_build_repo_dir }}/dlrn.log + {{ cifmw_bop_build_repo_dir }}/run_dlrn.sh 2>&1 {{ cifmw_bop_timestamper_cmd }} >> {{ cifmw_bop_artifacts_basedir }}/logs/dlrn.log chdir: '{{ cifmw_bop_build_repo_dir }}' From 36a7d80203614e4cda4de8138f1827983b4e6295 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Mon, 26 May 2025 10:29:37 +0530 Subject: [PATCH 135/480] Added cs10 nodeset needed for meta content provider For meta content provider, centos-stream-10-vexxhost is needed. This pr adds the required nodeset in order to consume it with cs10 jobs. 
Resolves: OSPRH-16773 Signed-off-by: Chandan Kumar (raukadah) --- zuul.d/nodeset.yaml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index f69d6da14e..0a51090e90 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -59,6 +59,21 @@ nodes: - crc +# +# CentOS Stream 10 nodeset +# +- nodeset: + name: centos-stream-10-vexxhost + nodes: + - name: controller + label: cloud-centos-10-stream-tripleo-vexxhost + groups: + - name: switch + nodes: + - controller + - name: peers + nodes: [] + # # CRC-2.30 (OCP4.14) nodesets # @@ -579,7 +594,6 @@ nodes: - crc - # todo: Remove. Temporal. Needed as the credentials used in ci-bootstrap jobs for IBM don't work - nodeset: name: centos-9-medium-centos-9-crc-cloud-ocp-4-18-1-3xl-vexxhost From bd3d0351a1bebe3792f75fdee12bda521f30d88f Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Wed, 21 May 2025 16:32:59 +0200 Subject: [PATCH 136/480] Add empty adoption scenario uni02beta Just to make ci-framework happy. --- scenarios/adoption/uni02beta.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 scenarios/adoption/uni02beta.yml diff --git a/scenarios/adoption/uni02beta.yml b/scenarios/adoption/uni02beta.yml new file mode 100644 index 0000000000..4e9e5200e7 --- /dev/null +++ b/scenarios/adoption/uni02beta.yml @@ -0,0 +1,2 @@ +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From 17c72ae220f487308c9fe3e2e4a5aa579079add0 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Wed, 28 May 2025 11:41:16 +0530 Subject: [PATCH 137/480] [os_must_gather] collect logs from openshift-operators namespace cluster-observability-operator(coo) is deployed in openshift-operators namespace in watcher-operator related jobs. Currently coo deployment is failing and we are not logs collecting logs from this namespace. It is hard to debug. 
Jira: OSPCIX-884 Signed-off-by: Chandan Kumar (raukadah) --- roles/os_must_gather/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/os_must_gather/defaults/main.yml b/roles/os_must_gather/defaults/main.yml index 36ca557fd1..c0eac6da65 100644 --- a/roles/os_must_gather/defaults/main.yml +++ b/roles/os_must_gather/defaults/main.yml @@ -23,7 +23,7 @@ cifmw_os_must_gather_image_registry: "quay.rdoproject.org/openstack-k8s-operator cifmw_os_must_gather_output_dir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" cifmw_os_must_gather_repo_path: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/openstack-must-gather" cifmw_os_must_gather_timeout: "10m" -cifmw_os_must_gather_additional_namespaces: "kuttl,openshift-storage,openshift-marketplace,sushy-emulator,tobiko" +cifmw_os_must_gather_additional_namespaces: "kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko" cifmw_os_must_gather_namespaces: - openstack-operators - openstack From 659f4ef0f3413c4105253cb8e0f261c666c30ae1 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Tue, 27 May 2025 11:12:19 +0200 Subject: [PATCH 138/480] Add nodesets for 3xl crc nodes with 2 compute nodes Currently, for crc 2.39.0, there are nodesets only for 1 or three compute nodes. In watcher-operator, we use 2 compute nodes, so I am creating a new nodeset definition for it both with openshift 4.16 and 4.18. 
--- zuul.d/nodeset.yaml | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/zuul.d/nodeset.yaml b/zuul.d/nodeset.yaml index 0a51090e90..1a94cc0778 100644 --- a/zuul.d/nodeset.yaml +++ b/zuul.d/nodeset.yaml @@ -258,6 +258,26 @@ nodes: - crc +- nodeset: + name: centos-9-medium-2x-centos-9-crc-extracted-2-39-0-3xl + nodes: + - name: controller + label: cloud-centos-9-stream-tripleo-medium + - name: compute-0 + label: cloud-centos-9-stream-tripleo + - name: compute-1 + label: cloud-centos-9-stream-tripleo + - name: crc + label: coreos-crc-extracted-2-39-0-3xl + groups: + - name: computes + nodes: + - compute-0 + - compute-1 + - name: ocps + nodes: + - crc + - nodeset: name: centos-9-2x-centos-9-xxl-crc-extracted-2-39-0-xxl nodes: @@ -385,6 +405,26 @@ nodes: - crc +- nodeset: + name: centos-9-medium-2x-centos-9-crc-cloud-ocp-4-18-1-3xl + nodes: + - name: controller + label: cloud-centos-9-stream-tripleo-medium + - name: compute-0 + label: cloud-centos-9-stream-tripleo + - name: compute-1 + label: cloud-centos-9-stream-tripleo + - name: crc + label: crc-cloud-ocp-4-18-1-3xl + groups: + - name: computes + nodes: + - compute-0 + - compute-1 + - name: ocps + nodes: + - crc + - nodeset: name: centos-9-2x-centos-9-xxl-crc-cloud-ocp-4-18-1-xxl nodes: From 20bb31ffbc4f8aec25ea3eae796c37bdfcd4c5be Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 30 May 2025 09:02:22 +0200 Subject: [PATCH 139/480] Increase wait_timeout for removing storage On some infras, the CI job is failing because it gets timeout, where it is removed 30 seconds later. The error looks like: TASK [ci_local_storage : Remove the cifmw_cls_namespace namespace task path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/ci_local_storage/tasks/cleanup.yml:58 fatal: [localhost]: FAILED! 
=> changed: true duration: 120 method: delete msg: '"Namespace" "openstack": Timed out waiting on resource' result: apiVersion: v1 kind: Namespace Increase timeout from 120 to 300. Signed-off-by: Daniel Pawlik --- roles/ci_local_storage/tasks/cleanup.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/ci_local_storage/tasks/cleanup.yml b/roles/ci_local_storage/tasks/cleanup.yml index 2a54649e12..111470e985 100644 --- a/roles/ci_local_storage/tasks/cleanup.yml +++ b/roles/ci_local_storage/tasks/cleanup.yml @@ -64,3 +64,4 @@ kind: Namespace name: "{{ cifmw_cls_namespace }}" wait: true + wait_timeout: 300 From 4a22f4d8a74c92fb73461543794451d5ccc6509e Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 30 May 2025 10:06:28 +0200 Subject: [PATCH 140/480] Add retry when registering node in RH subscription or container registry Sometimes the CI jobs fail because there are too many requests made for login to the container registry or to register the system in the Red Hat registration service. Let's add delay and retry again to register the system or login to the container service. 
Signed-off-by: Daniel Pawlik --- roles/adoption_osp_deploy/tasks/login_registries.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/adoption_osp_deploy/tasks/login_registries.yml b/roles/adoption_osp_deploy/tasks/login_registries.yml index 1430ef85d7..570724d8f4 100644 --- a/roles/adoption_osp_deploy/tasks/login_registries.yml +++ b/roles/adoption_osp_deploy/tasks/login_registries.yml @@ -26,6 +26,10 @@ release: "{{ ansible_distribution_version }}" force_register: true state: present + retries: 5 + delay: 30 + register: _rh_result + until: not _rh_result.failed - name: Login in container registry when: @@ -50,3 +54,7 @@ loop: - zuul - root + retries: 5 + delay: 30 + register: _podman_login + until: _podman_login.rc == 0 From b34fa2ff99d81822409d0feb48f4173e7c9be5e3 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 30 May 2025 16:23:51 +0200 Subject: [PATCH 141/480] Remove release parameter in redhat_subscription Earlier, before commit [1], the release was not set. It seems that by setting the release, some packages might not be available after. Let's remove that parameter. 
[1] https://github.com/openstack-k8s-operators/ci-framework/commit/2d3aed4e1ac225adddfc32415dd6a13535084195 Signed-off-by: Daniel Pawlik --- roles/adoption_osp_deploy/tasks/login_registries.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/adoption_osp_deploy/tasks/login_registries.yml b/roles/adoption_osp_deploy/tasks/login_registries.yml index 570724d8f4..4aa1c43e09 100644 --- a/roles/adoption_osp_deploy/tasks/login_registries.yml +++ b/roles/adoption_osp_deploy/tasks/login_registries.yml @@ -23,7 +23,6 @@ community.general.redhat_subscription: activationkey: "{{ cifmw_adoption_osp_deploy_rhsm_key }}" org_id: "{{ cifmw_adoption_osp_deploy_rhsm_org }}" - release: "{{ ansible_distribution_version }}" force_register: true state: present retries: 5 From 4394d2a22f2d6e55ff03373eb8638ed2684b3315 Mon Sep 17 00:00:00 2001 From: mkatari Date: Tue, 20 May 2025 11:06:33 +0530 Subject: [PATCH 142/480] Update endpoints in configure_object Currently Configure_object task is only supporting greenfield where the service and endpoints are created. In case of adoption + ceph_migration, endpoints are created and we need to update endpoints after redeploying rgw (with tls). This patch allows cifmw code to update endpoints if they exist already. 
--- .../cifmw_cephadm/tasks/configure_object.yml | 56 ++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/roles/cifmw_cephadm/tasks/configure_object.yml b/roles/cifmw_cephadm/tasks/configure_object.yml index 649e3ea7c1..b06d266e71 100644 --- a/roles/cifmw_cephadm/tasks/configure_object.yml +++ b/roles/cifmw_cephadm/tasks/configure_object.yml @@ -65,7 +65,9 @@ environment: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" delegate_to: localhost - when: cifmw_openshift_kubeconfig is defined + when: + - cifmw_openshift_kubeconfig is defined + - swift_endpoints_count.stdout == "0" ansible.builtin.command: "oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack {{ item.os_command }} show {{ item.os_command_object }} -c id -f value" register: all_uuids loop: @@ -84,6 +86,58 @@ - cifmw_cephadm_certificate | length > 0 - cifmw_cephadm_key | length > 0 +- name: Update Swift endpoints if exists + delegate_to: localhost + when: + - cifmw_openshift_kubeconfig is defined + - not swift_in_ctlplane.stdout | bool + - swift_endpoints_count.stdout != "0" + block: + - name: Get UUID for Swift 'public' endpoint + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + ansible.builtin.shell: | + set -euo pipefail + oc exec -t openstackclient -- \ + openstack endpoint list -f json | \ + jq -r '.[] | select(.["Service Name"] == "swift" and .Interface == "public") | .ID' + register: uuid_swift_public_ep + changed_when: false + + - name: Get UUID for Swift 'internal' endpoint + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + ansible.builtin.shell: | + set -euo pipefail + oc exec -t openstackclient -- \ + openstack endpoint list -f json | \ + jq -r '.[] | select(.["Service Name"] == "swift" and .Interface == "internal") | .ID' + register: uuid_swift_internal_ep + changed_when: false + + - name: Update Swift endpoints url + cifmw.general.ci_script: + extra_args: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + output_dir: 
"/home/zuul/ci-framework-data/artifacts" + script: |- + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient \ + openstack endpoint set \ + --url {{ cifmw_cephadm_urischeme }}://{{ ( + cifmw_external_dns_vip_ext.values() | first + if cifmw_external_dns_vip_ext is defined + else cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address') + ) }}:8080/swift/v1/AUTH_%\(tenant_id\)s \ + {{ uuid_swift_public_ep.stdout }} + oc -n {{ cifmw_cephadm_ns }} rsh openstackclient \ + openstack endpoint set \ + --url {{ cifmw_cephadm_urischeme }}://{{ ( + cifmw_external_dns_vip_int.values() | first + if cifmw_external_dns_vip_int is defined + else cifmw_cephadm_rgw_vip | ansible.utils.ipaddr('address') + ) }}:8080/swift/v1/AUTH_%\(tenant_id\)s \ + {{ uuid_swift_internal_ep.stdout }} + - name: Configure object store to use rgw cifmw.general.ci_script: extra_args: From 29a1d2cc6813dd89e1eb60098f890ebd4bb0d5a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Mon, 2 Jun 2025 18:15:15 +0200 Subject: [PATCH 143/480] Fix network config in create_cloud_init_iso.yml The tasks before this one set a fact `_network_data`, it was looking for a defined `_network_config` in this task which is never set so `cifmw_config_drive_networkconfig` end up being `None` even if we defined network config in `cifmw_libvirt_manager_configuration`. 
Jira: OSPRH-17107 --- roles/libvirt_manager/tasks/create_cloud_init_iso.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml index 9f300d80d5..8420d4eb13 100644 --- a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml +++ b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml @@ -63,7 +63,7 @@ _default_uuid: "{{ 99999999 | random(seed=vm) | to_uuid | lower }}" cifmw_config_drive_uuid: "{{ _uuid.stdout | default(_default_uuid) | trim}}" cifmw_config_drive_hostname: "{{ vm }}" - cifmw_config_drive_networkconfig: "{{ _network_config | default(None) }}" + cifmw_config_drive_networkconfig: "{{ _network_data | default(None) }}" cifmw_config_drive_userdata: "{{ _user_data }}" ansible.builtin.include_role: name: config_drive From 33fb94c20aeea366f5c790da0f1017a120a14474 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Mon, 2 Jun 2025 18:52:07 +0200 Subject: [PATCH 144/480] Fix _network_data fact in create_cloud_init_iso.yml We need curly brackets to resolve the value, instead of setting the fact to string `vm_data.networkconfig` ... 
Jira: OSPRH-17107 --- roles/libvirt_manager/tasks/create_cloud_init_iso.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml index 8420d4eb13..9d57b94cf2 100644 --- a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml +++ b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml @@ -50,12 +50,12 @@ when: - vm_data.networkconfig | type_debug == "dict" ansible.builtin.set_fact: - _network_data: vm_data.networkconfig + _network_data: "{{ vm_data.networkconfig }}" - name: "Define the network config for each vm" when: - vm_data.networkconfig | type_debug == "list" ansible.builtin.set_fact: - _network_data: vm_data.networkconfig[vm_idx] + _network_data: "{{ vm_data.networkconfig[vm_idx] }}" - name: "Call the config_drive role" vars: From 7c6c10575126ef6eb9cbb0d21f9ea713bf78cf67 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 2 Jun 2025 14:28:33 +0530 Subject: [PATCH 145/480] Use role instead of playbooks - 08-run-tests.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 22 ++++++++++++++------- playbooks/08-run-tests.yml | 4 ++++ roles/cifmw_setup/tasks/run_tests.yml | 18 +++++++++++++++++ update-edpm.yml | 28 ++++++++++++++++----------- 4 files changed, 54 insertions(+), 18 deletions(-) create mode 100644 roles/cifmw_setup/tasks/run_tests.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index f113724694..812dec2c07 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -85,13 +85,21 @@ tags: - admin-setup -- name: Import run test playbook - ansible.builtin.import_playbook: playbooks/08-run-tests.yml - vars: - pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" - post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" - tags: - - run-tests +- name: Run cifmw_setup run_tests.yml + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run Test + vars: + pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" + post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_tests.yml + when: + - cifmw_run_tests | default('false') | bool + tags: + - run-tests - name: Run compliance tests ansible.builtin.import_playbook: playbooks/09-compliance.yml diff --git a/playbooks/08-run-tests.yml b/playbooks/08-run-tests.yml index 70fbf9a105..f087a12338 100644 --- a/playbooks/08-run-tests.yml +++ b/playbooks/08-run-tests.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/run_tests.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# - name: "Test playbook" hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/run_tests.yml b/roles/cifmw_setup/tasks/run_tests.yml new file mode 100644 index 0000000000..af72ec08ab --- /dev/null +++ b/roles/cifmw_setup/tasks/run_tests.yml @@ -0,0 +1,18 @@ +--- +- name: Run pre_tests hooks + vars: + step: pre_tests + ansible.builtin.import_role: + name: run_hook + +- name: Run tests + tags: + - tests + ansible.builtin.import_role: + name: "{{ cifmw_run_test_role | default('tempest') }}" + +- name: Run post_tests hooks + vars: + step: post_tests + ansible.builtin.import_role: + name: run_hook diff --git a/update-edpm.yml b/update-edpm.yml index f9dd73dda4..5ef1421ef9 100644 --- a/update-edpm.yml +++ b/update-edpm.yml @@ -20,17 +20,23 @@ tags: - update -- name: Import run test playbook - ansible.builtin.import_playbook: playbooks/08-run-tests.yml - vars: - pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" - post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" - cifmw_test_operator_artifacts_basedir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/tests/test_operator_update" - cifmw_test_operator_tempest_name: "post-update-tempest-tests" - when: - - cifmw_run_tests | default('false') | bool - tags: - - run-tests +- name: Run cifmw_setup run_tests.yml + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run Test + vars: + pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" + post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" + cifmw_test_operator_artifacts_basedir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/tests/test_operator_update" + cifmw_test_operator_tempest_name: "post-update-tempest-tests" + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_tests.yml + when: + - cifmw_run_tests | default('false') | bool + tags: + - run-tests - name: Inject 
status flag hosts: "{{ cifmw_target_host | default('localhost') }}" From 183e4b25106ad30cceb5abb566315dd1665d5418 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 2 Jun 2025 16:04:27 +0530 Subject: [PATCH 146/480] Use role instead of playbooks - 07-admin-setup.yml It is continuation of simplification job execution [1]. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 14 ++++++++++---- playbooks/07-admin-setup.yml | 4 ++++ roles/cifmw_setup/tasks/admin_setup.yml | 21 +++++++++++++++++++++ 3 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 roles/cifmw_setup/tasks/admin_setup.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 812dec2c07..ce88957174 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -80,10 +80,16 @@ tags: - edpm -- name: Import admin setup related playbook - ansible.builtin.import_playbook: playbooks/07-admin-setup.yml - tags: - - admin-setup +- name: Post-deployment admin setup steps + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run cifmw_setup admin_setup.yml + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: admin_setup.yml + tags: + - admin-setup - name: Run cifmw_setup run_tests.yml hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/07-admin-setup.yml b/playbooks/07-admin-setup.yml index 7513263a98..b3f67cee77 100644 --- a/playbooks/07-admin-setup.yml +++ b/playbooks/07-admin-setup.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/admin_setup.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# - name: Post-deployment admin setup steps hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/admin_setup.yml b/roles/cifmw_setup/tasks/admin_setup.yml new file mode 100644 index 0000000000..53222c311a --- /dev/null +++ b/roles/cifmw_setup/tasks/admin_setup.yml @@ -0,0 +1,21 @@ +--- +- name: Run pre_admin_setup hooks + vars: + step: pre_admin_setup + ansible.builtin.import_role: + name: run_hook + +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Create openstack network elements + ansible.builtin.import_role: + name: os_net_setup + when: not cifmw_skip_os_net_setup | default('false') | bool + +- name: Run post_admin_setup hooks + vars: + step: post_admin_setup + ansible.builtin.import_role: + name: run_hook From c1d7d04aa80d66e7d9a25683ccef86d706d04dda Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 2 Jun 2025 15:38:46 +0530 Subject: [PATCH 147/480] Use role instead of playbooks - 09-compliance.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 26 ++++++++++++++++++++++++-- playbooks/09-compliance.yml | 4 ++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/deploy-edpm.yml b/deploy-edpm.yml index ce88957174..0df27ec378 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -107,8 +107,30 @@ tags: - run-tests -- name: Run compliance tests - ansible.builtin.import_playbook: playbooks/09-compliance.yml +- name: Run operators compliance scans + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run compliance scan for controllers + ansible.builtin.import_role: + name: compliance + vars: + cifmw_compliance_podman_username: "{{ cifmw_registry_token.credentials.username }}" + cifmw_compliance_podman_password: "{{ cifmw_registry_token.credentials.password }}" + when: cifmw_run_operators_compliance_scans | default('false') | bool + tags: + - compliance + +- name: Run compliance scan for computes + hosts: "{{ groups['computes'] | default ([]) }}" + gather_facts: true + tasks: + - name: Run compliance scan for one compute + ansible.builtin.import_role: + name: compliance + tasks_from: run_compute_node_scans.yml + run_once: true + when: cifmw_run_compute_compliance_scans | default('false') | bool tags: - compliance diff --git a/playbooks/09-compliance.yml b/playbooks/09-compliance.yml index 6876b33487..6378a51fd2 100644 --- a/playbooks/09-compliance.yml +++ b/playbooks/09-compliance.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: deploy-edpm.yml#L96-119. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# - name: Run operators compliance scans hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false From 299a5d6035b6a26accd24a1b67572f9bef601b2e Mon Sep 17 00:00:00 2001 From: bshewale Date: Tue, 27 May 2025 16:22:56 +0530 Subject: [PATCH 148/480] Use local copy of common-requirements.txt while installation This looks like something that can be avoided by using the local copy of that file instead of reaching out to github. It also seems like it will improve testing if the requirements file used is in-tree. --- roles/reproducer/defaults/main.yml | 1 + roles/reproducer/tasks/configure_controller.yml | 3 ++- roles/reproducer/tasks/libvirt_layout.yml | 6 ++++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/roles/reproducer/defaults/main.yml b/roles/reproducer/defaults/main.yml index 338dc9243c..737d2ebd74 100644 --- a/roles/reproducer/defaults/main.yml +++ b/roles/reproducer/defaults/main.yml @@ -18,6 +18,7 @@ # All variables intended for modification should be placed in this file. 
# All variables within this role should have a prefix of "cifmw_reproducer" cifmw_reproducer_basedir: "{{ cifmw_basedir | default( ansible_user_dir ~ '/ci-framework-data') }}" +cifmw_reproducer_src_dir: "{{ cifmw_ci_src_dir | default( ansible_user_dir ~ '/src') }}" cifmw_reproducer_kubecfg: "{{ cifmw_libvirt_manager_configuration.vms.crc.image_local_dir }}/kubeconfig" cifmw_reproducer_params: {} cifmw_reproducer_run_job: true diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 71ad3e3958..305b8ad9ef 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -350,12 +350,13 @@ name: sushy_emulator tasks_from: verify.yml + # NOTE: src dir is synchronized in libvirt_layout.yml - name: Install ansible dependencies register: _async_dep_install async: 600 # 10 minutes should be more than enough poll: 0 ansible.builtin.pip: - requirements: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt + requirements: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" - name: Inject most of the cifmw_ parameters passed to the reproducer run tags: diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index 56da59c775..c9170dd6ac 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -48,6 +48,12 @@ rsync -r {{ cifmw_reproducer_basedir }}/reproducer-inventory/ zuul@controller-0:reproducer-inventory + - name: Push src dir to controller-0 + ansible.builtin.command: # noqa: command-instead-of-module + cmd: >- + rsync -r {{ cifmw_reproducer_src_dir }}/ + zuul@controller-0:src + - name: Run post tasks in OCP cluster case when: - _use_ocp | bool From cbe9c0f5e1021f392b69be616596617979a343a1 Mon Sep 17 00:00:00 2001 From: Marian Krcmarik Date: Thu, 3 Apr 2025 00:29:40 +0200 
Subject: [PATCH 149/480] dnsmasq: Add locahost addresses to lo The default listener.conf of cifmw-dnsmasq looks like this: except-interface=lo bind-dynamic listen-address=127.0.0.2 interface=cifmw-osp_trunk interface=ocpbm Once the cifmw-dnsmasq service is restarted A race/loop between dnsmasq's static binding (listen-address =127.0.0.2) and dynamic behavior (bind-dynamic). The service constantly throws an error: "failed to create listening socket for 127.0.0.2: Address already in use" and keeps creating Unnconnected sockets until It reaches the file descriptors limit and then stops working properly. There is no 127.0.0.2 IP assigned to lo interface so I suspect It may cause some race condition in the way a new IP address/interface is dynamically detected (bind-dynamic option) and the statis binding (local-address option). --- roles/dnsmasq/tasks/configure.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/dnsmasq/tasks/configure.yml b/roles/dnsmasq/tasks/configure.yml index 68a18c791a..aae7406e65 100644 --- a/roles/dnsmasq/tasks/configure.yml +++ b/roles/dnsmasq/tasks/configure.yml @@ -66,6 +66,15 @@ - name: Render dns configuration ansible.builtin.include_tasks: dns.yml +- name: Add localhost addresses from defined dnsmasq listen addresses to loopback interface + become: true + loop: "{{ cifmw_dnsmasq_listen_addresses }}" + when: item is match("^127\\..*") + ansible.builtin.shell: | + set -xe -o pipefail + ip addr show lo | grep -q "{{ item }}" || ip addr add {{ item }}/8 dev lo + changed_when: false + - name: Manage and start dnsmasq instance become: true when: From e11569b2fd00e3fea76b61bd893306563921f9a9 Mon Sep 17 00:00:00 2001 From: James Slagle Date: Fri, 30 May 2025 10:57:17 -0400 Subject: [PATCH 150/480] Remove bshephard Signed-off-by: James Slagle --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3c8d2cbfa2..55387f6cd0 100644 --- a/.github/CODEOWNERS 
+++ b/.github/CODEOWNERS @@ -66,4 +66,4 @@ roles/ci_gen_kustomize_values/templates/uni* @openstack-k8s-operators/ciops roles/update @openstack-k8s-operators/updates # Validations -roles/validations @bshephar @drosenfe +roles/validations @drosenfe From 06cfba4e04687ce22ff8ed03e04c3b68d16ae34f Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 2 Jun 2025 16:06:13 +0200 Subject: [PATCH 151/480] Add retries to kinit call We observe at times there are failures that seem related to the temporary unavailability of the authentication service when a lot of jobs attempt to get token. Hence, we add retry. Jira: OSPCIX-797 --- roles/dlrn_report/tasks/dlrn_report_results.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/dlrn_report/tasks/dlrn_report_results.yml b/roles/dlrn_report/tasks/dlrn_report_results.yml index 0abbbc56d7..7e474c1c29 100644 --- a/roles/dlrn_report/tasks/dlrn_report_results.yml +++ b/roles/dlrn_report/tasks/dlrn_report_results.yml @@ -20,6 +20,10 @@ kinit {{ cifmw_dlrn_report_krb_user_realm }} -k -t {{ cifmw_dlrn_report_keytab }} + retries: 5 + delay: 60 + register: _kinit_status + until: _kinit_status.rc == 0 when: cifmw_dlrn_report_kerberos_auth|bool - name: Set empty value for dlrnapi password From 427510fac4ac0d9382a5488ee06e93a359907af4 Mon Sep 17 00:00:00 2001 From: Jaganathan Palanisamy Date: Fri, 30 May 2025 17:31:15 +0530 Subject: [PATCH 152/480] Multinode EDPM CI job failing for nmstate provider This change is to update the incorrect network config template where used nic1 for dhcp and also used br-ex controlplane config with default route. so two default routes creating for nmstate provider scenario and this is not happened for ifcfg provider and only one default route created for nic1. Proposed this PR is to fix incorrect network config. 
--- hooks/playbooks/fetch_compute_facts.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index f48541bb62..bc2d94d7c7 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -157,6 +157,12 @@ path: /spec/nodeTemplate/ansible/ansibleVars/neutron_public_interface_name value: "{{ crc_ci_bootstrap_networks_out[_first_compute].default.iface | default('') }}" + {% for compute_node in groups['computes'] %} + - op: replace + path: /spec/nodes/edpm-{{ compute_node }}/networks/0/defaultRoute + value: false + {% endfor %} + {% for compute_node in groups['computes'] if compute_node != _first_compute %} - op: replace path: /spec/nodes/edpm-{{ compute_node }}/ansible/ansibleHost From a018e6b0bced870b891d976dc310b7eb3db35044 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 26 Mar 2025 19:08:05 +0100 Subject: [PATCH 153/480] Monitor changes during update. This collect events in: - Pod in openstack and openstack-operators namespace; - containers on compute nodes; - current step of the update; and put it them in a timeline of event during the update process. To add a update event, we just run ```bash update_event.sh Update complete ``` And it will add `Update complete` to the timeline. You can find the timeline file in `{{ cifmw_update_artifacts_basedir }}/update_timeline.log` which is usually `/home/zuul/ci-framework-data/tests/update/update_timeline.log`. 
It looks like this: ```text 2025-04-25T14:11:46,489283331+00:00 [TIME:ocp-master-0] Fri Apr 25 14:11:52 UTC 2025 2025-04-25T11:25:55,000000000+00:00 [OPENSHIFT:openstack] Warning Unhealthy Pod/glance-33de1-default-internal-api-0: Liveness probe failed: HTTP probe failed with statuscode: 502 2025-04-25T14:09:14,671411856+00:00 [PODMAN:compute-5xgskqtv-1] image pull registry-proxy.engineering.redhat.com/rh-osbs/rhceph:7 2025-04-25T14:11:32,290081077+00:00 [UPDATE EVENT] Update complete ``` The monitoring is done every `cifmw_update_resources_monitoring_interval` seconds which is 10 seconds by default. Closes: [OSPRH-16018](https://issues.redhat.com/browse/OSPRH-16018) --- playbooks/update.yml | 42 ++ roles/update/README.md | 2 + roles/update/defaults/main.yml | 3 + roles/update/molecule/default/converge.yml | 12 +- roles/update/tasks/init_monitoring.yml | 57 +++ roles/update/tasks/main.yml | 83 +++- .../templates/monitor_resources_changes.sh.j2 | 429 ++++++++++++++++++ roles/update/templates/update_event.sh.j2 | 39 ++ 8 files changed, 650 insertions(+), 17 deletions(-) create mode 100644 roles/update/tasks/init_monitoring.yml create mode 100644 roles/update/templates/monitor_resources_changes.sh.j2 create mode 100644 roles/update/templates/update_event.sh.j2 diff --git a/playbooks/update.yml b/playbooks/update.yml index 631205ee54..4ebc5dc49f 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -21,6 +21,16 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Initialize monitoring + ansible.builtin.include_role: + name: update + tasks_from: init_monitoring.yml + + - name: Set update step to Update Repo and OpenStack Services Containers + ansible.builtin.command: + cmd: > + {{ cifmw_basedir }}/tests/update/update_event.sh + Update Repo and OpenStack Services Containers - name: Copy repos to before_update_repos directory ansible.builtin.copy: remote_src: true @@ -54,6 +64,16 @@ src: "{{ cifmw_basedir 
}}/artifacts/repositories/" mode: "0755" +- name: Log Ceph update state + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Set update step to Ceph Update + ansible.builtin.command: + cmd: > + {{ cifmw_basedir }}/tests/update/update_event.sh + Ceph Update + - name: Run Ceph update if part of the deployment hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" gather_facts: true @@ -95,11 +115,33 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: + - name: Set update step to Update Role + ansible.builtin.command: + cmd: > + {{ cifmw_basedir }}/tests/update/update_event.sh + Update Role - name: Run update tags: - update ansible.builtin.import_role: name: update + - name: Set update step to End of Update Role + ansible.builtin.command: + cmd: > + {{ cifmw_basedir }}/tests/update/update_event.sh + End of Update Role + - name: Stop monitoring + block: + - name: Verify monitoring pid file + ansible.builtin.stat: + path: "{{ cifmw_update_artifacts_basedir }}/monitor_resources_changes.pid" + register: cifmw_update_monitoring_pid + - name: Stop the monitoring process + ansible.builtin.shell: + cmd: >- + kill + $(cat {{ cifmw_basedir }}/tests/update/monitor_resources_changes.pid) + when: cifmw_update_monitoring_pid.stat.exists | bool - name: Run post_update hooks vars: diff --git a/roles/update/README.md b/roles/update/README.md index f01b7cde56..3a199a8c8b 100644 --- a/roles/update/README.md +++ b/roles/update/README.md @@ -19,4 +19,6 @@ Role to run update * `cifmw_update_reboot_test`: (Boolean) Activate the reboot test after update. Default to `False`. * `cifmw_update_ansible_ssh_private_key_file`: (String) Define the path to the private key file used for the compute nodes. * `cifmw_update_wait_retries_reboot`: (Integer) Number of retries to wait for a compute node reboot. One retry is done every five seconds. Default to 60, so five minutes. 
+* `cifmw_update_resources_monitoring_interval`: (Integer) Interval, in seconds, between two resources monitor during update. Default to 10 seconds. + ## Examples diff --git a/roles/update/defaults/main.yml b/roles/update/defaults/main.yml index 27f8805331..38b171087c 100644 --- a/roles/update/defaults/main.yml +++ b/roles/update/defaults/main.yml @@ -59,3 +59,6 @@ cifmw_update_openstackclient_pod_timeout: 10 # in seconds. cifmw_update_ctl_plane_max_cons_fail: 2 cifmw_update_ctl_plane_max_fail: 3 cifmw_update_ctl_plane_max_tries: 84 + +# Resource Monitoring during update +cifmw_update_resources_monitoring_interval: 10 # in seconds. diff --git a/roles/update/molecule/default/converge.yml b/roles/update/molecule/default/converge.yml index 95b74bb769..d5482b8bb8 100644 --- a/roles/update/molecule/default/converge.yml +++ b/roles/update/molecule/default/converge.yml @@ -20,5 +20,13 @@ vars: ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_update_run_dryrun: true - roles: - - role: "update" + cifmw_openshift_kubeconfig: "{{ lookup('env', 'HOME') }}/.crc/machines/crc/kubeconfig" + cifmw_nolog: false + tasks: + - name: Initialize monitoring + ansible.builtin.include_role: + name: update + tasks_from: init_monitoring.yml + - name: Run Update + ansible.builtin.include_role: + name: update diff --git a/roles/update/tasks/init_monitoring.yml b/roles/update/tasks/init_monitoring.yml new file mode 100644 index 0000000000..f9fe72ae63 --- /dev/null +++ b/roles/update/tasks/init_monitoring.yml @@ -0,0 +1,57 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Ensure update log directory exists. + ansible.builtin.file: + path: "{{ cifmw_update_artifacts_basedir }}" + state: directory + mode: "0755" + +- name: Create update step monitoring file + ansible.builtin.template: + src: "update_event.sh.j2" + dest: "{{ cifmw_update_artifacts_basedir }}/update_event.sh" + mode: "0755" + +- name: Create update stage monitoring file + ansible.builtin.template: + src: "monitor_resources_changes.sh.j2" + dest: "{{ cifmw_update_artifacts_basedir }}/monitor_resources_changes.sh" + mode: "0755" + +- name: Set update step to {{ cifmw_update_state | default("Starting Update") }} + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + {{ cifmw_update_state | default("Starting Update") }} + +- name: Initialize monitoring + ansible.builtin.command: + cmd: "{{ cifmw_update_artifacts_basedir }}/monitor_resources_changes.sh -a init" + creates: "{{ cifmw_update_artifacts_basedir }}/update_timeline.log" + no_log: "{{ cifmw_nolog | default(true) | bool }}" + +- name: Start monitoring + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + {{ cifmw_update_artifacts_basedir }}/monitor_resources_changes.sh + -a monitor + -t {{ cifmw_update_resources_monitoring_interval }} + -l {{ cifmw_update_artifacts_basedir }}/monitor_resources_script.log + creates: "{{ cifmw_update_artifacts_basedir }}/monitor_resources_changes.pid" diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index c51b085748..7fb74f76e5 
100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -50,23 +50,37 @@ - cifmw_ci_gen_kustomize_values_installplan_approval is defined - cifmw_ci_gen_kustomize_values_installplan_approval | lower == 'manual' -- name: Initialize the openstack operator if needed - kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit) }}" - context: "{{ cifmw_openshift_context | default(omit) }}" - definition: "{{ _openstack_init_resource }}" - state: present - vars: - _openstack_init_resource: - apiVersion: operator.openstack.org/v1beta1 - kind: OpenStack - metadata: - name: openstack - namespace: openstack-operators +- name: Handle OpenStack operator initialization when: - cifmw_ci_gen_kustomize_values_deployment_version is defined - - cifmw_ci_gen_kustomize_values_deployment_version is in ['v1.0.3', 'v1.0.6'] + - cifmw_ci_gen_kustomize_values_deployment_version in ['v1.0.3', 'v1.0.6'] + block: + - name: Set update step to About to initialize the OpenStack operator + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + About to initialize the OpenStack operator + + - name: Initialize the OpenStack operator if needed + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + definition: "{{ _openstack_init_resource }}" + state: present + vars: + _openstack_init_resource: + apiVersion: operator.openstack.org/v1beta1 + kind: OpenStack + metadata: + name: openstack + namespace: openstack-operators + +- name: Set update step to Wait for successful deployment of the openstack operator + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Wait for successful deployment of the openstack operator - name: Ensure OpenStack deployment is successful and block until it is done 
kubernetes.core.k8s_info: @@ -100,6 +114,12 @@ when: - cifmw_ci_gen_kustomize_values_deployment_version is defined block: + - name: Set update step to About to get a new version + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + About to get a new version + - name: Make sure we get a new version available, block until we do. kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" @@ -118,6 +138,13 @@ cifmw_update_next_available_version: >- {{ openstackversion_info.resources[0].status.availableVersion }} + - name: Set update step to Got new version + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Got new version {{ cifmw_update_next_available_version }} + ({{ openstackversion_info.resources[0].status.deployedVersion }}) + - name: Set openstack_update_run Makefile environment variables tags: - always @@ -147,6 +174,12 @@ default(cifmw_update_openstack_update_run_target_version) }} +- name: Set update step to About to start the Update sequence + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Update to start the Update sequence + - name: Run make openstack_update_run vars: make_openstack_update_run_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" @@ -156,6 +189,12 @@ name: 'install_yamls_makes' tasks_from: 'make_openstack_update_run' +- name: Set update step to Update Sequence complete + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Update Sequence complete + - name: Stop the ping test ansible.builtin.include_tasks: l3_agent_connectivity_check_stop.yml when: @@ -169,7 +208,21 @@ ansible.builtin.command: | {{ cifmw_update_artifacts_basedir }}/control_plane_test_stop.sh +- name: Set update step to About to start Reboot + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + About to start Reboot + when: + - 
cifmw_update_reboot_test | bool + - name: Reboot the compute nodes ansible.builtin.include_tasks: reboot_computes.yml when: - cifmw_update_reboot_test | bool + +- name: Set update step to Update complete + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Update complete diff --git a/roles/update/templates/monitor_resources_changes.sh.j2 b/roles/update/templates/monitor_resources_changes.sh.j2 new file mode 100644 index 0000000000..acf832976c --- /dev/null +++ b/roles/update/templates/monitor_resources_changes.sh.j2 @@ -0,0 +1,429 @@ +#!/bin/bash +# +# Description: The script monitors OpenShift events, Podman events, +# and update stages. It logs all changes to a timeline file in +# chronological order. +# +# Unless called with `-a init` or `-f` this script will daemonize itself. +# +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +set -euo pipefail + +# Initialize default options +SLOG_FILE="/dev/null" +FOREGROUND=false +ACTION="all" +POLL_INTERVAL="{{ cifmw_update_resources_monitoring_interval }}" +BASE_DIR="{{ cifmw_update_artifacts_basedir }}" +TIMELINE_LOG_FILE="${BASE_DIR}/update_timeline.log" + +# Where to find the inventory to connect to the compute +CI_INVENTORY="${CI_INVENTORY:-{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml}" + +# Log files +UPDATE_EVENT_FILE="${BASE_DIR}/current_update_event.log" + +# OpenShift variables +OS_NAMESPACES=("openstack-operators" "openstack") +export KUBECONFIG="{{ cifmw_openshift_kubeconfig }}" +export PATH="{{ cifmw_path }}" + +# Script related variables +PID_FILE="${BASE_DIR}/monitor_resources_changes.pid" +TMP_LOG="${BASE_DIR}/monitor_resources_tmp_dir.txt" +TERMINATE_REQUESTED=false # Flag to indicate termination request +ORIGINAL_ARGS=("$@") # Save the original argument list + +# Get the PID back, or empty if not file +PID=$( [[ -f "${PID_FILE}" ]] && cat "${PID_FILE}" || echo "" ) + +show_help() { + echo "Usage: ${0##*/} [options] [+-a all|init|monitor] [+-t time in sec]" + echo + echo "Options:" + echo " -a ACTION Action to perform: all, init, or monitor. Default is all." + echo " -t POLL_INTERVAL Time between checks in seconds. Default is ${POLL_INTERVAL}." + echo " -l SLOG_FILE Script log file. Default is /dev/null." + echo " -f Run in the foreground (do not daemonize)." + echo " -h Display this help message." 
+} + +while getopts 'a:t:l:fh' OPT; do + case $OPT in + a) ACTION="$OPTARG" ;; + t) POLL_INTERVAL="$OPTARG" ;; + l) SLOG_FILE="$OPTARG" ;; + f) FOREGROUND=true ;; + h) + show_help + exit 0 + ;; + *) + show_help + exit 2 + ;; + esac +done +shift $((OPTIND - 1)) + +# Run as daemon +daemonize() { + # Manage file descriptors + exec 0<&- # Close stdin + exec 1>>"${SLOG_FILE}" # Redirect stdout to log file + exec 2>&1 # Redirect stderr to log file + + # Start a new session + setsid "$0" "${ORIGINAL_ARGS[@]}" & + echo $! > "${PID_FILE}" + # Exit parent process + exit +} + +# Check if already running +if [[ -n "${PID}" && -e /proc/${PID} ]]; then + if [[ ${PID} -ne $$ ]]; then # We are not the pid. This happens + # when we were just daemonized + echo "Daemon is already running with PID ${PID}." + exit 1 + fi +fi + +# Trap function to handle script termination +terminate_script() { + echo "Signal received, request termination..." + TERMINATE_REQUESTED=true +} + +# Register the termination signal handler +trap 'terminate_script' SIGTERM SIGINT + + +# Daemonize the script unless running init (blocking) or in the +# foreground +if [[ "${ACTION}" != "init" ]] && ! 
$FOREGROUND; then + if [[ -z "${PID}" ]]; then + daemonize + fi +fi + +# Temporary files handling +setup_tmp_dir() { + local action="${1:-all}" + + if [ "${action}" != "monitor" ]; then + TMP_DIR=$(mktemp -d -q -p ${BASE_DIR} -t monitor_tmp-XXX) + echo "${TMP_DIR}" > "${TMP_LOG}" + else + TMP_DIR=$(cat "${TMP_LOG}") + fi + + OCP_EVENTS_DIR="${TMP_DIR}/openshift_events" + PODMAN_EVENTS_FILE="${TMP_DIR}/podman_events.txt" + MONITOR_LAST_CHECK_FILE="${TMP_DIR}/monitor_last_check_time.txt" + CYCLE_EVENTS_FILE="${TMP_DIR}/cycle_events_init.txt" + DEDUP_FILE="${TMP_DIR}/dedup_events.txt" + SORTED_FILE="${TMP_DIR}/sorted_events.txt" + + mkdir -p "${OCP_EVENTS_DIR}" + touch "${DEDUP_FILE}" + touch "${CYCLE_EVENTS_FILE}" + touch "${MONITOR_LAST_CHECK_FILE}" +} + +get_current_timestamp() { + date --iso-8601=n +} + +## Events handling +sort_events_file() { + local cycle_file="$1" + sort -t '|' -k1,1 "${cycle_file}" > "${SORTED_FILE}" +} + +deduplicate_events() { + local content + local content_hash + while IFS= read -r line; do + content="$(echo "$line" | cut -d '|' -f2-)" + content_hash="$(echo "$content" | md5sum | awk '{print $1}')" + + if ! 
grep -q "$content_hash" "${DEDUP_FILE}"; then + echo "$content" >> "${TIMELINE_LOG_FILE}" + # Add the date to be able to clean up the dedup file + echo "$(date +%s) $content_hash" >> "${DEDUP_FILE}" + fi + done < "${SORTED_FILE}" +} + +cleanup_dedup_file() { + local current_time + current_time=$(date +%s) + local twice_poll_interval=$((POLL_INTERVAL * 2)) + local cutoff=$((current_time - twice_poll_interval)) + awk -v cutoff="${cutoff}" \ + '$1 >= cutoff {print}' "${DEDUP_FILE}" > "${DEDUP_FILE}.tmp" + mv "${DEDUP_FILE}.tmp" "${DEDUP_FILE}" +} + +process_events_file() { + local cycle_file="$1" + + if [[ -s "${cycle_file}" ]]; then + sort_events_file "${cycle_file}" + deduplicate_events + cleanup_dedup_file + rm -f "${cycle_file}" "${SORTED_FILE}" + fi +} + +## Collect events and information +# Time, this is triggered only once at the start +get_time_info() { + ansible -i "${CI_INVENTORY}" -m shell -a "date" all 2>>"${SLOG_FILE}" | \ + awk -v script_ts="$(get_current_timestamp)" ' + /CHANGED/ { + host=$1 + if (getline date_out) { + print script_ts " [TIME:" host "] " date_out + } + } + ' >> "${TIMELINE_LOG_FILE}" +} + +# Podman events +get_podman_events() { + local since_time="$1" # Format: "25m", "10s" + + # Use --until '+1s' to make the command non-blocking + ansible -i "${CI_INVENTORY}" -m shell -a \ + "sudo podman events --format {% raw %} {% raw %} '{{.Time}}|{{.Type}} {{.Status}} {{.Name}} {{.Image}}' --since $since_time --until '+1s' {% endraw %} {{ '{' }}% endraw %{{ '}' }} | awk '!/health_status/ {print}'" \ + computes 2>>"${SLOG_FILE}" | \ + awk ' + BEGIN { compute = "" } + /^compute/ { + compute = $1 + next + } + { + line = compute "|" $0 + print line + }' | sort > "${PODMAN_EVENTS_FILE}" +} + +# Collect OpenShift events +get_openshift_events() { + local since_time=$1 # Format: "25 minutes", "10 seconds" + local event_file=$2 + local namespace=$3 + + # Get events from the specified namespace and filter for relevant events + oc get events -n "${namespace}" 
-o json --sort-by='.lastTimestamp' | \ + # oc doens't have a `--since` parameter. Here we assume that + # TZ are the same on the controller and the server. + jq -r --arg time "$(date --iso-8601=s -d "$since_time ago")" \ + '.items[] | + select(.lastTimestamp >= $time) | + # Filter for important events: deletions, creations, unhealthy states, + # and failures + select( + (.reason | test("Delete|Deleted|Killing|Removed")) or + (.reason | test("Create|Created|Scheduled|Started|Pulled")) or + (.reason | test("Unhealthy|Failed|Error|BackOff|Evicted|Warning")) or + (.type == "Warning") or + (.message | test("fail|error|unable|cannot|denied|exceeded|invalid|conflict|timeout|refused|rejected")) + ) | + "\(.lastTimestamp)|\(.type) \(.reason) \(.involvedObject.kind)/\(.involvedObject.name): \(.message)"' \ + > "$event_file" +} + +# Log update event changes +get_update_events() { + local cycle_file="$1" + local update_event_lock_file="${BASE_DIR}/current_update_event.lock" + + if [[ ! -f $UPDATE_EVENT_FILE ]]; then + echo "Update event file not found. Creating empty file..." >> "${SLOG_FILE}" + touch $UPDATE_EVENT_FILE + fi + + # Use flock to ensure exclusive access: we don't want to truncate + # the file while it's being written by `update_event.sh` + ( + flock -x 200 + + # If file exists and has content + if [[ -s "${UPDATE_EVENT_FILE}" ]]; then + process_update_file "${cycle_file}" + + # Truncate the file after processing all events + : > "${UPDATE_EVENT_FILE}" + fi + ) 200>"${update_event_lock_file}" +} + +process_podman_file() { + local cycle_file="$1" + while IFS= read -r line; do + local compute + local raw_time + compute="$(echo "$line" | cut -d '|' -f1)" + raw_time="$(echo "$line" | cut -d '|' -f2)" + # Stripping UTC like part from "... +0000 UTC" Podman date + # output which is not supported by the `date` command. 
+ local local_time + local_time="$(date --iso-8601=n -d "${raw_time% [A-Z]*}" 2>/dev/null || echo "${raw_time}")" + local message + message="$(echo "$line" | cut -d '|' -f3-)" + if [ -n "${message}" ]; then + echo "${local_time}|${local_time} [PODMAN:${compute}] ${message}" >> "${cycle_file}" + fi + done < "${PODMAN_EVENTS_FILE}" +} + +process_openshift_file() { + local event_file="$1" + local cycle_file="$2" + local namespace="$3" + while IFS= read -r line; do + local raw_time + local local_time + local message + raw_time="$(echo "$line" | cut -d '|' -f1)" + local_time="$(date --iso-8601=n -d "${raw_time}" 2>/dev/null || echo "${raw_time}")" + message="$(echo "$line" | cut -d '|' -f2-)" + if [ -n "${message}" ]; then + echo "${local_time}|${local_time} [OPENSHIFT:${namespace}] ${message}" \ + >> "${cycle_file}" + fi + done < "${event_file}" +} + +process_update_file() { + local cycle_file="$1" + + while IFS= read -r line; do + local timestamp + local event + timestamp="$(echo "$line" | cut -d '|' -f1)" + event="$(echo "$line" | cut -d '|' -f2-)" + echo "${timestamp}|${timestamp} [UPDATE EVENT] ${event}" \ + >> "${cycle_file}" + done < "${UPDATE_EVENT_FILE}" +} + +## Event processing. 
+# Time since last check +calculate_since_time() { + local last_check_time="$1" + local now_sec + local last_sec + now_sec=$(date +%s) + last_sec=$(date -d "$last_check_time" +%s 2>/dev/null || echo 0) + local diff_seconds=$(( now_sec - last_sec )) + + echo $diff_seconds +} + +# Main driver +collect_and_process_events() { + local since_time="$1" + local cycle_file="$2" + + # Update events + get_update_events "${cycle_file}" + + # Podman events + get_podman_events "${since_time}s" + if [[ -s "${PODMAN_EVENTS_FILE}" ]]; then + process_podman_file "${cycle_file}" + fi + + # OpenShift events + if [ -e "${KUBECONFIG}" ]; then + for namespace in "${OS_NAMESPACES[@]}"; do + local events_file="${OCP_EVENTS_DIR}/${namespace}_events.txt" + + get_openshift_events "${since_time} seconds" \ + "${events_file}" "$namespace" + + if [[ -s "${events_file}" ]]; then + process_openshift_file "${events_file}" "${cycle_file}" "${namespace}" + fi + done + fi + + process_events_file "${cycle_file}" +} + +# Initial gathering of states +initialize() { + echo "Gathering initial states..." > "${SLOG_FILE}" + + get_time_info + + # Get initial events + local initial_time="120" # Look back 2 minutes for initial + # events + + get_current_timestamp > "${MONITOR_LAST_CHECK_FILE}" + + # Get initial events + collect_and_process_events "${initial_time}" "${CYCLE_EVENTS_FILE}" +} + +# Main monitoring loop +monitor() { + echo "Starting monitoring loop..." 
+ local last_check_time + local cycle_file + local since_time + + while true; do + if [[ -f "${MONITOR_LAST_CHECK_FILE}" ]]; then + last_check_time="$(cat "${MONITOR_LAST_CHECK_FILE}")" + else + last_check_time="$(get_current_timestamp)" + fi + + cycle_file="${TMP_DIR}/cycle_events_$(date +%s).txt" + touch "${cycle_file}" + + since_time="$(calculate_since_time "${last_check_time}")" + + # Add some overlap to ensure we don't miss any event + since_time=$((since_time + (POLL_INTERVAL / 3))) + + collect_and_process_events "${since_time}" "${cycle_file}" + + get_current_timestamp > "${MONITOR_LAST_CHECK_FILE}" + + if $TERMINATE_REQUESTED; then + echo "Termination request processed. Exiting..." + exit 0 + fi + + sleep "${POLL_INTERVAL}" + done +} + +case $ACTION in + init) setup_tmp_dir init; initialize ;; + monitor) setup_tmp_dir monitor; monitor ;; + all) setup_tmp_dir; initialize; monitor ;; + *) echo "Choose between all, init and monitor for action"; exit 1; +esac diff --git a/roles/update/templates/update_event.sh.j2 b/roles/update/templates/update_event.sh.j2 new file mode 100644 index 0000000000..122320f720 --- /dev/null +++ b/roles/update/templates/update_event.sh.j2 @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# To prevent date sorting issues with OpenShift logs which only have +# second precision, we pause for one second to ensure our entry +# follows OpenShift log events. 
+sleep 1 + +CURRENT_EVENT=( "$@" ) + +if [ -z "${CURRENT_EVENT}" ]; then + echo "Please provide a event" + CURRENT_EVENT="UNKNOWN" +fi + +UPDATE_EVENT_FILE="{{ cifmw_update_artifacts_basedir }}/current_update_event.log" +UPDATE_EVENT_LOCK_FILE="{{ cifmw_update_artifacts_basedir }}/current_update_event.lock" + +# Use flock to safely append the update event +( + flock -x 200 + echo "$(date --iso-8601=n)|${CURRENT_EVENT[@]}" >> "${UPDATE_EVENT_FILE}" +) 200>"${UPDATE_EVENT_LOCK_FILE}" + +# Again, to ensure our entry precedes OpenShift log events. +sleep 1 From 26ccf80933595ccfbde22e2143b50320a8b0c156 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 5 Jun 2025 11:04:45 +0200 Subject: [PATCH 154/480] Print /etc/redhat-release after setting subscription or enabling repos Sometimes the CI jobs cannot find packages that should be available in the repository matching the RHEL release the CI image is based on, because those packages are only published for a newer release. Print the release string so it is easy to verify which version is actually in use.
Signed-off-by: Daniel Pawlik --- .../tasks/login_registries.yml | 32 ++++++++++++------- .../tasks/prepare_overcloud.yml | 9 ++++++ .../tasks/prepare_undercloud.yml | 8 +++++ roles/ci_setup/tasks/repos.yml | 9 ++++++ 4 files changed, 47 insertions(+), 11 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/login_registries.yml b/roles/adoption_osp_deploy/tasks/login_registries.yml index 4aa1c43e09..2eba3d5496 100644 --- a/roles/adoption_osp_deploy/tasks/login_registries.yml +++ b/roles/adoption_osp_deploy/tasks/login_registries.yml @@ -18,17 +18,27 @@ when: - cifmw_adoption_osp_deploy_rhsm_org is defined - cifmw_adoption_osp_deploy_rhsm_key is defined - become: true - no_log: true - community.general.redhat_subscription: - activationkey: "{{ cifmw_adoption_osp_deploy_rhsm_key }}" - org_id: "{{ cifmw_adoption_osp_deploy_rhsm_org }}" - force_register: true - state: present - retries: 5 - delay: 30 - register: _rh_result - until: not _rh_result.failed + block: + - name: Make redhat subscription + become: true + no_log: true + community.general.redhat_subscription: + activationkey: "{{ cifmw_adoption_osp_deploy_rhsm_key }}" + org_id: "{{ cifmw_adoption_osp_deploy_rhsm_org }}" + force_register: true + state: present + retries: 5 + delay: 30 + register: _rh_result + until: not _rh_result.failed + + - name: Get current /etc/redhat-release + ansible.builtin.command: cat /etc/redhat-release + register: _current_rh_release + + - name: Print current /etc/redhat-release + ansible.builtin.debug: + msg: "{{ _current_rh_release.stdout }}" - name: Login in container registry when: diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 96b7faccae..6902c4260e 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -121,6 +121,15 @@ loop_var: _vm pause: 1 + - name: Get current /etc/redhat-release + delegate_to: "{{ _vm }}" + 
ansible.builtin.command: cat /etc/redhat-release + register: _current_rh_release + + - name: Print current /etc/redhat-release + ansible.builtin.debug: + msg: "{{ _current_rh_release.stdout }}" + - name: Copy network data file if it's not a template when: _network_data_extension != '.j2' delegate_to: "osp-undercloud-0" diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index bf5871a059..8e49301122 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -29,6 +29,14 @@ name: "{{ cifmw_adoption_osp_deploy_repos }}" state: enabled + - name: Get current /etc/redhat-release + ansible.builtin.command: cat /etc/redhat-release + register: _current_rh_release + + - name: Print current /etc/redhat-release + ansible.builtin.debug: + msg: "{{ _current_rh_release.stdout }}" + - name: Install director packages become: true ansible.builtin.dnf: diff --git a/roles/ci_setup/tasks/repos.yml b/roles/ci_setup/tasks/repos.yml index e18eadeda9..0b4830760c 100644 --- a/roles/ci_setup/tasks/repos.yml +++ b/roles/ci_setup/tasks/repos.yml @@ -39,6 +39,15 @@ name: "{{ item }}" state: "{{ rhsm_repo_state | default('enabled') }}" loop: "{{ _repos }}" + + - name: Get current /etc/redhat-release + ansible.builtin.command: cat /etc/redhat-release + register: _current_rh_release + + - name: Print current /etc/redhat-release + ansible.builtin.debug: + msg: "{{ _current_rh_release.stdout }}" + rescue: - name: RHSM unavailable ansible.builtin.debug: From 9381c3dadf18beff6acf469574de32a5674d8b69 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Thu, 15 May 2025 10:22:40 +0530 Subject: [PATCH 155/480] [repo_setup] Add cifmw_repo_setup_venv var Currently repo-setup gets installed with {{ cifmw_repo_setup_basedir }}/venv. This venv is shared with other roles. Dependencies coming from different repo causes issue with repo-setup. 
Currently the ansible-core deps installed via ci-framework get used in repo-setup, causing the following issue: ``` HTTPSConnection.__init__() got an unexpected keyword argument 'cert_file'. Failed to create HashInfo object ``` Updating ansible-core would fix the issue but it may cause issues with other ci-framework roles. In order to avoid that, we are adding a separate var to create a separate venv, keeping the repo-setup dependencies separate and avoiding issues. Jira: OSPRH-16694 Signed-off-by: Chandan Kumar (raukadah) --- roles/repo_setup/README.md | 1 + roles/repo_setup/defaults/main.yml | 1 + roles/repo_setup/molecule/default/converge.yml | 3 ++- roles/repo_setup/tasks/artifacts.yml | 2 +- roles/repo_setup/tasks/cleanup.yml | 2 +- roles/repo_setup/tasks/configure.yml | 2 +- roles/repo_setup/tasks/install.yml | 7 ++++--- 7 files changed, 11 insertions(+), 7 deletions(-) diff --git a/roles/repo_setup/README.md b/roles/repo_setup/README.md index 46dc74eb86..6b7f41657f 100644 --- a/roles/repo_setup/README.md +++ b/roles/repo_setup/README.md @@ -12,6 +12,7 @@ using `cifmw_repo_setup_src` role default var. ## Parameters * `cifmw_repo_setup_basedir`: (String) Installation base directory. Defaults to `cifmw_basedir` which defaults to `~/ci-framework-data`. +* `cifmw_repo_setup_venv`: (String) repo-setup virtualenv. Defaults to `{{ cifmw_repo_setup_basedir }}/venv/repo-setup`. * `cifmw_repo_setup_promotion`: (String) Promotion line you want to deploy. Defaults to `current-podified`. * `cifmw_repo_setup_branch`: (String) Branch/release you want to deploy. Defaults to `zed`. * `cifmw_repo_setup_dlrn_uri`: (String) DLRN base URI. Defaults to `https://trunk.rdoproject.org/`.
diff --git a/roles/repo_setup/defaults/main.yml b/roles/repo_setup/defaults/main.yml index 4cddb08773..61d4bd8ed0 100644 --- a/roles/repo_setup/defaults/main.yml +++ b/roles/repo_setup/defaults/main.yml @@ -20,6 +20,7 @@ # To get dlrn md5 hash for components [baremetal,cinder,clients,cloudops,common, # compute,glance,manila,network,octavia,security,swift,tempest,podified,ui,validation] cifmw_repo_setup_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" +cifmw_repo_setup_venv: "{{ cifmw_repo_setup_basedir }}/venv/repo-setup" cifmw_repo_setup_promotion: "current-podified" cifmw_repo_setup_branch: "antelope" cifmw_repo_setup_dlrn_uri: "https://trunk.rdoproject.org/" diff --git a/roles/repo_setup/molecule/default/converge.yml b/roles/repo_setup/molecule/default/converge.yml index e1246c5d1f..2aa6e44038 100644 --- a/roles/repo_setup/molecule/default/converge.yml +++ b/roles/repo_setup/molecule/default/converge.yml @@ -21,6 +21,7 @@ cifmw_repo_setup_os_release: centos cifmw_repo_setup_component_name: baremetal cifmw_repo_setup_component_promotion_tag: consistent + cifmw_repo_setup_venv: "{{ cifmw_repo_setup_basedir }}/venv/repo-setup_test" roles: - role: "repo_setup" tasks: @@ -52,7 +53,7 @@ path: "{{ ansible_user_dir }}/ci-framework-data/{{ item }}" loop: - 'artifacts/repositories/delorean.repo.md5' - - 'venv' + - 'venv/repo_setup_test' - 'artifacts/repositories' - name: Assert file status ansible.builtin.assert: diff --git a/roles/repo_setup/tasks/artifacts.yml b/roles/repo_setup/tasks/artifacts.yml index 96ac16ed22..b20a05d290 100644 --- a/roles/repo_setup/tasks/artifacts.yml +++ b/roles/repo_setup/tasks/artifacts.yml @@ -2,7 +2,7 @@ - name: Run repo-setup-get-hash ansible.builtin.command: cmd: >- - {{ cifmw_repo_setup_basedir }}/venv/bin/repo-setup-get-hash + {{ cifmw_repo_setup_venv }}/bin/repo-setup-get-hash --dlrn-url {{ cifmw_repo_setup_dlrn_uri[:-1] }} --os-version {{ cifmw_repo_setup_os_release }}{{ 
cifmw_repo_setup_dist_major_version }} --release {{ cifmw_repo_setup_branch }} diff --git a/roles/repo_setup/tasks/cleanup.yml b/roles/repo_setup/tasks/cleanup.yml index 79b3660b44..7edb1972b0 100644 --- a/roles/repo_setup/tasks/cleanup.yml +++ b/roles/repo_setup/tasks/cleanup.yml @@ -16,7 +16,7 @@ - name: Remove virtualenv ansible.builtin.file: - path: "{{ cifmw_repo_setup_basedir }}/venv" + path: "{{ cifmw_repo_setup_venv }}" state: absent - name: Remove repositories diff --git a/roles/repo_setup/tasks/configure.yml b/roles/repo_setup/tasks/configure.yml index 9bd9936da5..a88c66796e 100644 --- a/roles/repo_setup/tasks/configure.yml +++ b/roles/repo_setup/tasks/configure.yml @@ -11,7 +11,7 @@ become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" ansible.builtin.command: cmd: >- - {{ cifmw_repo_setup_basedir }}/venv/bin/repo-setup + {{ cifmw_repo_setup_venv }}/bin/repo-setup {{ cifmw_repo_setup_promotion }} {{ cifmw_repo_setup_additional_repos }} -d {{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }} -b {{ cifmw_repo_setup_branch }} diff --git a/roles/repo_setup/tasks/install.yml b/roles/repo_setup/tasks/install.yml index e384f2df85..e47676adeb 100644 --- a/roles/repo_setup/tasks/install.yml +++ b/roles/repo_setup/tasks/install.yml @@ -7,6 +7,7 @@ loop: - tmp - artifacts/repositories + - venv/repo_setup - name: Make sure git-core package is installed become: true @@ -25,12 +26,12 @@ - name: Initialize python venv and install requirements ansible.builtin.pip: - virtualenv: "{{ cifmw_repo_setup_basedir }}/venv" + virtualenv: "{{ cifmw_repo_setup_venv }}" requirements: "{{ cifmw_repo_setup_basedir }}/tmp/repo-setup/requirements.txt" virtualenv_command: "python3 -m venv --system-site-packages --upgrade-deps" - name: Install repo-setup package ansible.builtin.command: - cmd: "{{ cifmw_repo_setup_basedir }}/venv/bin/python setup.py install" + cmd: "{{ cifmw_repo_setup_venv }}/bin/python setup.py install" chdir: "{{ 
cifmw_repo_setup_basedir }}/tmp/repo-setup" - creates: "{{ cifmw_repo_setup_basedir }}/venv/bin/repo-setup" + creates: "{{ cifmw_repo_setup_venv }}/bin/repo-setup" From b0fbfb38a275626fc8f54e8fd8f409de146936a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Fri, 6 Jun 2025 12:14:56 +0200 Subject: [PATCH 156/480] Rename _network_data in create_cloud_init_iso to _network_data_config_drive There is a conflict of variable names. _network_data is already defined in [1] and exposed using set_fact. The variable for config_drive should be named differently so as not to generate wrong network-configs for VMs. [1] https://github.com/openstack-k8s-operators/ci-framework/blob/main/roles/libvirt_manager/tasks/create_networks.yml#L185 --- roles/libvirt_manager/tasks/create_cloud_init_iso.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml index 9d57b94cf2..b05fc30fa5 100644 --- a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml +++ b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml @@ -50,12 +50,12 @@ when: - vm_data.networkconfig | type_debug == "dict" ansible.builtin.set_fact: - _network_data: "{{ vm_data.networkconfig }}" + _libvirt_manager_network_data: "{{ vm_data.networkconfig }}" - name: "Define the network config for each vm" when: - vm_data.networkconfig | type_debug == "list" ansible.builtin.set_fact: - _network_data: "{{ vm_data.networkconfig[vm_idx] }}" + _libvirt_manager_network_data: "{{ vm_data.networkconfig[vm_idx] }}" - name: "Call the config_drive role" vars: @@ -63,7 +63,7 @@ _default_uuid: "{{ 99999999 | random(seed=vm) | to_uuid | lower }}" cifmw_config_drive_uuid: "{{ _uuid.stdout | default(_default_uuid) | trim}}" cifmw_config_drive_hostname: "{{ vm }}" - cifmw_config_drive_networkconfig: "{{ _network_data | default(None) }}" + cifmw_config_drive_networkconfig: "{{ _libvirt_manager_network_data |
default(None) }}" cifmw_config_drive_userdata: "{{ _user_data }}" ansible.builtin.include_role: name: config_drive From f7f8c57d09b976994f1af38b0b0cc69627db7940 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 9 Jun 2025 10:39:10 +0530 Subject: [PATCH 157/480] Temporary override keystone config in job Until [1] get's included in RDO antelope and promoted temporary patch to workaround the issue. [1] https://review.opendev.org/c/openstack/oslo.cache/+/952014 Related-Issue: #OSPCIX-901 --- zuul.d/tempest_multinode.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/zuul.d/tempest_multinode.yaml b/zuul.d/tempest_multinode.yaml index 0d76851d4a..3375a82f94 100644 --- a/zuul.d/tempest_multinode.yaml +++ b/zuul.d/tempest_multinode.yaml @@ -72,6 +72,26 @@ - '@scenarios/centos-9/ci.yml' - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/ceph_backends.yml' + # Temporary until https://review.opendev.org/c/openstack/oslo.cache/+/952014 + # is included in rdo and promoted + cifmw_edpm_prepare_kustomizations: + - apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: openstack + patches: + - patch: |- + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: unused + spec: + keystone: + template: + customServiceConfig: | + [cache] + memcache_sasl_enabled = true + target: + kind: OpenStackControlPlane cifmw_test_operator_tempest_include_list: | ^tempest.api.identity.*.v3 ^tempest.api.volume From afcf42b8786995235b4d3849130d18f35a164a41 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 9 Jun 2025 10:22:39 +0530 Subject: [PATCH 158/480] Fix pre_tests not running This patch fix the issue created by the recent patch https://github.com/openstack-k8s-operators/ci-framework/pull/3027 Now, it will run pre_tests without checking cifmw_run_tests var, just like the older playbook way. 
--- deploy-edpm.yml | 16 +++------------- roles/cifmw_setup/tasks/run_tests.yml | 2 ++ update-edpm.yml | 5 ----- 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 0df27ec378..a479c9a7af 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -80,7 +80,7 @@ tags: - edpm -- name: Post-deployment admin setup steps +- name: Run Post-deployment admin setup steps, test, and compliance scan hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: @@ -91,10 +91,6 @@ tags: - admin-setup -- name: Run cifmw_setup run_tests.yml - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - name: Run Test vars: pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" @@ -102,15 +98,9 @@ ansible.builtin.import_role: name: cifmw_setup tasks_from: run_tests.yml - when: - - cifmw_run_tests | default('false') | bool tags: - run-tests -- name: Run operators compliance scans - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - name: Run compliance scan for controllers ansible.builtin.import_role: name: compliance @@ -118,8 +108,8 @@ cifmw_compliance_podman_username: "{{ cifmw_registry_token.credentials.username }}" cifmw_compliance_podman_password: "{{ cifmw_registry_token.credentials.password }}" when: cifmw_run_operators_compliance_scans | default('false') | bool - tags: - - compliance + tags: + - compliance - name: Run compliance scan for computes hosts: "{{ groups['computes'] | default ([]) }}" diff --git a/roles/cifmw_setup/tasks/run_tests.yml b/roles/cifmw_setup/tasks/run_tests.yml index af72ec08ab..2a19bf3fc0 100644 --- a/roles/cifmw_setup/tasks/run_tests.yml +++ b/roles/cifmw_setup/tasks/run_tests.yml @@ -10,9 +10,11 @@ - tests ansible.builtin.import_role: name: "{{ cifmw_run_test_role | default('tempest') }}" + when: cifmw_run_tests | default('false') | bool - name: Run post_tests hooks vars: step: post_tests 
ansible.builtin.import_role: name: run_hook + when: cifmw_run_tests | default('false') | bool diff --git a/update-edpm.yml b/update-edpm.yml index 5ef1421ef9..cad55172d2 100644 --- a/update-edpm.yml +++ b/update-edpm.yml @@ -33,14 +33,9 @@ ansible.builtin.import_role: name: cifmw_setup tasks_from: run_tests.yml - when: - - cifmw_run_tests | default('false') | bool tags: - run-tests -- name: Inject status flag - hosts: "{{ cifmw_target_host | default('localhost') }}" - tasks: - name: Inject success flag ansible.builtin.file: path: "{{ ansible_user_dir }}/cifmw-success" From a131a7400a4ee7ea0de5aaaba1323e91721f869d Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 9 Jun 2025 12:09:04 +0200 Subject: [PATCH 159/480] Remove printing redhat-release in adoption_osp_deploy The task should iterate on list of hosts, which should be provided later as "_vm" variable. That was missing, so the Ansible was failing because of undefined variable. Printing the redhat-release should be added into pre-run config after registration, but in this kind of CI jobs we did not spotted any issues now. 
Signed-off-by: Daniel Pawlik --- roles/adoption_osp_deploy/tasks/prepare_overcloud.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 6902c4260e..96b7faccae 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -121,15 +121,6 @@ loop_var: _vm pause: 1 - - name: Get current /etc/redhat-release - delegate_to: "{{ _vm }}" - ansible.builtin.command: cat /etc/redhat-release - register: _current_rh_release - - - name: Print current /etc/redhat-release - ansible.builtin.debug: - msg: "{{ _current_rh_release.stdout }}" - - name: Copy network data file if it's not a template when: _network_data_extension != '.j2' delegate_to: "osp-undercloud-0" From f81801039769671cf761a3f0acd8c86cd2988833 Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Tue, 3 Jun 2025 17:29:15 +0100 Subject: [PATCH 160/480] Add nat64 appliance for adoption --- create-infra.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/create-infra.yml b/create-infra.yml index 6d7e8b4149..6aac66b364 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -103,6 +103,14 @@ apply: delegate_to: "{{ cifmw_target_host | default('localhost') }}" + - name: Bootstrap nat64 if needed + when: + - cifmw_use_libvirt | default(false) | bool + - cifmw_use_nat64 | default(false) | bool + ansible.builtin.include_role: + name: reproducer + tasks_from: nat64_appliance + # This bootstraps the controller-0 node, and RedFish virtual BMC is # spawned if cifmw_use_sushy_emulator is enabled. 
- name: Bootstrap sushy-emulator (RedFish Virtual BMC) on controller-0 From ededd7105c20be51b1905d4e900539034ac87ddb Mon Sep 17 00:00:00 2001 From: bshewale Date: Mon, 9 Jun 2025 10:59:39 +0530 Subject: [PATCH 161/480] Sync the src dir with all hosts in inventory in reproducer Some files are required on the instance spawned by the reproducer. Co-Authored-By: Daniel Pawlik --- roles/reproducer/tasks/configure_controller.yml | 9 +++++++++ roles/reproducer/tasks/libvirt_layout.yml | 6 ------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 305b8ad9ef..663aa60669 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -350,6 +350,15 @@ name: sushy_emulator tasks_from: verify.yml + - name: Sync local repositories to other hosts + delegate_to: localhost + ansible.posix.synchronize: + src: "{{ cifmw_reproducer_src_dir }}/" + dest: "zuul@{{ item }}:{{ cifmw_reproducer_src_dir }}" + archive: true + recursive: true + loop: "{{ groups['computes'] + groups['controllers'] }}" + # NOTE: src dir is synchronized in libvirt_layout.yml - name: Install ansible dependencies register: _async_dep_install diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index c9170dd6ac..56da59c775 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -48,12 +48,6 @@ rsync -r {{ cifmw_reproducer_basedir }}/reproducer-inventory/ zuul@controller-0:reproducer-inventory - - name: Push src dir to controller-0 - ansible.builtin.command: # noqa: command-instead-of-module - cmd: >- - rsync -r {{ cifmw_reproducer_src_dir }}/ - zuul@controller-0:src - - name: Run post tasks in OCP cluster case when: - _use_ocp | bool From a9203ec413fc5cbdab48f4110b5fb055bca29b66 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 28 Mar 2025 15:57:52 
+0530 Subject: [PATCH 162/480] [ci bootstrap] Do not add external gateway to router For clouds where instances directly attached to public network, having a router with default gateway is unnecessary as secondary network is mostly used for internal traffic. This will save some external IPs i.e 1 per job. This needs to be set only when the test vm needs to have external connectivity which is not needed in most cases. Depends-On: https://review.rdoproject.org/r/c/config/+/57579 Related-Issue: OSPCIX-771 --- zuul.d/adoption.yaml | 4 ++-- zuul.d/base.yaml | 2 +- zuul.d/edpm_multinode.yaml | 8 ++++---- zuul.d/kuttl_multinode.yaml | 2 +- zuul.d/podified_multinode.yaml | 2 +- zuul.d/tempest_multinode.yaml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index ef537ac056..f9e7aa10da 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -47,7 +47,7 @@ networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 @@ -221,7 +221,7 @@ networks: &multinode_networks default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 25e92d2fc8..2f4b8baced 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -165,7 +165,7 @@ networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index 055bde916e..ded4fae462 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -12,7 +12,7 @@ 
networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 @@ -75,7 +75,7 @@ networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 @@ -152,7 +152,7 @@ networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 @@ -244,7 +244,7 @@ networks: default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index e45498d190..4c86308c40 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -14,7 +14,7 @@ default: range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" router: false internal-api: vlan: 20 diff --git a/zuul.d/podified_multinode.yaml b/zuul.d/podified_multinode.yaml index 22458538a5..da4a3d5b75 100644 --- a/zuul.d/podified_multinode.yaml +++ b/zuul.d/podified_multinode.yaml @@ -22,7 +22,7 @@ default: range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" internal-api: vlan: 20 range: 172.17.0.0/24 diff --git a/zuul.d/tempest_multinode.yaml b/zuul.d/tempest_multinode.yaml index 3375a82f94..48a1561445 100644 --- a/zuul.d/tempest_multinode.yaml +++ 
b/zuul.d/tempest_multinode.yaml @@ -22,7 +22,7 @@ default: range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" - router_net: "{{ ('ibm' in nodepool.cloud) | ternary('hostonly', 'public') }}" + router_net: "" internal-api: vlan: 20 range: 172.17.0.0/24 From 6c6a85a33dc9404be1c45a27836c42634bde2a34 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Wed, 28 May 2025 13:08:19 +0300 Subject: [PATCH 163/480] Enable cleanup for test operator This patch is meant to allow independent cleanup of resources created by the test operator. - Use files to manage CRs - Add cleanup tasks to remove CRs and resources created by test operator - Reduce redundant evaluations in test_operator role --- clean_openstack_deployment.yaml | 5 + roles/test_operator/README.md | 18 ++ roles/test_operator/defaults/main.yml | 16 + roles/test_operator/tasks/cleanup-run.yaml | 35 +++ roles/test_operator/tasks/cleanup.yaml | 24 ++ roles/test_operator/tasks/collect-logs.yaml | 81 +++++ .../tasks/run-test-operator-job.yml | 288 +++++------------- 7 files changed, 260 insertions(+), 207 deletions(-) create mode 100644 roles/test_operator/tasks/cleanup-run.yaml create mode 100644 roles/test_operator/tasks/cleanup.yaml create mode 100644 roles/test_operator/tasks/collect-logs.yaml diff --git a/clean_openstack_deployment.yaml b/clean_openstack_deployment.yaml index 1bbc61b03b..9d7667e501 100644 --- a/clean_openstack_deployment.yaml +++ b/clean_openstack_deployment.yaml @@ -1,6 +1,11 @@ - name: Clean OpenStack deployment hosts: "{{ target_host | default('localhost') }}" tasks: + - name: Clean up testing resources + ansible.builtin.include_role: + name: test_operator + tasks_from: cleanup + - name: Clean up OpenStack operators vars: cifmw_kustomize_deploy_keep_generated_crs: false diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 0f648e218b..0dfb4eb651 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -13,6 +13,24 
@@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. NOTE: This parameter is deprecated, please use `cifmw_test_operator_tempest_concurrency` instead. Default value: `8` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` * `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. +* `cifmw_test_operator_crs_path`: (String) The path into which the tests CRs file will be created in. Default value: `{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/test-operator-crs` +* `cifmw_test_operator_log_pod_definition`: (Object) The CR definition template for creating the test log pod. Default value: +``` + apiVersion: v1 + kind: Pod + metadata: + name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" + namespace: "{{ cifmw_test_operator_namespace }}" + spec: + containers: + - name: test-operator-logs-container + image: "{{ cifmw_test_operator_logs_image }}" + command: ["sleep"] + args: ["infinity"] + volumeMounts: "{{ volume_mounts }}" + volumes: "{{ volumes }}" + tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" +``` * `cifmw_test_operator_default_groups`: (List) List of groups in the include list to search for tests to be executed. Default value: `[ 'default' ]` * `cifmw_test_operator_default_jobs`: (List) List of jobs in the exclude list to search for tests to be excluded. Default value: `[ 'default' ]` * `cifmw_test_operator_dry_run`: (Boolean) Whether test-operator should run or not. 
Default value: `false` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index fa66b163fb..586e96fcf6 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -42,6 +42,22 @@ cifmw_test_operator_storage_class: "{{ cifmw_test_operator_storage_class_prefix cifmw_test_operator_delete_logs_pod: false cifmw_test_operator_privileged: true cifmw_test_operator_selinux_level: "s0:c478,c978" +cifmw_test_operator_crs_path: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/test-operator-crs" +cifmw_test_operator_log_pod_definition: + apiVersion: v1 + kind: Pod + metadata: + name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" + namespace: "{{ cifmw_test_operator_namespace }}" + spec: + containers: + - name: test-operator-logs-container + image: "{{ cifmw_test_operator_logs_image }}" + command: ["sleep"] + args: ["infinity"] + volumeMounts: "{{ volume_mounts }}" + volumes: "{{ volumes }}" + tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" # default test framework registry, namespace and tag can be overridden per test framework (tempest, tobiko, horizontest and ansibletest) cifmw_test_operator_default_registry: quay.io cifmw_test_operator_default_namespace: podified-antelope-centos9 diff --git a/roles/test_operator/tasks/cleanup-run.yaml b/roles/test_operator/tasks/cleanup-run.yaml new file mode 100644 index 0000000000..f3f8c2e5e9 --- /dev/null +++ b/roles/test_operator/tasks/cleanup-run.yaml @@ -0,0 +1,35 @@ +- name: Delete {{ run_test_fw }} + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + state: absent + src: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}.yaml" + wait: true + wait_timeout: 600 + +- name: Delete CRD for {{ run_test_fw }} + 
kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + kind: CustomResourceDefinition + state: absent + api_version: v1 + name: "{{ test_operator_crd_name }}" + namespace: "{{ cifmw_test_operator_namespace }}" + wait: true + wait_timeout: 600 + +- name: Delete test-operator-logs-pod + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + state: absent + api_version: v1 + src: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}-log-pod.yaml" + wait: true + wait_timeout: 600 + when: + - cifmw_test_operator_delete_logs_pod | bool or cifmw_test_operator_cleanup | bool diff --git a/roles/test_operator/tasks/cleanup.yaml b/roles/test_operator/tasks/cleanup.yaml new file mode 100644 index 0000000000..6437f55f0c --- /dev/null +++ b/roles/test_operator/tasks/cleanup.yaml @@ -0,0 +1,24 @@ +--- +- name: List all CR files in the test operator CRs path + ansible.builtin.find: + paths: "{{ cifmw_test_operator_crs_path }}" + patterns: "*.yaml" + register: test_operator_cr_files + +- name: Delete all CRs in OCP + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + state: absent + src: "{{ item.path }}" + wait: true + wait_timeout: 600 + loop: "{{ test_operator_cr_files.files }}" + failed_when: false + +- name: Delete test operator CRs files + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ test_operator_cr_files.files }}" diff --git a/roles/test_operator/tasks/collect-logs.yaml b/roles/test_operator/tasks/collect-logs.yaml new file mode 100644 index 0000000000..34d7413475 --- /dev/null +++ b/roles/test_operator/tasks/collect-logs.yaml @@ -0,0 
+1,81 @@ +- name: Reset volumes and volume_mounts to an empty list + ansible.builtin.set_fact: + volumes: [] + volume_mounts: [] + +- name: Get information about PVCs that store the logs + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + namespace: "{{ cifmw_test_operator_namespace }}" + kind: PersistentVolumeClaim + label_selectors: + - "instanceName={{ test_operator_instance_name }}" + register: logsPVCs + +- name: Set up volume mounts and volumes for all PVCs + ansible.builtin.set_fact: + volume_mounts: > + {{ + (volume_mounts | default([])) + [{ + 'name': "logs-volume-" ~ index, + 'mountPath': "/mnt/logs-{{ test_operator_instance_name }}-step-" ~ index + }] + }} + volumes: > + {{ + (volumes | default([])) + [{ + 'name': "logs-volume-" ~ index, + 'persistentVolumeClaim': { + 'claimName': pvc.metadata.name + } + }] + }} + loop: "{{ logsPVCs.resources }}" + loop_control: + loop_var: pvc + index_var: index + +- name: Write log pod definition to file + ansible.builtin.copy: + content: "{{ cifmw_test_operator_log_pod_definition }}" + dest: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}-log-pod.yaml" + mode: '0644' + +- name: Start test-operator-logs-pod + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + state: present + wait: true + src: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}-log-pod.yaml" + +- name: Ensure that the test-operator-logs-pod is Running + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ cifmw_test_operator_namespace }}" + kind: Pod + name: "test-operator-logs-pod-{{ 
run_test_fw }}-{{ test_operator_instance_name }}" + wait: true + register: logs_pod + until: logs_pod.resources[0].status.phase == "Running" + delay: 10 + retries: 20 + +- name: Get logs from test-operator-logs-pod + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + vars: + pod_path: mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} + ansible.builtin.shell: > + oc cp -n {{ cifmw_test_operator_namespace }} + test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} + {{ cifmw_test_operator_artifacts_basedir }} + loop: "{{ logsPVCs.resources }}" + loop_control: + index_var: index diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 72663c54f1..adc6aca853 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -27,236 +27,110 @@ ansible.builtin.debug: msg: "{{ test_operator_cr }}" -- name: Start tests - {{ run_test_fw }} - kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit)}}" - state: present - wait: true - definition: "{{ test_operator_cr }}" +- name: Not dry run block when: not cifmw_test_operator_dry_run | bool - -- name: Wait for the last Pod to be Completed - {{ run_test_fw }} - kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit) }}" - context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" - kind: Pod - label_selectors: - - "workflowStep={{ [(test_operator_workflow | length) - 1, 0] | max }}" - - "instanceName={{ test_operator_instance_name }}" - retries: "{{ (cifmw_test_operator_timeout / 10) | round | int }}" - delay: 10 - until: > - testpod.resources[0].status.phase | default(omit) == 
"Succeeded" or - testpod.resources[0].status.phase | default(omit) == "Failed" - ignore_errors: true - register: testpod - when: not cifmw_test_operator_dry_run | bool - -- name: Check whether timed out - {{ run_test_fw }} - ansible.builtin.set_fact: - testpod_timed_out: >- - {{ testpod.attempts == (cifmw_test_operator_timeout / 10) | round | int }} - when: not cifmw_test_operator_dry_run | bool - -- name: Collect logs - when: - - not cifmw_test_operator_dry_run | bool - - not testpod_timed_out block: - - name: Reset volumes and volume_mounts to an empty list - ansible.builtin.set_fact: - volumes: [] - volume_mounts: [] - - - name: Get information about PVCs that store the logs - kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit)}}" - namespace: "{{ cifmw_test_operator_namespace }}" - kind: PersistentVolumeClaim - label_selectors: - - "instanceName={{ test_operator_instance_name }}" - register: logsPVCs - - - name: Set up volume mounts and volumes for all PVCs - ansible.builtin.set_fact: - volume_mounts: > - {{ - (volume_mounts | default([])) + [{ - 'name': "logs-volume-" ~ index, - 'mountPath': "/mnt/logs-{{ test_operator_instance_name }}-step-" ~ index - }] - }} - volumes: > - {{ - (volumes | default([])) + [{ - 'name': "logs-volume-" ~ index, - 'persistentVolumeClaim': { - 'claimName': pvc.metadata.name - } - }] - }} - loop: "{{ logsPVCs.resources }}" - loop_control: - loop_var: pvc - index_var: index - - - name: Start test-operator-logs-pod + - name: Make sure test-operator CR directory exists + ansible.builtin.file: + path: "{{ cifmw_test_operator_crs_path }}" + state: directory + mode: '0755' + + - name: Write test-operator CR to file + ansible.builtin.copy: + content: "{{ test_operator_cr }}" + dest: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}.yaml" + mode: '0644' + + - name: Start tests - {{ 
run_test_fw }} kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit)}}" state: present wait: true - definition: - apiVersion: v1 - kind: Pod - metadata: - name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" - namespace: "{{ cifmw_test_operator_namespace }}" - spec: - containers: - - name: test-operator-logs-container - image: "{{ cifmw_test_operator_logs_image }}" - command: ["sleep"] - args: ["infinity"] - volumeMounts: "{{ volume_mounts }}" - volumes: "{{ volumes }}" - tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" + src: "{{ cifmw_test_operator_crs_path }}/{{ test_operator_instance_name }}.yaml" - - name: Ensure that the test-operator-logs-pod is Running + - name: Wait for the last Pod to be Completed - {{ run_test_fw }} kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit) }}" context: "{{ cifmw_openshift_context | default(omit) }}" namespace: "{{ cifmw_test_operator_namespace }}" kind: Pod - name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" - wait: true - register: logs_pod - until: logs_pod.resources[0].status.phase == "Running" + label_selectors: + - "workflowStep={{ [(test_operator_workflow | length) - 1, 0] | max }}" + - "instanceName={{ test_operator_instance_name }}" + retries: "{{ (cifmw_test_operator_timeout / 10) | round | int }}" delay: 10 - retries: 20 + until: > + testpod.resources[0].status.phase | default(omit) == "Succeeded" or + testpod.resources[0].status.phase | default(omit) == "Failed" + ignore_errors: true + register: testpod - - name: Get logs from test-operator-logs-pod - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" - vars: - pod_path: mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} - ansible.builtin.shell: > - 
oc cp -n {{ cifmw_test_operator_namespace }} - test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} - {{ cifmw_test_operator_artifacts_basedir }} - loop: "{{ logsPVCs.resources }}" - loop_control: - index_var: index - -- name: Get list of all pods - kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" - kind: Pod - register: pod_list - when: not cifmw_test_operator_dry_run | bool - -- name: Get test results from all test pods (Success / Fail) - register: test_pod_results - kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit) }}" - context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" - kind: Pod - label_selectors: - - "instanceName={{ test_operator_instance_name }}" - when: not cifmw_test_operator_dry_run | bool - -- name: Get status from test pods - when: not cifmw_test_operator_dry_run | bool - ansible.builtin.set_fact: - pod_status: >- - {{ - test_pod_results.resources | - map(attribute='status.phase') | - list | unique - }} - -- name: Check whether test pods finished successfully - when: not cifmw_test_operator_dry_run | bool - ansible.builtin.set_fact: - successful_execution: >- - {{ - pod_status | length == 1 and - pod_status | first == 'Succeeded' - }} + - name: Check whether timed out - {{ run_test_fw }} + ansible.builtin.set_fact: + testpod_timed_out: >- + {{ testpod.attempts == (cifmw_test_operator_timeout / 10) | round | int }} -- name: Fail fast if a pod did not succeed - {{ run_test_fw }} - when: - - not cifmw_test_operator_dry_run | bool - - cifmw_test_operator_fail_fast | bool - ansible.builtin.assert: - that: successful_execution + - name: Collect logs + when: + - not testpod_timed_out + 
ansible.builtin.include_tasks: collect-logs.yaml -- name: Save result - {{ run_test_fw }} - when: not cifmw_test_operator_dry_run | bool - ansible.builtin.set_fact: - test_operator_results: >- - {{ - test_operator_results | default({}) | - combine({run_test_fw: successful_execution}) - }} - -- name: Delete tempest and/or tobiko pods - when: - - cifmw_test_operator_cleanup | bool - - not cifmw_test_operator_dry_run | bool - block: - - name: Delete {{ run_test_fw }} - kubernetes.core.k8s: + - name: Get list of all pods + kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit)}}" - kind: "{{ test_operator_kind_name }}" - state: absent - api_version: test.openstack.org/v1beta1 - name: "{{ test_operator_instance_name }}" + context: "{{ cifmw_openshift_context | default(omit) }}" namespace: "{{ cifmw_test_operator_namespace }}" - wait: true - wait_timeout: 600 + kind: Pod + register: pod_list - - name: Delete CRD for {{ run_test_fw }} - kubernetes.core.k8s: + - name: Get test results from all test pods (Success / Fail) + register: test_pod_results + kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit)}}" - kind: CustomResourceDefinition - state: absent - api_version: v1 - name: "{{ test_operator_crd_name }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" namespace: "{{ cifmw_test_operator_namespace }}" - wait: true - wait_timeout: 600 + kind: Pod + label_selectors: + - "instanceName={{ test_operator_instance_name }}" + + - name: Get status from test pods + ansible.builtin.set_fact: + pod_status: >- + {{ + test_pod_results.resources | + map(attribute='status.phase') | + list | unique + }} + + - name: Check whether test pods finished successfully + 
ansible.builtin.set_fact: + successful_execution: >- + {{ + pod_status | length == 1 and + pod_status | first == 'Succeeded' + }} + + - name: Fail fast if a pod did not succeed - {{ run_test_fw }} + when: + - cifmw_test_operator_fail_fast | bool + ansible.builtin.assert: + that: successful_execution + + - name: Save result - {{ run_test_fw }} + ansible.builtin.set_fact: + test_operator_results: >- + {{ + test_operator_results | default({}) | + combine({run_test_fw: successful_execution}) + }} -- name: Delete test-operator-logs-pod - kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - api_key: "{{ cifmw_openshift_token | default(omit)}}" - context: "{{ cifmw_openshift_context | default(omit)}}" - kind: Pod - state: absent - api_version: v1 - name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" - namespace: "{{ cifmw_test_operator_namespace }}" - wait: true - wait_timeout: 600 - when: - - cifmw_test_operator_cleanup | bool and not cifmw_test_operator_dry_run | bool or - cifmw_test_operator_delete_logs_pod | bool + - name: Delete tempest and/or tobiko pods + when: + - cifmw_test_operator_cleanup | bool + ansible.builtin.include_tasks: cleanup-run.yaml From b121940b92f2d32a1c9a4619963084e6c09d60d1 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Tue, 10 Jun 2025 16:14:22 +0530 Subject: [PATCH 164/480] Temporary override keystone config in job v2 Until [1] get's included in RDO antelope and promoted temporary patch to workaround the issue. Previous PR[2] applied it on one job, moving it to parent job to cover other impacted jobs. 
[1] https://review.opendev.org/c/openstack/oslo.cache/+/952014 [2] https://github.com/openstack-k8s-operators/ci-framework/pull/3049 Related-Issue: #OSPCIX-901 --- zuul.d/edpm_multinode.yaml | 20 ++++++++++++++++++++ zuul.d/tempest_multinode.yaml | 20 -------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index ded4fae462..7efbcec1c6 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -317,6 +317,26 @@ cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' + # Temporary until https://review.opendev.org/c/openstack/oslo.cache/+/952014 + # is included in rdo and promoted + cifmw_edpm_prepare_kustomizations: + - apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: openstack + patches: + - patch: |- + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: unused + spec: + keystone: + template: + customServiceConfig: | + [cache] + memcache_sasl_enabled = true + target: + kind: OpenStackControlPlane run: - ci/playbooks/edpm/run.yml diff --git a/zuul.d/tempest_multinode.yaml b/zuul.d/tempest_multinode.yaml index 48a1561445..2d88eafc58 100644 --- a/zuul.d/tempest_multinode.yaml +++ b/zuul.d/tempest_multinode.yaml @@ -72,26 +72,6 @@ - '@scenarios/centos-9/ci.yml' - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/ceph_backends.yml' - # Temporary until https://review.opendev.org/c/openstack/oslo.cache/+/952014 - # is included in rdo and promoted - cifmw_edpm_prepare_kustomizations: - - apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - namespace: openstack - patches: - - patch: |- - apiVersion: core.openstack.org/v1beta1 - kind: OpenStackControlPlane - metadata: - name: unused - spec: - keystone: - template: - customServiceConfig: | - [cache] - memcache_sasl_enabled = true - target: - kind: OpenStackControlPlane 
cifmw_test_operator_tempest_include_list: | ^tempest.api.identity.*.v3 ^tempest.api.volume From c8b0cc090b3d0ff2f7d2955d2382b581cf962bfc Mon Sep 17 00:00:00 2001 From: bshewale Date: Tue, 10 Jun 2025 16:10:24 +0530 Subject: [PATCH 165/480] Sync the src dir with controllers in reproducer inventory --- roles/reproducer/tasks/configure_controller.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 663aa60669..7284a4981b 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -357,7 +357,7 @@ dest: "zuul@{{ item }}:{{ cifmw_reproducer_src_dir }}" archive: true recursive: true - loop: "{{ groups['computes'] + groups['controllers'] }}" + loop: "{{ groups['controllers'] }}" # NOTE: src dir is synchronized in libvirt_layout.yml - name: Install ansible dependencies From 16491ee07c58ecf298273aec228f91194ea3564b Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 4 Jun 2025 11:35:24 +0530 Subject: [PATCH 166/480] Use role instead of playbooks - nfs.yml Before simplifying 06-deploy-edpm.yml, it is necessary to take care of import_playbook calls within that play There are three import_playbook calls within 06-deploy-edpm.yml - validations.yml - nfs.yml - ceph.yml This PR takes care of nfs.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- playbooks/06-deploy-edpm.yml | 12 ++- playbooks/nfs.yml | 5 ++ roles/cifmw_nfs/README.md | 23 +++++ roles/cifmw_nfs/defaults/main.yml | 22 +++++ roles/cifmw_nfs/meta/main.yml | 30 +++++++ roles/cifmw_nfs/tasks/main.yml | 136 ++++++++++++++++++++++++++++++ 6 files changed, 227 insertions(+), 1 deletion(-) create mode 100644 roles/cifmw_nfs/README.md create mode 100644 roles/cifmw_nfs/defaults/main.yml create mode 100644 roles/cifmw_nfs/meta/main.yml create mode 100644 roles/cifmw_nfs/tasks/main.yml diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 014705112c..16eb140881 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -102,7 +102,17 @@ name: edpm_deploy - name: Deploy NFS server on target nodes - ansible.builtin.import_playbook: "nfs.yml" + become: true + hosts: "{{ groups[cifmw_nfs_target | default('computes')][0] | default([]) }}" + tasks: + - name: Run cifmw_nfs role + vars: + nftables_path: /etc/nftables + nftables_conf: /etc/sysconfig/nftables.conf + when: + - cifmw_edpm_deploy_nfs | default('false') | bool + ansible.builtin.import_role: + name: cifmw_nfs - name: Clear ceph target hosts facts to force refreshing in HCI deployments hosts: "{{ cifmw_ceph_target | default('computes') }}" diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 5d20b62b6a..72932bb8bf 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -14,6 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. +# +# NOTE: Playbook migrated to: roles/cifmw_nfs/tasks/main.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# + - name: Deploy an NFS server become: true hosts: "{{ groups[cifmw_nfs_target | default('computes')][0] | default([]) }}" diff --git a/roles/cifmw_nfs/README.md b/roles/cifmw_nfs/README.md new file mode 100644 index 0000000000..5e23f0d6bb --- /dev/null +++ b/roles/cifmw_nfs/README.md @@ -0,0 +1,23 @@ +# cifmw_nfs +This role deploys an NFS Server. + +## Privilege escalation +sudo privilege is required for this role. + +## Parameters +* `nftables_path`: path to nftables files +* `nftables_conf`: path to nftables config file + +## Examples +``` +- name: Deploy NFS server on target nodes + become: true + hosts: "{{ groups[cifmw_nfs_target | default('computes')][0] | default([]) }}" + vars: + nftables_path: /etc/nftables + nftables_conf: /etc/sysconfig/nftables.conf + when: + - cifmw_edpm_deploy_nfs | default('false') | bool + ansible.builtin.import_role: + name: cifmw_nfs +``` diff --git a/roles/cifmw_nfs/defaults/main.yml b/roles/cifmw_nfs/defaults/main.yml new file mode 100644 index 0000000000..3d1a27cb63 --- /dev/null +++ b/roles/cifmw_nfs/defaults/main.yml @@ -0,0 +1,22 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# All variables intended for modification should be placed in this file. 
+# All variables within this role should have a prefix of "cifmw_nfs" + +cifmw_nfs_network: "storage" +cifmw_nfs_target: "compute" diff --git a/roles/cifmw_nfs/meta/main.yml b/roles/cifmw_nfs/meta/main.yml new file mode 100644 index 0000000000..74715f7700 --- /dev/null +++ b/roles/cifmw_nfs/meta/main.yml @@ -0,0 +1,30 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +galaxy_info: + author: CI Framework + description: CI Framework Role -- cifmw_nfs + company: Red Hat + license: Apache-2.0 + min_ansible_version: "2.14" + namespace: cifmw + galaxy_tags: + - cifmw + +# List your role dependencies here, one per line. Be sure to remove the '[]' above, +# if you add dependencies to this list. +dependencies: [] diff --git a/roles/cifmw_nfs/tasks/main.yml b/roles/cifmw_nfs/tasks/main.yml new file mode 100644 index 0000000000..9fdcca9116 --- /dev/null +++ b/roles/cifmw_nfs/tasks/main.yml @@ -0,0 +1,136 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Set custom cifmw PATH reusable fact + tags: + - always + when: + - cifmw_path is not defined + ansible.builtin.set_fact: + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + cacheable: true + +- name: Install required packages + ansible.builtin.package: + name: + - nfs-utils + - iptables + +- name: Configure nfs to use v4 only + community.general.ini_file: + path: /etc/nfs.conf + section: nfsd + option: vers3 + value: n + backup: true + mode: "0644" + +- name: Disable NFSv3-related services + ansible.builtin.systemd_service: + name: "{{ item }}" + masked: true + loop: + - rpc-statd.service + - rpcbind.service + - rpcbind.socket + +- name: Ensure shared folder exist + ansible.builtin.file: + path: "/data/{{ item }}" + state: directory + mode: '755' + loop: "{{ cifmw_nfs_shares }}" + +- name: Set nfs network vars + delegate_to: controller + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + vars: + _nfs_network_name: "{{ cifmw_nfs_network }}" + _nfs_host: "{{ [groups[cifmw_nfs_target][0], ansible_domain] | select() | join('.') | default('') }}" + _ipset_namespace: "{{ cifmw_install_yamls_defaults['NAMESPACE'] | default('openstack') }}" + ansible.builtin.command: + cmd: oc get ipset {{ _nfs_host }} -n {{ _ipset_namespace }} -o jsonpath='{.status.reservations[?(@.network=="{{ _nfs_network_name }}")]}' + register: cifmw_nfs_network_out + +- name: Store nfs network vars + delegate_to: controller + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/parameters/nfs-params.yml" + content: >- + {{ + { + 'cifmw_nfs_ip': cifmw_nfs_network_out.stdout | from_json | json_query('address'), + 'cifmw_nfs_network_range': cifmw_nfs_network_out.stdout | from_json | json_query('cidr') + } | to_nice_yaml + }} + mode: "0644" + +# NOTE: This 
represents a workaround because there's an edpm-nftables role +# in edpm-ansible already. That role should contain the implementation +# of the firewall rules for NFS, and they should be included in the +# main edpm-rules.nft file. The following firewall config assumes that +# the EDPM node has been configured in terms of networks and firewall. +- name: Configure firewall + become: true + tags: + - nft + block: + - name: Generate nftables rules file + ansible.builtin.copy: + content: | + add rule inet filter EDPM_INPUT tcp dport 2049 accept + dest: "{{ nftables_path }}/nfs-server.nft" + mode: '0666' + + - name: Update nftables.conf and include nfs rules at the bottom + ansible.builtin.lineinfile: + path: "{{ nftables_conf }}" + line: include "{{ nftables_path }}/nfs-server.nft" + insertafter: EOF + + - name: Restart nftables service + ansible.builtin.systemd: + name: nftables + state: restarted + +- name: Configure the ip the nfs server should listen on + community.general.ini_file: + path: /etc/nfs.conf + section: nfsd + option: host + value: "{{ cifmw_nfs_network_out.stdout | from_json | json_query('address') }}" + backup: true + mode: "0644" + +- name: Enable and restart nfs-server service + ansible.builtin.systemd: + name: nfs-server + state: restarted + enabled: true + +- name: Add shares to /etc/exports + ansible.builtin.lineinfile: + path: /etc/exports + line: "/data/{{ item }} {{ cifmw_nfs_network_out.stdout | from_json | json_query('cidr') }}(rw,sync,no_root_squash)" + loop: "{{ cifmw_nfs_shares }}" + register: _export_shares + +- name: Export the shares # noqa: no-handler + when: + - _export_shares.changed + ansible.builtin.command: exportfs -a From 702e475c7a460f86ef8df6cf7d0cc2c7b5392135 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 3 Jun 2025 16:05:37 +0530 Subject: [PATCH 167/480] Use role instead of playbooks - validations.yml Before simplifying 06-deploy-edpm.yml, it is necessary to take care of import_playbook calls within that play There 
are three import_playbook calls within 06-deploy-edpm.yml - validations.yml - nfs.yml - ceph.yml This PR takes care of validations.yml It is continuation of simplification job execution [1]. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- playbooks/06-deploy-architecture.yml | 9 +++++++-- playbooks/06-deploy-edpm.yml | 13 +++++++++---- playbooks/validations.yml | 5 +++++ 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index d87a7c3125..0d2e708a01 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -290,5 +290,10 @@ name: run_hook - name: Validations workflow - ansible.builtin.import_playbook: validations.yml - when: cifmw_execute_validations | default('false') | bool + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run validations + ansible.builtin.include_role: + name: validations + when: cifmw_execute_validations | default('false') | bool diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 16eb140881..9a691c162d 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -173,7 +173,12 @@ - name: Validations workflow # If we're doing an architecture deployment, we need to skip validations here. # Instead, they will be executed in the 06-deploy-architecture.yml playbook. 
- when: - - cifmw_architecture_scenario is not defined - - cifmw_execute_validations | default('false') | bool - ansible.builtin.import_playbook: validations.yml + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run validations + ansible.builtin.include_role: + name: validations + when: + - cifmw_architecture_scenario is not defined + - cifmw_execute_validations | default('false') | bool diff --git a/playbooks/validations.yml b/playbooks/validations.yml index 9de115bf77..9c65cfbf97 100644 --- a/playbooks/validations.yml +++ b/playbooks/validations.yml @@ -1,3 +1,8 @@ +# +# NOTE: Playbook migrated to: 06-deploy-edpm.yml & 06-deploy-architecture.yml. +# This migration is temporary, and will be further migrated to role. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# - name: Execute the validations role hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false From 002a16f199083aa6bac6cda778b34b26c4ea4e12 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 10 Jun 2025 15:43:53 +0300 Subject: [PATCH 168/480] Add ansible cache dir to git ignore as suggested in the ansible project and RH vscode ansible plugin adding .ansible/ to git ignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 27e66f9c45..fa625530bd 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ docs/dictionary/tmp .venv/* .env .idea/ +.ansible/ From 109056b50aeff88f416b5ad4e4c00f72648ce4c7 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Thu, 29 May 2025 18:06:43 +0300 Subject: [PATCH 169/480] Allow bmh cleanup and move cleanup to role - Allow cleanup of baremetal hosts (BMH) in OpenStack deployments. (required due to bug/change in OCP 4.18) - Move cleanup tasks to a dedicated Ansible role for better organization and reusability. - reduce cleanup time by deataching BMHs from the cluster before removing them. 
--- clean_openstack_deployment.yaml | 21 +--- docs/dictionary/en-custom.txt | 1 + roles/cleanup_openstack/README.md | 11 ++ roles/cleanup_openstack/defaults/main.yaml | 1 + .../cleanup_openstack/tasks/cleanup_crs.yaml | 31 +++++ roles/cleanup_openstack/tasks/detach_bmh.yaml | 42 +++++++ roles/cleanup_openstack/tasks/main.yaml | 109 ++++++++++++++++++ 7 files changed, 197 insertions(+), 19 deletions(-) create mode 100644 roles/cleanup_openstack/README.md create mode 100644 roles/cleanup_openstack/defaults/main.yaml create mode 100644 roles/cleanup_openstack/tasks/cleanup_crs.yaml create mode 100644 roles/cleanup_openstack/tasks/detach_bmh.yaml create mode 100644 roles/cleanup_openstack/tasks/main.yaml diff --git a/clean_openstack_deployment.yaml b/clean_openstack_deployment.yaml index 9d7667e501..1ae7570d52 100644 --- a/clean_openstack_deployment.yaml +++ b/clean_openstack_deployment.yaml @@ -1,23 +1,6 @@ - name: Clean OpenStack deployment hosts: "{{ target_host | default('localhost') }}" tasks: - - name: Clean up testing resources + - name: Cleanup openstack deployment ansible.builtin.include_role: - name: test_operator - tasks_from: cleanup - - - name: Clean up OpenStack operators - vars: - cifmw_kustomize_deploy_keep_generated_crs: false - ansible.builtin.include_role: - name: kustomize_deploy - tasks_from: cleanup - - - name: Remove logs and tests directories - ansible.builtin.file: - path: "{{ item }}" - state: absent - loop: - - "/home/zuul/ci-framework-data/logs" - - "/home/zuul/ci-framework-data/tests" - become: true + name: cleanup_openstack diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index f19c53b9dd..c826798514 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -131,6 +131,7 @@ ddthh deepscrub delorean deployer +deprovision deps dest dev diff --git a/roles/cleanup_openstack/README.md b/roles/cleanup_openstack/README.md new file mode 100644 index 0000000000..c1fef01b85 --- /dev/null +++ 
b/roles/cleanup_openstack/README.md @@ -0,0 +1,11 @@ +# cleanup_openstack + +Cleans up openstack resources created by CIFMW by deleting CRs + +## Privilege escalation +None + +## Parameters +As this role is for cleanup it utilizes default vars from other roles which can be referenced at their role readme page: kustomize_deploy, deploy_bmh + +* `cifmw_cleanup_openstack_detach_bmh`: (Boolean) Detach BMH when cleaning flag, this is used to avoid deprovision when is not required. Default: `true` diff --git a/roles/cleanup_openstack/defaults/main.yaml b/roles/cleanup_openstack/defaults/main.yaml new file mode 100644 index 0000000000..1f6654fe5d --- /dev/null +++ b/roles/cleanup_openstack/defaults/main.yaml @@ -0,0 +1 @@ +cifmw_cleanup_openstack_detach_bmh: true diff --git a/roles/cleanup_openstack/tasks/cleanup_crs.yaml b/roles/cleanup_openstack/tasks/cleanup_crs.yaml new file mode 100644 index 0000000000..d6e7bdb5cd --- /dev/null +++ b/roles/cleanup_openstack/tasks/cleanup_crs.yaml @@ -0,0 +1,31 @@ +--- +- name: Ensure that kustomization files are present + ansible.builtin.stat: + path: "{{ item }}" + loop: "{{ _crs_to_delete }}" + register: _crs_to_delete_files + +- name: Cleaning operators resources + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + state: absent + src: "{{ item.stat.path }}" + wait: true + wait_timeout: 600 + loop: "{{ _crs_to_delete_files.results }}" + register: _cleanup_results + until: "_cleanup_results is success" + retries: 3 + delay: 120 + when: + - item.stat.exists + +- name: Cleanup generated CRs if requested + ansible.builtin.file: + path: "{{ item.stat.path }}" + state: absent + loop: "{{ _crs_to_delete_files.results }}" + when: + - item.stat.exists diff --git a/roles/cleanup_openstack/tasks/detach_bmh.yaml b/roles/cleanup_openstack/tasks/detach_bmh.yaml new file mode 100644 index 0000000000..0c047b3be2 
--- /dev/null +++ b/roles/cleanup_openstack/tasks/detach_bmh.yaml @@ -0,0 +1,42 @@ +# This task file detaches the BMH (Bare Metal Host) resources to prevent deprovisioning them +--- +- name: Skip deprovision for BMH + when: cifmw_deploy_bmh_bm_hosts_list | length > 0 + block: + - name: Patch bmh with detached + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + state: patched + wait: true + wait_timeout: 600 + api_version: metal3.io/v1alpha1 + kind: BareMetalHost + namespace: "{{ cifmw_deploy_bmh_namespace }}" + name: "{{ item }}" + definition: + metadata: + annotations: + baremetalhost.metal3.io/detached: "" + loop: "{{ cifmw_deploy_bmh_bm_hosts_list }}" + loop_control: + label: "{{ item }}" + + - name: Wait for operationalStatus to become detached + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + namespace: "{{ cifmw_deploy_bmh_namespace }}" + kind: BareMetalHost + api_version: metal3.io/v1alpha1 + name: "{{ item }}" + retries: 60 + delay: 10 + until: + - bmh_status.resources | length == 0 or bmh_status.resources[0].status.operationalStatus == 'detached' + register: bmh_status + loop: "{{ cifmw_deploy_bmh_bm_hosts_list }}" + loop_control: + label: "{{ item }}" diff --git a/roles/cleanup_openstack/tasks/main.yaml b/roles/cleanup_openstack/tasks/main.yaml new file mode 100644 index 0000000000..4830834096 --- /dev/null +++ b/roles/cleanup_openstack/tasks/main.yaml @@ -0,0 +1,109 @@ +--- +- name: Include required vars + ansible.builtin.include_vars: + file: "{{ item }}" + loop: + - roles/kustomize_deploy/defaults/main.yml + - roles/deploy_bmh/defaults/main.yml + +- name: Load architecture automation file + register: _automation + ansible.builtin.slurp: + path: "{{ 
cifmw_architecture_automation_file }}" + +- name: Prepare automation data + vars: + _parsed: "{{ _automation.content | b64decode | from_yaml }}" + ansible.builtin.set_fact: + cifmw_deploy_architecture_steps: >- + {{ _parsed['vas'][cifmw_architecture_scenario] }} + +- name: Clean up testing resources + ansible.builtin.include_role: + name: test_operator + tasks_from: cleanup + +- name: Set baremetal hosts facts + vars: + _cifmw_deploy_bmh_bm_hosts: >- + {{ + cifmw_baremetal_hosts | default({}) | dict2items | + rejectattr('key', 'in', ['crc', 'controller', 'ocp']) | + items2dict + }} + ansible.builtin.set_fact: + cifmw_deploy_bmh_bm_hosts_list: "{{ _cifmw_deploy_bmh_bm_hosts.keys() | list | default([]) }}" + +- name: Get bmh crs + ansible.builtin.find: + path: "{{ cifmw_deploy_bmh_dest_dir }}" + patterns: "*.yml" + excludes: "bmh-secret*" + register: bmh_crs + +- name: Get bmh secrets crs + ansible.builtin.find: + path: "{{ cifmw_deploy_bmh_dest_dir }}" + patterns: "bmh-secret*" + register: bmh_secrets_crs + +- name: Detach bmh to skip deprovisioning + ansible.builtin.import_tasks: detach_bmh.yaml + when: cifmw_cleanup_openstack_detach_bmh + +- name: Delete deployment CRs + vars: + _stages_crs: >- + {{ + cifmw_deploy_architecture_steps['stages'] | + reverse | + selectattr('build_output', 'defined') | + map(attribute='build_output') | + map('basename') | + list + }} + _stages_crs_path: >- + {{ + [cifmw_kustomize_deploy_kustomizations_dest_dir] + | product(_stages_crs) + | map('join', '/') + | unique + }} + _external_dns_crs: + - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml + - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml + _operators_crs: + - "{{ cifmw_kustomize_deploy_nmstate_dest_file }}" + - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" + - "{{ cifmw_kustomize_deploy_kustomizations_dest_dir }}/openstack.yaml" + - "{{ cifmw_kustomize_deploy_olm_dest_file }}" + _bmh_crs: >- + {{ + 
bmh_crs.files | + map(attribute='path') | + list + }} + _bmh_secrets_crs: >- + {{ + bmh_secrets_crs.files | + map(attribute='path') | + list + }} + _crs_to_delete: >- + {{ + _external_dns_crs + + _stages_crs_path + + _bmh_crs + + _bmh_secrets_crs + + _operators_crs + }} + ansible.builtin.import_tasks: cleanup_crs.yaml + +- name: Remove logs and tests directories + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - "/home/zuul/ci-framework-data/logs" + - "/home/zuul/ci-framework-data/tests" + become: true From eb83327233fe9dcb8255e9ce8edc324086b41c5a Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 10 Jun 2025 16:13:52 +0300 Subject: [PATCH 170/480] Fix task and vars names - Change var names to be more specific - Rename task to match actual behavior --- roles/test_operator/README.md | 4 ++-- roles/test_operator/defaults/main.yml | 4 ++-- roles/test_operator/tasks/collect-logs.yaml | 12 ++++++------ roles/test_operator/tasks/run-test-operator-job.yml | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 0dfb4eb651..458ec12ce6 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -27,8 +27,8 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ image: "{{ cifmw_test_operator_logs_image }}" command: ["sleep"] args: ["infinity"] - volumeMounts: "{{ volume_mounts }}" - volumes: "{{ volumes }}" + volumeMounts: "{{ _test_operator_volume_mounts }}" + volumes: "{{ _test_operator_volumes }}" tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" ``` * `cifmw_test_operator_default_groups`: (List) List of groups in the include list to search for tests to be executed. 
Default value: `[ 'default' ]` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 586e96fcf6..1e27bba62f 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -55,8 +55,8 @@ cifmw_test_operator_log_pod_definition: image: "{{ cifmw_test_operator_logs_image }}" command: ["sleep"] args: ["infinity"] - volumeMounts: "{{ volume_mounts }}" - volumes: "{{ volumes }}" + volumeMounts: "{{ _test_operator_volume_mounts }}" + volumes: "{{ _test_operator_volumes }}" tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" # default test framework registry, namespace and tag can be overridden per test framework (tempest, tobiko, horizontest and ansibletest) cifmw_test_operator_default_registry: quay.io diff --git a/roles/test_operator/tasks/collect-logs.yaml b/roles/test_operator/tasks/collect-logs.yaml index 34d7413475..a778d4923b 100644 --- a/roles/test_operator/tasks/collect-logs.yaml +++ b/roles/test_operator/tasks/collect-logs.yaml @@ -1,7 +1,7 @@ - name: Reset volumes and volume_mounts to an empty list ansible.builtin.set_fact: - volumes: [] - volume_mounts: [] + _test_operator_volumes: [] + _test_operator_volume_mounts: [] - name: Get information about PVCs that store the logs kubernetes.core.k8s_info: @@ -16,16 +16,16 @@ - name: Set up volume mounts and volumes for all PVCs ansible.builtin.set_fact: - volume_mounts: > + _test_operator_volume_mounts: > {{ - (volume_mounts | default([])) + [{ + (_test_operator_volume_mounts | default([])) + [{ 'name': "logs-volume-" ~ index, 'mountPath': "/mnt/logs-{{ test_operator_instance_name }}-step-" ~ index }] }} - volumes: > + _test_operator_volumes: > {{ - (volumes | default([])) + [{ + (_test_operator_volumes | default([])) + [{ 'name': "logs-volume-" ~ index, 'persistentVolumeClaim': { 'claimName': pvc.metadata.name diff --git a/roles/test_operator/tasks/run-test-operator-job.yml 
b/roles/test_operator/tasks/run-test-operator-job.yml index adc6aca853..658ee5970f 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -130,7 +130,7 @@ combine({run_test_fw: successful_execution}) }} - - name: Delete tempest and/or tobiko pods + - name: Delete test resources when: - cifmw_test_operator_cleanup | bool ansible.builtin.include_tasks: cleanup-run.yaml From 14950c1934077d873be18c85f4fcde4e55e287ca Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 12 Jun 2025 07:42:40 +0200 Subject: [PATCH 171/480] Add missing molecule jobs After merging [1] [2] it seems the CI job did not trigger tests for verifying molecule roles. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/3038/ [2] https://github.com/openstack-k8s-operators/ci-framework/pull/3020/ Signed-off-by: Daniel Pawlik --- zuul.d/molecule.yaml | 18 ++++++++++++++++++ zuul.d/projects.yaml | 2 ++ 2 files changed, 20 insertions(+) diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 7796925872..79db6dedb8 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -875,6 +875,15 @@ - ^.config/molecule/.* name: cifmw-molecule-cifmw_external_dns parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_nfs/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_nfs + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt @@ -884,6 +893,15 @@ - ^.config/molecule/.* name: cifmw-molecule-cifmw_setup parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cleanup_openstack/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cleanup_openstack + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index ee6b054032..c7f8d7bdae 100644 --- 
a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -33,9 +33,11 @@ - cifmw-molecule-cifmw_cephadm - cifmw-molecule-cifmw_create_admin - cifmw-molecule-cifmw_external_dns + - cifmw-molecule-cifmw_nfs - cifmw-molecule-cifmw_ntp - cifmw-molecule-cifmw_setup - cifmw-molecule-cifmw_test_role + - cifmw-molecule-cleanup_openstack - cifmw-molecule-compliance - cifmw-molecule-config_drive - cifmw-molecule-copy_container From fd9af20a5a99fb7277bb822c1d7c3044f864241d Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 9 Jun 2025 12:45:15 +0530 Subject: [PATCH 172/480] Use role instead of playbooks - 03-build-packages.yml It is continuation of simplification job execution [1]. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 14 +++++++++---- playbooks/03-build-packages.yml | 4 ++++ roles/cifmw_setup/tasks/build_packages.yml | 24 ++++++++++++++++++++++ 3 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 roles/cifmw_setup/tasks/build_packages.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index a479c9a7af..c46da927d4 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -55,10 +55,16 @@ tags: - infra -- name: Import package build playbook - ansible.builtin.import_playbook: playbooks/03-build-packages.yml - tags: - - build-packages +- name: Build package playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Build package playbook + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: build_packages.yml + tags: + - build-packages - name: Import containers build playbook ansible.builtin.import_playbook: playbooks/04-build-containers.yml diff --git a/playbooks/03-build-packages.yml b/playbooks/03-build-packages.yml index b29129a38e..44fdc2dda2 100644 --- a/playbooks/03-build-packages.yml +++ b/playbooks/03-build-packages.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/build_packages.yml. +# DO NOT EDIT THAT PLAYBOOK. 
IT WOULD BE REMOVED IN NEAR FUTURE. +# - name: Build package playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/build_packages.yml b/roles/cifmw_setup/tasks/build_packages.yml new file mode 100644 index 0000000000..4ec5d765c4 --- /dev/null +++ b/roles/cifmw_setup/tasks/build_packages.yml @@ -0,0 +1,24 @@ +--- +- name: Run pre_package_build hooks + vars: + step: pre_package_build + ansible.builtin.import_role: + name: run_hook + +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Build packages + when: + - cifmw_pkg_build_list is defined + - cifmw_pkg_build_list | length > 0 + ansible.builtin.import_role: + name: pkg_build + tasks_from: build.yml + +- name: Run post_package_build hooks + vars: + step: post_package_build + ansible.builtin.import_role: + name: run_hook From 5486e254db14394ff723ef91587934af2458d786 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Wed, 11 Jun 2025 12:53:12 -0400 Subject: [PATCH 173/480] rsync only if source directory exists In reproducer role's configure_controller.yml tasks file, stat the cifmw_reproducer_src_dir before attempting to rsync it and then only rsync it if it exists. This is a reasonable thing to do before calling rsync. If the cifmw_reproducer_src_dir does not exist, then fall back to github for the Ansible dependencies. This will not break the current behavior for which the rsync task was added. 
Jira: OSPRH-17434 Signed-off-by: John Fulton --- .../reproducer/tasks/configure_controller.yml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 7284a4981b..14a4bc6028 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -350,7 +350,14 @@ name: sushy_emulator tasks_from: verify.yml - - name: Sync local repositories to other hosts + - name: Check if cifmw_reproducer_src_dir is on localhost + delegate_to: localhost + ansible.builtin.stat: + path: "{{ cifmw_reproducer_src_dir }}" + register: cifmw_reproducer_src_dir_stat + run_once: true + + - name: Sync local repositories to other hosts if present delegate_to: localhost ansible.posix.synchronize: src: "{{ cifmw_reproducer_src_dir }}/" @@ -358,14 +365,20 @@ archive: true recursive: true loop: "{{ groups['controllers'] }}" + when: + - cifmw_reproducer_src_dir_stat.stat.exists + - cifmw_reproducer_src_dir_stat.stat.isdir - # NOTE: src dir is synchronized in libvirt_layout.yml - name: Install ansible dependencies register: _async_dep_install async: 600 # 10 minutes should be more than enough poll: 0 ansible.builtin.pip: - requirements: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + requirements: "{{ have_local | ternary(local, remote) }}" + vars: + have_local: "{{ cifmw_reproducer_src_dir_stat.stat.exists and cifmw_reproducer_src_dir_stat.stat.isdir }}" + local: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt - name: Inject most of the cifmw_ parameters passed to the reproducer run tags: From 91d3e597df718ce5f67589e032d6aa8d794837ec Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 12 Jun 2025 
12:32:38 +0200 Subject: [PATCH 174/480] Add retries to kustomize_deploy tasks We observe the following errors randomly occurring from `kubernetes.core.k8s_info` module, typically just after waiting for the openstack operators InstallPlan to be finished: `AttributeError: ''NoneType'' object has no attribute ''status''`. This may be because the OpenShift cluster gets overwhelmed and sometimes the module receives the unexpected response, so in `custom_condition()` in `plugins/module_utils/k8s/waiter.py` there is no `status` field in `resource` [1]. As a mitigation, let's try retrying the task after some delay. [1] https://github.com/ansible-collections/kubernetes.core/blob/main/plugins/module_utils/k8s/waiter.py#L86 --- .../kustomize_deploy/tasks/install_operators.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 28584c6d19..862c358ab4 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -166,6 +166,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _cert_manager_operator_pods + until: _cert_manager_operator_pods is success - name: Wait for cainjector pods kubernetes.core.k8s_info: @@ -179,6 +183,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _cainjector_pods + until: _cainjector_pods is success - name: Wait for webhook pods kubernetes.core.k8s_info: @@ -192,6 +200,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _webhook_pods + until: _webhook_pods is success - name: Wait for certmanager pods kubernetes.core.k8s_info: @@ -205,6 +217,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _certmanager_pods + until: _certmanager_pods is success - name: Create catalog source and switch dependent operators to consume it when: 
From 07f3e155c409cbcff5c02d83fb5003e743df0eb3 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 11 Jun 2025 19:41:29 +0530 Subject: [PATCH 175/480] Remove Temporary keystone config workaround oslo.cache commit[1] is now included in RDO[2] and promoted. [1] https://review.opendev.org/c/openstack/oslo.cache/+/952014 [2] https://review.rdoproject.org/r/c/rdoinfo/+/57810 Related-Issue: #OSPCIX-901 --- zuul.d/edpm_multinode.yaml | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index 7efbcec1c6..ded4fae462 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -317,26 +317,6 @@ cifmw_extras: - '@scenarios/centos-9/multinode-ci.yml' - '@scenarios/centos-9/horizon.yml' - # Temporary until https://review.opendev.org/c/openstack/oslo.cache/+/952014 - # is included in rdo and promoted - cifmw_edpm_prepare_kustomizations: - - apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - namespace: openstack - patches: - - patch: |- - apiVersion: core.openstack.org/v1beta1 - kind: OpenStackControlPlane - metadata: - name: unused - spec: - keystone: - template: - customServiceConfig: | - [cache] - memcache_sasl_enabled = true - target: - kind: OpenStackControlPlane run: - ci/playbooks/edpm/run.yml From 7b3e11d3b3a63456997e59ca501811e3899e2668 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Fri, 13 Jun 2025 13:46:52 +0200 Subject: [PATCH 176/480] Let sync workflow work with non-default branches Currently the sync workflow checkouts always the default branch in repository, so in case where we want to sync other branch, such as `18.0-fr2`, the attempt of rebase will result in unknown branch (as it was not pulled by default to the locally cloned repository). Hence, after this commit is applied, we will perform rebase to what was explicitly fetched. 
--- .github/workflows/sync_branches_reusable_workflow.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync_branches_reusable_workflow.yml b/.github/workflows/sync_branches_reusable_workflow.yml index 9e88e5889c..2e4fdae8aa 100644 --- a/.github/workflows/sync_branches_reusable_workflow.yml +++ b/.github/workflows/sync_branches_reusable_workflow.yml @@ -36,5 +36,5 @@ jobs: run: | git fetch origin ${{ inputs.source-branch }} git checkout ${{ inputs.target-branch }} - git rebase ${{ inputs.source-branch }} + git rebase FETCH_HEAD git push --force origin ${{ inputs.target-branch }} From f028bc1128b020d5dddeeed1eaf7e883942217ce Mon Sep 17 00:00:00 2001 From: Antonio Romito Date: Tue, 27 May 2025 18:55:53 +0200 Subject: [PATCH 177/480] fix(sushy_emulator): move register out of slurp task block The `register` keyword was incorrectly placed inside the `slurp` module block, which caused Ansible to fail with a syntax error: "Unsupported parameters for (ansible.builtin.slurp) module: register" This commit moves `register` outside the module block to fix the failure. 
File affected: - roles/sushy_emulator/tasks/collect_details.yml --- roles/sushy_emulator/tasks/collect_details.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/sushy_emulator/tasks/collect_details.yml b/roles/sushy_emulator/tasks/collect_details.yml index 2791295ec3..e54aa6ccb7 100644 --- a/roles/sushy_emulator/tasks/collect_details.yml +++ b/roles/sushy_emulator/tasks/collect_details.yml @@ -80,7 +80,7 @@ - name: "Slurp content of: {{ _uuid_file }}" ansible.builtin.slurp: src: "{{ _uuid_file }}" - register: _libvirt_uuids_file + register: _libvirt_uuids_file - name: "Set cifmw_libvirt_manager_uuids fact from {{ _uuid_file }}" vars: From 08d9645139bb3bd6afcc1d8018222a3413426578 Mon Sep 17 00:00:00 2001 From: Sergii Golovatiuk Date: Thu, 12 Jun 2025 15:38:41 +0200 Subject: [PATCH 178/480] Increase vm boot wait time * Increased the async timeout from 120 seconds to 300 seconds (5 minutes) for the initial SSH wait. * Increased the retry mechanism from 60 retries with 2 second delay to 90 retries with 3 second delay, which gives about 4.5 minutes of total retry time. These changes allows to run OCP and OSP in paralel and not serial. 
--- roles/libvirt_manager/tasks/start_vms.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/libvirt_manager/tasks/start_vms.yml b/roles/libvirt_manager/tasks/start_vms.yml index 5ab369f8f9..6d14e72f0f 100644 --- a/roles/libvirt_manager/tasks/start_vms.yml +++ b/roles/libvirt_manager/tasks/start_vms.yml @@ -57,7 +57,7 @@ loop_control: loop_var: _vm label: "{{ _hostname }}.utility" - async: 120 + async: 300 poll: 0 - name: Ensure we get SSH on nodes @@ -70,5 +70,5 @@ loop_var: a_result register: a_poll_result until: a_poll_result.finished - retries: 60 - delay: 2 + retries: 90 + delay: 3 From d1b6cdcd4b62475b501863ebb5a13de2e9e58aa7 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 9 Jun 2025 12:56:25 +0530 Subject: [PATCH 179/480] Use role instead of playbooks - 05-build-operators.yml It is continuation of simplification job execution [1]. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 16 ++++++++++---- playbooks/05-build-operators.yml | 4 ++++ roles/cifmw_setup/tasks/build_operators.yml | 23 +++++++++++++++++++++ 3 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 roles/cifmw_setup/tasks/build_operators.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index c46da927d4..6a09444438 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -71,10 +71,18 @@ tags: - build-containers -- name: Import operators build playbook - ansible.builtin.import_playbook: playbooks/05-build-operators.yml - tags: - - build-operators +- name: Build operators playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + environment: + PATH: "{{ cifmw_path }}" + tasks: + - name: Build operators playbook + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: build_operators.yml + tags: + - build-operators - name: Import deploy edpm playbook ansible.builtin.import_playbook: playbooks/06-deploy-edpm.yml diff --git a/playbooks/05-build-operators.yml 
b/playbooks/05-build-operators.yml index 28218f4908..1de74ad195 100644 --- a/playbooks/05-build-operators.yml +++ b/playbooks/05-build-operators.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/build_operators.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# - name: Build operators playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/build_operators.yml b/roles/cifmw_setup/tasks/build_operators.yml new file mode 100644 index 0000000000..f2314b5f29 --- /dev/null +++ b/roles/cifmw_setup/tasks/build_operators.yml @@ -0,0 +1,23 @@ +--- +- name: Run pre_operator_build hooks + vars: + step: pre_operator_build + ansible.builtin.import_role: + name: run_hook + +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Build operator and meta-operator + when: + - cifmw_operator_build_operators is defined + - cifmw_operator_build_operators | length > 0 + ansible.builtin.import_role: + name: operator_build + +- name: Run post_operator_build hooks + vars: + step: post_operator_build + ansible.builtin.import_role: + name: run_hook From a0cb51745e67abe091e96eda4a0e961c7d3cfff9 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 9 Jun 2025 12:50:38 +0530 Subject: [PATCH 180/480] Use role instead of playbooks - 04-build-containers.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 14 ++++++++++---- playbooks/04-build-containers.yml | 4 ++++ roles/cifmw_setup/tasks/build_containers.yml | 20 ++++++++++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 roles/cifmw_setup/tasks/build_containers.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 6a09444438..09c253d4ee 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -66,10 +66,16 @@ tags: - build-packages -- name: Import containers build playbook - ansible.builtin.import_playbook: playbooks/04-build-containers.yml - tags: - - build-containers +- name: Build container playbook + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Build container playbook + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: build_containers.yml + tags: + - build-containers - name: Build operators playbook hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/04-build-containers.yml b/playbooks/04-build-containers.yml index 6c0a303231..e990a0b8ad 100644 --- a/playbooks/04-build-containers.yml +++ b/playbooks/04-build-containers.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/build_containers.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
+# - name: Build container playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/roles/cifmw_setup/tasks/build_containers.yml b/roles/cifmw_setup/tasks/build_containers.yml new file mode 100644 index 0000000000..57b1b8fe24 --- /dev/null +++ b/roles/cifmw_setup/tasks/build_containers.yml @@ -0,0 +1,20 @@ +--- +- name: Run pre_container_build hooks + vars: + step: pre_container_build + ansible.builtin.import_role: + name: run_hook + +- name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + +- name: Nothing to do yet + ansible.builtin.debug: + msg: "No support for that step yet" + +- name: Run post_container_build hooks + vars: + step: post_container_build + ansible.builtin.import_role: + name: run_hook From c055a355eaf05a73bd32bdac59be8eaeee76d76e Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 13 Jun 2025 11:38:43 +0200 Subject: [PATCH 181/480] adoption_osp_deploy: allow to pass private overcloud args A new parameter allow users to specify an additional overcloud deploy environment file (-e), which can be used to pass private/restricted parameters. cifmw_adoption_osp_deploy_overcloud_extra_args: points to a string which contains an environment file. The string is set to an empty value, so in the worst case the file will just be empty, which is fine for `openstack overcloud deploy`. --- roles/adoption_osp_deploy/README.md | 3 +++ roles/adoption_osp_deploy/defaults/main.yml | 2 ++ roles/adoption_osp_deploy/tasks/deploy_overcloud.yml | 9 +++++++++ 3 files changed, 14 insertions(+) diff --git a/roles/adoption_osp_deploy/README.md b/roles/adoption_osp_deploy/README.md index c687d146f4..89f929134f 100644 --- a/roles/adoption_osp_deploy/README.md +++ b/roles/adoption_osp_deploy/README.md @@ -25,6 +25,9 @@ configure the OSP17.1 deployment. networks in the ci-framework Network Mapper data to exclude when generating the adoption variables. 
By default it excludes the ci-framework "public" network (`ocpbm`). +* `cifmw_adoption_osp_deploy_overcloud_extra_args`: (String) The content of a + file which will be used with the -e option in the overcloud deploy command. + This is useful to specify private/restricted parameters. ### Break point diff --git a/roles/adoption_osp_deploy/defaults/main.yml b/roles/adoption_osp_deploy/defaults/main.yml index b906367f5d..4c3e20016e 100644 --- a/roles/adoption_osp_deploy/defaults/main.yml +++ b/roles/adoption_osp_deploy/defaults/main.yml @@ -28,3 +28,5 @@ cifmw_adoption_osp_deploy_repos: cifmw_adoption_osp_deploy_adoption_vars_exclude_nets: - "{{ cifmw_libvirt_manager_pub_net | default('ocpbm') }}" + +cifmw_adoption_osp_deploy_overcloud_extra_args: '' diff --git a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml index 1ee13ba8b1..4275ba65be 100644 --- a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml @@ -46,6 +46,7 @@ }} _network_provision_output: "network_provision_{{ _overcloud_name }}_out.yaml" _vips_provision_output: "vips_provision_{{ _overcloud_name }}_out.yaml" + _private_overcloud_conf_file: "{{ ansible_user_dir }}/internal-configuration.yaml" block: - name: Copy roles file delegate_to: "osp-undercloud-0" @@ -54,6 +55,13 @@ dest: "{{ _roles_file_dest }}" mode: "0644" + - name: Create the private configuration file with the specified configuration or empty + delegate_to: "osp-undercloud-0" + ansible.builtin.copy: + content: "{{ cifmw_adoption_osp_deploy_overcloud_extra_args }}" + dest: "{{ _private_overcloud_conf_file }}" + mode: "0644" + - name: Run overcloud deploy delegate_to: "osp-undercloud-0" vars: @@ -69,6 +77,7 @@ -e {{ ansible_user_dir }}/config_download_{{ _overcloud_name }}.yaml -e {{ ansible_user_dir }}/{{ _vips_provision_output }} -e {{ ansible_user_dir }}/{{ _network_provision_output }} + -e {{ _private_overcloud_conf_file }} 
_source_cmd: "source {{ ansible_user_dir }}/stackrc" _default_overcloud_deploy_cmd: "{{ _source_cmd }}; {{ _overcloud_deploy_cmd }}" cifmw.general.ci_script: From 6c050d2b425a3fe31f8caa64305d19821868bc81 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Tue, 3 Jun 2025 09:36:39 -0400 Subject: [PATCH 182/480] Set default values to Resources parameter There is a recent change that the Resources parameter will not have defaults for workflow, but it will be propagated from the main section of the CR. To make it work, it is important that there will be no empty value set, instead Resources parameter should be omitted. --- roles/test_operator/defaults/main.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 1e27bba62f..734022c63a 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -148,7 +148,7 @@ cifmw_test_operator_tempest_config: nodeSelector: "{{ cifmw_test_operator_node_selector | default(omit) }}" extraConfigmapsMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_configmaps_mounts | default(omit) }}" extraMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_mounts | default(omit) }}" - resources: "{{ stage_vars_dict.cifmw_test_operator_tempest_resources }}" + resources: "{{ stage_vars_dict.cifmw_test_operator_tempest_resources | default(omit) }}" tempestRun: includeList: | {{ stage_vars_dict.cifmw_test_operator_tempest_include_list | default('') }} @@ -188,7 +188,6 @@ cifmw_test_operator_tobiko_ssh_keysize: "{{ cifmw_ssh_keysize | default(521) }}" cifmw_test_operator_tobiko_debug: false cifmw_test_operator_tobiko_network_attachments: [] cifmw_test_operator_tobiko_workflow: [] -cifmw_test_operator_tobiko_resources: {} cifmw_test_operator_tobiko_config: apiVersion: test.openstack.org/v1beta1 kind: Tobiko @@ -209,7 +208,7 @@ cifmw_test_operator_tobiko_config: debug: "{{ 
stage_vars_dict.cifmw_test_operator_tobiko_debug }}" networkAttachments: "{{ stage_vars_dict.cifmw_test_operator_tobiko_network_attachments }}" extraMounts: "{{ stage_vars_dict.cifmw_test_operator_tobiko_extra_mounts | default(omit) }}" - resources: "{{ stage_vars_dict.cifmw_test_operator_tobiko_resources }}" + resources: "{{ stage_vars_dict.cifmw_test_operator_tobiko_resources | default(omit) }}" # preventCreate: preventCreate is generated by the test_operator role based on the value of stage_vars_dict.cifmw_test_operator_tobiko_prevent_create # numProcesses: numProcesses is generated by the test_operator role based on the value of stage_vars_dict.cifmw_test_operator_tobiko_num_processes # privateKey: privateKey is automatically by the test_operator role @@ -237,7 +236,6 @@ cifmw_test_operator_ansibletest_openstack_config_secret: "openstack-config-secre cifmw_test_operator_ansibletest_debug: false cifmw_test_operator_ansibletest_workflow: [] cifmw_test_operator_ansibletest_extra_configmaps_mounts: [] -cifmw_test_operator_ansibletest_resources: {} cifmw_test_operator_ansibletest_config: apiVersion: test.openstack.org/v1beta1 kind: AnsibleTest @@ -263,7 +261,7 @@ cifmw_test_operator_ansibletest_config: openStackConfigSecret: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_openstack_config_secret }}" workflow: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_workflow }}" debug: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_debug }}" - resources: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_resources }}" + resources: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_resources | default(omit) }}" # Section 5: horizontest parameters - used when run_test_fw is 'horizontest' cifmw_test_operator_horizontest_name: "horizontest-tests" @@ -288,7 +286,6 @@ cifmw_test_operator_horizontest_debug: false cifmw_test_operator_horizontest_horizon_test_dir: "/var/lib/horizontest" cifmw_test_operator_horizontest_extra_flag: "not pagination" 
cifmw_test_operator_horizontest_project_name_xpath: "//span[@class='rcueicon rcueicon-folder-open']/ancestor::li" -cifmw_test_operator_horizontest_resources: {} cifmw_test_operator_horizontest_config: apiVersion: test.openstack.org/v1beta1 kind: HorizonTest @@ -317,4 +314,4 @@ cifmw_test_operator_horizontest_config: projectNameXpath: "{{ stage_vars_dict.cifmw_test_operator_horizontest_project_name_xpath }}" horizonTestDir: "{{ stage_vars_dict.cifmw_test_operator_horizontest_horizon_test_dir }}" extraMounts: "{{ stage_vars_dict.cifmw_test_operator_horizontest_extra_mounts | default(omit) }}" - resources: "{{ stage_vars_dict.cifmw_test_operator_horizontest_resources }}" + resources: "{{ stage_vars_dict.cifmw_test_operator_horizontest_resources | default(omit) }}" From 3e749afcc9d5323fdd8a1a9532e210d1dc72c6eb Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 3 Jun 2025 15:51:14 +0300 Subject: [PATCH 183/480] Separate deployment and post-deployment tasks This patch separates the deployment and post-deployment tasks to allow for better reuse of the deployment/post-deployment tasks - Separate the deployment and post-deployment tasks in the reproducer playbook - Add a new playbook for post-deployment tasks (split deploy-edpm.yml) - Update the deploy-edpm-reuse.yaml to include the new post-deployment playbook - Update the roles/test_operator to include old run cleanup to allow re-use - Update the roles/reproducer to include the new post-deployment tasks --- deploy-edpm-reuse.yaml | 10 +++ deploy-edpm.yml | 67 ------------------- post-deployment.yml | 66 ++++++++++++++++++ reproducer.yml | 10 +++ .../tasks/configure_post_deployment.yml | 27 ++++++++ roles/reproducer/tasks/main.yml | 9 +++ roles/reproducer/tasks/reuse_main.yaml | 9 +++ roles/test_operator/README.md | 1 + roles/test_operator/defaults/main.yml | 1 + roles/test_operator/tasks/main.yml | 6 ++ 10 files changed, 139 insertions(+), 67 deletions(-) create mode 100644 post-deployment.yml create mode 100644 
roles/reproducer/tasks/configure_post_deployment.yml diff --git a/deploy-edpm-reuse.yaml b/deploy-edpm-reuse.yaml index fdbb0442bd..d6a3f0cfc1 100644 --- a/deploy-edpm-reuse.yaml +++ b/deploy-edpm-reuse.yaml @@ -114,3 +114,13 @@ delegate_to: controller-0 ansible.builtin.command: cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" + + - name: Run post deployment if instructed to + when: + - cifmw_post_deployment | default(true) | bool + no_log: "{{ cifmw_nolog | default(true) | bool }}" + async: "{{ 7200 + cifmw_test_operator_timeout | default(7200) }}" # 2h should be enough to deploy EDPM and rest for tests. + poll: 20 + delegate_to: controller-0 + ansible.builtin.command: + cmd: "/home/zuul/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 09c253d4ee..3a5f55c113 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -99,70 +99,3 @@ ansible.builtin.import_playbook: playbooks/06-deploy-architecture.yml tags: - edpm - -- name: Run Post-deployment admin setup steps, test, and compliance scan - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - - name: Run cifmw_setup admin_setup.yml - ansible.builtin.import_role: - name: cifmw_setup - tasks_from: admin_setup.yml - tags: - - admin-setup - - - name: Run Test - vars: - pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" - post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" - ansible.builtin.import_role: - name: cifmw_setup - tasks_from: run_tests.yml - tags: - - run-tests - - - name: Run compliance scan for controllers - ansible.builtin.import_role: - name: compliance - vars: - cifmw_compliance_podman_username: "{{ cifmw_registry_token.credentials.username }}" - cifmw_compliance_podman_password: "{{ cifmw_registry_token.credentials.password }}" - when: cifmw_run_operators_compliance_scans | default('false') | bool - tags: - - compliance - -- name: Run 
compliance scan for computes - hosts: "{{ groups['computes'] | default ([]) }}" - gather_facts: true - tasks: - - name: Run compliance scan for one compute - ansible.builtin.import_role: - name: compliance - tasks_from: run_compute_node_scans.yml - run_once: true - when: cifmw_run_compute_compliance_scans | default('false') | bool - tags: - - compliance - -- name: Run hooks and inject status flag - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - - name: Run pre_end hooks - tags: - - pre-end - vars: - step: pre_end - ansible.builtin.import_role: - name: run_hook - - - name: Inject success flag - ansible.builtin.file: - path: "{{ ansible_user_dir }}/cifmw-success" - state: touch - mode: "0644" - -- name: Run log related tasks - ansible.builtin.import_playbook: playbooks/99-logs.yml - tags: - - logs diff --git a/post-deployment.yml b/post-deployment.yml new file mode 100644 index 0000000000..2160c4e092 --- /dev/null +++ b/post-deployment.yml @@ -0,0 +1,66 @@ +- name: Run Post-deployment admin setup steps, test, and compliance scan + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run cifmw_setup admin_setup.yml + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: admin_setup.yml + tags: + - admin-setup + + - name: Run Test + vars: + pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" + post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_tests.yml + tags: + - run-tests + + - name: Run compliance scan for controllers + ansible.builtin.import_role: + name: compliance + vars: + cifmw_compliance_podman_username: "{{ cifmw_registry_token.credentials.username }}" + cifmw_compliance_podman_password: "{{ cifmw_registry_token.credentials.password }}" + when: cifmw_run_operators_compliance_scans | default('false') | bool + tags: + - compliance + +- name: Run compliance scan for computes 
+ hosts: "{{ groups['computes'] | default ([]) }}" + gather_facts: true + tasks: + - name: Run compliance scan for one compute + ansible.builtin.import_role: + name: compliance + tasks_from: run_compute_node_scans.yml + run_once: true + when: cifmw_run_compute_compliance_scans | default('false') | bool + tags: + - compliance + +- name: Run hooks and inject status flag + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run pre_end hooks + tags: + - pre-end + vars: + step: pre_end + ansible.builtin.import_role: + name: run_hook + + - name: Inject success flag + ansible.builtin.file: + path: "{{ ansible_user_dir }}/cifmw-success" + state: touch + mode: "0644" + +- name: Run log related tasks + ansible.builtin.import_playbook: playbooks/99-logs.yml + tags: + - logs diff --git a/reproducer.yml b/reproducer.yml index 536d159d18..e308ba7261 100644 --- a/reproducer.yml +++ b/reproducer.yml @@ -103,3 +103,13 @@ delegate_to: controller-0 ansible.builtin.command: cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" + + - name: Run post deployment if instructed to + when: + - cifmw_post_deployment | default(true) | bool + no_log: "{{ cifmw_nolog | default(true) | bool }}" + async: "{{ 7200 + cifmw_test_operator_timeout | default(3600) }}" # 2h should be enough to deploy EDPM and rest for tests. 
+ poll: 20 + delegate_to: controller-0 + ansible.builtin.command: + cmd: "/home/zuul/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" diff --git a/roles/reproducer/tasks/configure_post_deployment.yml b/roles/reproducer/tasks/configure_post_deployment.yml new file mode 100644 index 0000000000..a8a6a27c3c --- /dev/null +++ b/roles/reproducer/tasks/configure_post_deployment.yml @@ -0,0 +1,27 @@ +--- +- name: Prepare scripts on controller-0 + delegate_to: controller-0 + block: + - name: Push script + vars: + run_directory: "{{ _cifmw_reproducer_framework_location }}" + exports: + ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ansible-post-deployment.log" + default_extravars: + - "@{{ ansible_user_dir }}/ci-framework-data/parameters/reproducer-variables.yml" + - "@{{ ansible_user_dir }}/ci-framework-data/parameters/openshift-environment.yml" + extravars: "{{ cifmw_reproducer_play_extravars }}" + playbook: "post-deployment.yml" + ansible.builtin.template: + dest: "/home/zuul/post_deployment.sh" + src: "script.sh.j2" + mode: "0755" + owner: "zuul" + group: "zuul" + + - name: Rotate some logs + tags: + - always + ansible.builtin.include_tasks: rotate_log.yml + loop: + - ansible-post-deployment.log diff --git a/roles/reproducer/tasks/main.yml b/roles/reproducer/tasks/main.yml index 31167eee19..14671a1750 100644 --- a/roles/reproducer/tasks/main.yml +++ b/roles/reproducer/tasks/main.yml @@ -372,6 +372,15 @@ tags: - deploy_architecture + - name: Prepare VA post deployment + when: + - cifmw_architecture_scenario is defined + - cifmw_job_uri is undefined + tags: + - deploy_architecture + ansible.builtin.include_tasks: + file: configure_post_deployment.yml + - name: Prepare ci-like EDPM deploy when: - cifmw_job_uri is undefined diff --git a/roles/reproducer/tasks/reuse_main.yaml b/roles/reproducer/tasks/reuse_main.yaml index 2d4c4ea8cb..fe05c8a708 100644 --- a/roles/reproducer/tasks/reuse_main.yaml +++ b/roles/reproducer/tasks/reuse_main.yaml @@ -192,6 +192,15 @@ tags: 
- deploy_architecture + - name: Prepare VA post deployment + when: + - cifmw_architecture_scenario is defined + - cifmw_job_uri is undefined + tags: + - deploy_architecture + ansible.builtin.include_tasks: + file: configure_post_deployment.yml + - name: Set facts related to the reproducer ansible.builtin.set_fact: _ctl_reproducer_basedir: >- diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 458ec12ce6..5b87b85b5d 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -11,6 +11,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. Default value: `3600` * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` * `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. NOTE: This parameter is deprecated, please use `cifmw_test_operator_tempest_concurrency` instead. Default value: `8` +* `cifmw_test_operator_clean_last_run`: (Bool) Delete all resources created by the previous run at the beginning of the role. Default value: `false` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` * `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. * `cifmw_test_operator_crs_path`: (String) The path into which the tests CRs file will be created in. 
Default value: `{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts/test-operator-crs` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 734022c63a..bfa978eeb0 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -31,6 +31,7 @@ cifmw_test_operator_timeout: 3600 cifmw_test_operator_logs_image: quay.io/quay/busybox cifmw_test_operator_concurrency: 8 cifmw_test_operator_cleanup: false +cifmw_test_operator_clean_last_run: false cifmw_test_operator_dry_run: false cifmw_test_operator_default_groups: - default diff --git a/roles/test_operator/tasks/main.yml b/roles/test_operator/tasks/main.yml index 4e5b8952b9..a83a3c181e 100644 --- a/roles/test_operator/tasks/main.yml +++ b/roles/test_operator/tasks/main.yml @@ -14,6 +14,12 @@ # License for the specific language governing permissions and limitations # under the License. +- name: Cleanup previous test-operator resources + when: + - not cifmw_test_operator_dry_run | bool + - cifmw_test_operator_clean_last_run | bool + ansible.builtin.include_tasks: cleanup.yaml + - name: Ensure test_operator folder exists ansible.builtin.file: path: "{{ cifmw_test_operator_artifacts_basedir }}" From 1537f57034f7b07855d69bd492a76e96517bbaf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Wed, 14 May 2025 11:31:37 +0200 Subject: [PATCH 184/480] Adjust Adoption OSP Deploy to use ipv4 or ipv6 based Currently ipv4 is the only supported protocol for initial OSP17 deployment. With this change based on cifmw_networking_env_definition file network version protocol is detected and used for generating network related files. 
--- .../tasks/config_files.yml | 27 ++++++++++++------- .../tasks/generate_adoption_vars.yml | 6 ++--- .../tasks/prepare_overcloud.yml | 8 +++--- .../tasks/prepare_undercloud.yml | 16 +++++------ .../templates/adoption_vars.yaml.j2 | 16 +++++------ .../templates/os_net_config_overcloud.yml.j2 | 4 +-- .../templates/os_net_config_undercloud.yml.j2 | 20 +++++++++++--- .../tripleo-ansible-inventory.yaml.j2 | 8 +++--- 8 files changed, 64 insertions(+), 41 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/config_files.yml b/roles/adoption_osp_deploy/tasks/config_files.yml index e8485d814e..8273db4ae8 100644 --- a/roles/adoption_osp_deploy/tasks/config_files.yml +++ b/roles/adoption_osp_deploy/tasks/config_files.yml @@ -70,11 +70,20 @@ loop_var: group label: "{{ group.key }}" + - name: Override network version protocol vars to ipv6 + ansible.builtin.set_fact: + network_version: network_v6 + ip_version: ip_v6 + dns_version: dns_v6 + gw_version: gw_v6 + prefix_length_version: prefix_length_v6 + when: cifmw_networking_env_definition.networks.ctlplane.network_v6 is defined + - name: Generate DeployedServerPortMap field vars: _node_instance_net: "{{ cifmw_networking_env_definition.instances[node] }}" _key_name: "{{ node }}-ctlplane" - _ctlplane_ip: "{{ _node_instance_net.networks.ctlplane.ip_v4 }}" + _ctlplane_ip: "{{ _node_instance_net.networks.ctlplane[ip_version|default('ip_v4')] }}" _server_port: >- {%- set port = {_key_name: {}} -%} {%- set _ = port[_key_name].update({ @@ -82,10 +91,10 @@ { 'ip_address': _ctlplane_ip } ], 'subnets': [ - {'cidr': _ctlplane_net.network_v4} + {'cidr': _ctlplane_net[network_version|default("network_v4")]} ], 'network': { - 'tags': [ _ctlplane_net.network_v4 ] + 'tags': [ _ctlplane_net[network_version|default("network_v4")] ] }}) -%} {{ port }} ansible.builtin.set_fact: @@ -105,14 +114,14 @@ _node_port: > {%- set nodeport = {node: {}} -%} {% for network, net_info in _node_instance_net.networks.items() if network != 'ocpbm' %} - {%- set 
subnet = cifmw_networking_env_definition.networks[network].network_v4 -%} + {%- set subnet = cifmw_networking_env_definition.networks[network][network_version|default("network_v4")] -%} {%- set network_name = ['storage_mgmt'] if network == 'storagemgmt' else [network] -%} {%- set network_name = ['internal_api'] if network == 'internalapi' else [network] -%} {%- set _ = nodeport[node].update( { network_name[0]: { - 'ip_address': net_info.ip_v4, - 'ip_address_uri': net_info.ip_v4, + 'ip_address': net_info[ip_version|default("ip_v4")], + 'ip_address_uri': net_info[ip_version|default("ip_v4")], 'ip_subnet': subnet } } @@ -135,8 +144,8 @@ _cloud_domain: "{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}" _dns_server: >- {{ - (_ctlplane_net.dns_v4 | length > 0) | - ternary(_ctlplane_net.dns_v4, _ctlplane_net.gw_v4) + (_ctlplane_net[dns_version|default("dns_v4")] | length > 0) | + ternary(_ctlplane_net[dns_version|default("dns_v4")], _ctlplane_net[gw_version|default("gw_v4")]) }} ansible.builtin.set_fact: _ctlplanenet_attributes: @@ -146,7 +155,7 @@ subnets: ctlplane-subnet: dns_nameservers: "{{ _dns_server }}" - gateway_ip: "{{ _ctlplane_net.gw_v4 }}" + gateway_ip: "{{ _ctlplane_net[gw_version|default('gw_v4')] }}" - name: Create new config download file vars: diff --git a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml index 3b8f2771f7..a90a1a7b92 100644 --- a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml +++ b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml @@ -26,13 +26,13 @@ vars: _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}" _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}" - _undercloud_ip: "{{ _undercloud_net.networks.ctlplane.ip_v4 }}" + _undercloud_ip: "{{ _undercloud_net.networks.ctlplane[ip_version|default('ip_v4')] }}" _controller_1_name: "{{ _vm_groups['osp-controllers'] | first }}" _controller_1_net: "{{ 
cifmw_networking_env_definition.instances[_controller_1_name] }}" - _controller_1_internalapi_ip: "{{ _controller_1_net.networks.internalapi.ip_v4 }}" + _controller_1_internalapi_ip: "{{ _controller_1_net.networks.internalapi[ip_version|default('ip_v4')] }}" _compute_1_name: "{{ _vm_groups['osp-computes'] | first }}" _compute_1_net: "{{ cifmw_networking_env_definition.instances[_compute_1_name] }}" - _compute_1_ip: "{{ _compute_1_net.networks.ctlplane.ip_v4 }}" + _compute_1_ip: "{{ _compute_1_net.networks.ctlplane[ip_version|default('ip_v4')] }}" ansible.builtin.template: src: "adoption_vars.yaml.j2" dest: "{{ ansible_user_dir }}/adoption_vars.yaml" diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 96b7faccae..cbbdb2d56b 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -220,12 +220,12 @@ delegate_to: "{{ overcloud_vm }}" vars: _node_net: "{{ cifmw_networking_env_definition.instances[overcloud_vm] }}" - _ctlplane_ip: "{{ _node_net.networks.ctlplane.ip_v4 }}" + _ctlplane_ip: "{{ _node_net.networks.ctlplane[ip_version|default('ip_v4')] }}" _ctlplane_net: "{{ cifmw_networking_env_definition.networks.ctlplane }}" - _dns_server: "{{ _ctlplane_net.dns_v4 }}" - _gateway_ip: "{{ _ctlplane_net.gw_v4 }}" + _dns_server: "{{ _ctlplane_net.[dns_version|default('dns_v4')] }}" + _gateway_ip: "{{ _ctlplane_net[gw_version|default('gw_v4')] }}" _interface_mtu: "{{ _node_net.networks.ctlplane.mtu }}" - _ctlplane_cidr: "{{ _node_net.networks.ctlplane.prefix_length_v4 }}" + _ctlplane_cidr: "{{ _node_net.networks.ctlplane[prefix_length_version|default('prefix_length_v4')] }}" ansible.builtin.template: src: "os_net_config_overcloud.yml.j2" dest: /etc/os-net-config/tripleo_config.yaml diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index 
8e49301122..14cf0b4b08 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -100,13 +100,13 @@ vars: _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}" _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}" - _ctlplane_ip: "{{ _undercloud_net.networks.ctlplane.ip_v4 }}" + _ctlplane_ip: "{{ _undercloud_net.networks.ctlplane[ip_version|default('ip_v4')] }}" _ctlplane_vip: "{{ cifmw_adoption_osp_deploy_scenario.undercloud.ctlplane_vip }}" _ctlplane_net: "{{ cifmw_networking_env_definition.networks.ctlplane }}" - _dns_server: "{{ _ctlplane_net.dns_v4 }}" - _gateway_ip: "{{ _ctlplane_net.gw_v4 }}" + _dns_server: "{{ _ctlplane_net[dns_version|default('dns_v4')] }}" + _gateway_ip: "{{ _ctlplane_net[gw_version|default('gw_v4')] }}" _interface_mtu: "{{ _undercloud_net.networks.ctlplane.mtu }}" - _ctlplane_cidr: "{{ _undercloud_net.networks.ctlplane.prefix_length_v4 }}" + _ctlplane_cidr: "{{ _undercloud_net.networks.ctlplane[prefix_length_version|default('prefix_length_v4')] }}" ansible.builtin.template: src: "os_net_config_undercloud.yml.j2" dest: /etc/os-net-config/tripleo_config.yaml @@ -123,9 +123,9 @@ vars: _undercloud_name: "{{ _vm_groups['osp-underclouds'] | first }}" _undercloud_net: "{{ cifmw_networking_env_definition.instances[_undercloud_name] }}" - _undercloud_ip: "{{ _undercloud_net.networks.ctlplane.ip_v4 }}" - _undercloud_net_prefix: "{{ _undercloud_net.networks.ctlplane.prefix_length_v4 }}" - _ctlplane_cidr: "{{ cifmw_networking_env_definition.networks.ctlplane.network_v4 }}" + _undercloud_ip: "{{ _undercloud_net.networks.ctlplane[ip_version|default('ip_v4')] }}" + _undercloud_net_prefix: "{{ _undercloud_net.networks.ctlplane[prefix_length_version|default('prefix_length_v4')] }}" + _ctlplane_cidr: "{{ cifmw_networking_env_definition.networks.ctlplane[network_version|default('network_v4')] }}" _interface_mtu: "{{ 
_undercloud_net.networks.ctlplane.mtu }}" _env_undercloud: config: @@ -161,7 +161,7 @@ value: "{{ _ctlplane_cidr | ansible.utils.nthhost(130) }}" - section: "ctlplane-subnet" option: "gateway" - value: "{{ cifmw_networking_env_definition.networks.ctlplane.gw_v4 }}" + value: "{{ cifmw_networking_env_definition.networks.ctlplane[gw_version|default('gw_v4')] }}" - section: "ctlplane-subnet" option: "inspection_iprange" value: "{{ _ctlplane_cidr | ansible.utils.nthhost(200) }},{{ _ctlplane_cidr | ansible.utils.nthhost(220) }}" diff --git a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 index 49c7468036..61cd49d2e5 100644 --- a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 @@ -8,7 +8,7 @@ edpm_node_ip: {{ _compute_1_ip }} edpm_computes: | {% for compute in _vm_groups['osp-computes'] %} {% set node_nets = cifmw_networking_env_definition.instances[compute] %} - ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane.ip_v4 }}" + ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }}" {% endfor %} {% endif %} @@ -16,14 +16,14 @@ edpm_computes: | edpm_networkers: | {% for networker in _vm_groups['osp-networkers'] | default([]) %} {% set node_nets = cifmw_networking_env_definition.instances[networker] %} - ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane.ip_v4 }}" + ["{{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }}" {% endfor %} {% endif %} source_galera_members: | {% for controller in _vm_groups['osp-controllers'] %} {% set node_nets = cifmw_networking_env_definition.instances[controller] %} - ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain 
}}"]="{{ node_nets.networks.internalapi.ip_v4 }}" + ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.internalapi[ip_version|default('ip_v4')] }}" {% endfor %} {% if _vm_groups['osp-computes'] | default([]) | length > 0 %} @@ -33,10 +33,10 @@ edpm_nodes: {{ compute }}: hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} ansible: - ansibleHost: {{ node_nets.networks.ctlplane.ip_v4 }} + ansibleHost: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} networks: {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %} - - fixedIP: {{ node_nets.networks[net].ip_v4 }} + - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }} name: {{ net }} subnetName: subnet1 {% if net == 'ctlplane' %} @@ -53,10 +53,10 @@ edpm_nodes_networker: {{ networker }}: hostName: {{ networker }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} ansible: - ansibleHost: {{ node_nets.networks.ctlplane.ip_v4 }} + ansibleHost: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} networks: {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %} - - fixedIP: {{ node_nets.networks[net].ip_v4 }} + - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }} name: {{ net }} subnetName: subnet1 {% if net == 'ctlplane' %} @@ -66,6 +66,6 @@ edpm_nodes_networker: {% endfor %} {% endif %} -upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane.dns_v4 | first }} +upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane[dns_version|default('dns_v4')] | first }} os_cloud_name: {{ cifmw_adoption_osp_deploy_scenario.stacks[0].stackname }} standalone_ip: {{ _undercloud_ip }} diff --git a/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 b/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 index f66f28e485..5a22412a01 100644 --- 
a/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 +++ b/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 @@ -36,7 +36,7 @@ network_config: mtu: {{ net.mtu }} vlan_id: {{ net.vlan_id }} addresses: - - ip_netmask: {{ net.ip_v4 }}/{{ net.prefix_length_v4 }} + - ip_netmask: {{ net[ip_version|default('ip_v4')] }}/{{ net[prefix_length_version|default('prefix_length_v4')] }} routes: [] {% endif %} {% endfor %} @@ -46,7 +46,7 @@ network_config: - type: ovs_bridge name: br-baremetal addresses: - - ip_netmask: {{ net.ip_v4 }}/{{ net.prefix_length_v4 }} + - ip_netmask: {{ net[ip_version|default('ip_v4')] }}/{{ net[prefix_length_version|default('prefix_length_v4')] }} use_dhcp: false routes: [] members: diff --git a/roles/adoption_osp_deploy/templates/os_net_config_undercloud.yml.j2 b/roles/adoption_osp_deploy/templates/os_net_config_undercloud.yml.j2 index 739b779adc..aed44366f8 100644 --- a/roles/adoption_osp_deploy/templates/os_net_config_undercloud.yml.j2 +++ b/roles/adoption_osp_deploy/templates/os_net_config_undercloud.yml.j2 @@ -1,4 +1,14 @@ #jinja2: trim_blocks:True, lstrip_blocks:True +{% if ':' in _ctlplane_ip %} +{% set _ctlplane_ip_cidr = 128 %} +{% else %} +{% set _ctlplane_ip_cidr = 32 %} +{% endif %} +{% if ':' in _ctlplane_vip %} +{% set _ctlplane_vip_cidr = 128 %} +{% else %} +{% set _ctlplane_vip_cidr = 32 %} +{% endif %} network_config: - type: ovs_bridge name: br-ctlplane @@ -12,8 +22,8 @@ network_config: domain: [] addresses: - ip_netmask: {{ _ctlplane_ip }}/{{ _ctlplane_cidr }} - - ip_netmask: {{ _ctlplane_ip }}/32 - - ip_netmask: {{ _ctlplane_vip }}/32 + - ip_netmask: {{ _ctlplane_ip }}/{{ _ctlplane_ip_cidr }} + - ip_netmask: {{ _ctlplane_vip }}/{{ _ctlplane_vip_cidr }} {% if cifmw_adoption_osp_deploy_scenario.undercloud.routes is defined %} {%- for route in cifmw_adoption_osp_deploy_scenario.undercloud.routes %} routes: @@ -38,8 +48,12 @@ network_config: mtu: {{ net.mtu }} vlan_id: {{ net.vlan_id }} addresses: - - 
ip_netmask: {{ net.ip_v4 }}/{{ net.prefix_length_v4 }} + - ip_netmask: {{ net[ip_version|default('ip_v4')] }}/{{ net[prefix_length_version|default('prefix_length_v4')] }} + {% if '.' in net[ip_version|default('ip_v4')] %} - ip_netmask: {{ net.ip_v4.split('.')[:3] | join('.') }}.2/32 + {% else %} + - ip_netmask: {{ net.ip_v6.split(':')[:5] | join(':') }}:99/128 + {% endif %} routes: [] {% endif %} {% endfor %} diff --git a/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 index 90c34976e8..f12efe0ffa 100644 --- a/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 @@ -5,14 +5,14 @@ {% for node in _vm_groups[group] %} {% set node_nets = cifmw_networking_env_definition.instances[node] %} {{ node }}: - ansible_host: {{ node_nets.networks.ctlplane.ip_v4 }} + ansible_host: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} canonical_hostname: {{ node }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} - ctlplane_ip: {{ node_nets.networks.ctlplane.ip_v4 }} + ctlplane_ip: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} {% for network_name, net in node_nets.networks.items() %} {% if 'vlan_id' in net %} {% set net_name = ['storage_mgmt'] if network_name == 'storagemgmt' else [network_name] %} {% set net_name = ['internal_api'] if network_name == 'internalapi' else [network_name] %} - {{ net_name[0] }}_ip: {{ net.ip_v4 }} + {{ net_name[0] }}_ip: {{ net[ip_version|default('ip_v4')] }} {% endif %} {% endfor %} {% endfor %} @@ -41,6 +41,6 @@ computes: {% set node_nets = cifmw_networking_env_definition.instances[node] %} {{ node }}: ansible_user: tripleo-admin - ansible_host: {{ node_nets.networks.ctlplane.ip_v4 }} + ansible_host: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} {% endfor %} {% endfor %} From 1353fd8752f93dc5941efb99ea6849515ad43ed2 
Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Wed, 18 Jun 2025 13:43:14 +0300 Subject: [PATCH 185/480] fix upstream gate patch 3036 seperated testing from deployment but did not updated upstream job this patch should fix upstream gating and run post deployment as well as deployment playbook --- ci/playbooks/edpm/run.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/ci/playbooks/edpm/run.yml b/ci/playbooks/edpm/run.yml index acd2a186e5..53504af261 100644 --- a/ci/playbooks/edpm/run.yml +++ b/ci/playbooks/edpm/run.yml @@ -32,3 +32,21 @@ {%- endfor %} {%- endif %} -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" + + - name: Run Podified EDPM post deployment + ansible.builtin.command: + chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + cmd: >- + ansible-playbook post-deployment.yml + -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @scenarios/centos-9/base.yml + -e @scenarios/centos-9/edpm_ci.yml + {%- if edpm_file.stat.exists %} + -e @{{ ansible_user_dir }}/ci-framework-data/artifacts/edpm-ansible.yml + {%- endif %} + {%- if cifmw_extras is defined %} + {%- for extra_var in cifmw_extras %} + -e "{{ extra_var }}" + {%- endfor %} + {%- endif %} + -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" From aaf2ea517cfd583e1851ed4f7a17feecdc974ddd Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Tue, 17 Jun 2025 18:13:54 +0530 Subject: [PATCH 186/480] Donot update go.mod file for standalone operator Watcher operator is not yet intergrated in openstack operator. When ` Get base module name from go.mod operator_base` tasks runs, It fails with following error: ``` msg: 'Unexpected templating type error occurred on ({{ go_mod_out[''content''] | b64decode | regex_search(cifmw_operator_build_org + ''/'' + operator.name + ''/(\\w*)\\s'', ''\\1'') | first }}): ''NoneType'' object is not iterable. 
''NoneType'' object is not iterable' ``` As the content of openstack operator go.mod file does not contains reference of watcher operator. In order to fix that, this pr breaks the above task into two tasks. First task get the operator_base_module and in second task, we set operator_base_module_name var if operator_base_module is not empty. It will make sure non integrated operator as well as integrated operator works with this role by updating go.mod file based on need. Signed-off-by: Chandan Kumar (raukadah) --- roles/operator_build/tasks/build.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/roles/operator_build/tasks/build.yml b/roles/operator_build/tasks/build.yml index 1573fd8389..2588caec77 100644 --- a/roles/operator_build/tasks/build.yml +++ b/roles/operator_build/tasks/build.yml @@ -24,11 +24,17 @@ src: "{{ cifmw_operator_build_meta_src }}/go.mod" register: go_mod_out - - name: "{{ operator.name }} - Get base module name from go.mod" # noqa: name[template] + - name: "{{ operator.name }} - Get base module from go.mod" # noqa: name[template] ansible.builtin.set_fact: - operator_base_module_name: "{{ go_mod_out['content'] | b64decode | regex_search(cifmw_operator_build_org + '/' + operator.name + '/(\\w*)\\s', '\\1') | first }}" + operator_base_module: "{{ go_mod_out['content'] | b64decode | regex_search(cifmw_operator_build_org + '/' + operator.name + '/(\\w*)\\s', '\\1') }}" + + - name: Get the base module name not empty operator_base_module + when: operator_base_module is not none + ansible.builtin.set_fact: + operator_base_module_name: "{{ operator_base_module | first }}" - name: "{{ operator.name }} - Set default api path" # noqa: name[template] + when: operator_base_module is not none ansible.builtin.set_fact: operator_api_path: "github.com/{{ cifmw_operator_build_org }}/{{ operator.name }}/{{ operator_base_module_name }}" @@ -49,6 +55,7 @@ - operator.name != cifmw_operator_build_meta_name - operator.pr_owner is defined 
- operator.pr_sha is defined + - operator_base_module is not none - name: "{{ operator.name }} - Get latest commit when no PR is provided" # noqa: name[template] command-instead-of-module ansible.builtin.command: From a3b2b785e87239c51bc1c57457bdcb8d6f02f5cc Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Wed, 18 Jun 2025 17:18:59 +0200 Subject: [PATCH 187/480] Patch samples registry Due to https://issues.redhat.com/browse/OCPBUGS-30313 we observe the following message when checking the cluster health: `clusteroperators/openshift-samples is progressing and degraded`. This change simply patches the samples registry so the problem is not occurring anymore. --- roles/openshift_setup/tasks/main.yml | 3 +++ .../tasks/patch_samples_registry.yml | 15 +++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 roles/openshift_setup/tasks/patch_samples_registry.yml diff --git a/roles/openshift_setup/tasks/main.yml b/roles/openshift_setup/tasks/main.yml index 0d4fc9159c..38315d3aa6 100644 --- a/roles/openshift_setup/tasks/main.yml +++ b/roles/openshift_setup/tasks/main.yml @@ -218,5 +218,8 @@ - name: Patch network operator when using OVNKubernetes backend ansible.builtin.import_tasks: patch_network_operator.yml +- name: Patch samples registry + ansible.builtin.import_tasks: patch_samples_registry.yml + - name: Fix openshift-marketplace pods ansible.builtin.import_tasks: fix_openshift_marketplace.yml diff --git a/roles/openshift_setup/tasks/patch_samples_registry.yml b/roles/openshift_setup/tasks/patch_samples_registry.yml new file mode 100644 index 0000000000..b4a193072e --- /dev/null +++ b/roles/openshift_setup/tasks/patch_samples_registry.yml @@ -0,0 +1,15 @@ +--- +- name: Patch samples registry configuration + when: + - not cifmw_openshift_setup_dry_run + kubernetes.core.k8s_json_patch: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + api_version: 
samples.operator.openshift.io/v1 + kind: Config + name: cluster + patch: + - op: replace + path: /spec/samplesRegistry + value: registry.redhat.io From 88bb724594666e913ceab23fb03ee91486244f77 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Mon, 14 Apr 2025 16:02:54 +0200 Subject: [PATCH 188/480] Add multus_type to cifmw_networking_env_definition variable We now can select the multus_type between "bridge" and macvlan" so ci_multus would render each NetworkAttachmentDefinition appropiate without the assumption that each NAD has the same multus type defined at cifmw_ci_multus_default_nad_type. For a given NAD, if there's no multus_type variable, then it'd take (as previously) the default one. For adding the multus_type to cifmw_networking_env_definition we need to add the argument "type" for the input of networking_mapper. --- .ansible-lint | 1 + .../net_map/networking_definition.py | 25 +++++++++++ .../net_map/networking_env_definitions.py | 2 + .../module_utils/net_map/networking_mapper.py | 5 ++- roles/ci_multus/README.md | 4 +- roles/ci_multus/molecule/default/converge.yml | 11 ----- roles/ci_multus/molecule/default/molecule.yml | 1 + .../molecule/default/nads_output.yml | 44 +++++++++++++++++++ .../ci_multus/molecule/default/verify_crc.yml | 14 ++++++ .../molecule/resources/vars/shared_vars.yml | 5 ++- roles/ci_multus/templates/nad.yml.j2 | 12 +++-- .../molecule/default/converge.yml | 1 + .../molecule/default/vars/input.yml | 1 + ...finition-valid-all-tools-full-map-out.json | 6 ++- ...finition-valid-all-tools-networks-out.json | 6 ++- ...ition-valid-all-tools-partial-map-out.json | 6 ++- .../networking-definition-valid-all-tools.yml | 2 + 17 files changed, 122 insertions(+), 24 deletions(-) create mode 100644 roles/ci_multus/molecule/default/nads_output.yml diff --git a/.ansible-lint b/.ansible-lint index 3234711638..7f7c4b5b64 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -20,6 +20,7 @@ exclude_paths: - 
roles/kustomize_deploy/molecule/flexible_loop/files/networking-environment-definition.yml # Generated - roles/kustomize_deploy/molecule/flexible_loop/prepare.yml # import_playbook - roles/*/molecule/*/side_effect.yml # syntax-check[empty-playbook] https://github.com/ansible/molecule/issues/3617 + - roles/ci_multus/molecule/*/nads_output.yml # internal-error due to "---" characters strict: true quiet: false verbosity: 1 diff --git a/plugins/module_utils/net_map/networking_definition.py b/plugins/module_utils/net_map/networking_definition.py index e82bd81524..3897831b77 100644 --- a/plugins/module_utils/net_map/networking_definition.py +++ b/plugins/module_utils/net_map/networking_definition.py @@ -1044,6 +1044,7 @@ class SubnetBasedNetworkToolDefinition: __FIELD_ROUTES = "routes" __FIELD_ROUTES_IPV4 = "routes-v4" __FIELD_ROUTES_IPV6 = "routes-v6" + __FIELD_TYPE = "type" def __init__( self, @@ -1067,6 +1068,7 @@ def __init__( self.__ipv6_ranges: typing.List[HostNetworkRange] = [] self.__ipv4_routes: typing.List[HostNetworkRoute] = [] self.__ipv6_routes: typing.List[HostNetworkRoute] = [] + self.__type: typing.Optional[str] = None self.__parse_raw(raw_config) @@ -1092,6 +1094,13 @@ def __parse_raw(self, raw_definition: typing.Dict[str, typing.Any]): alone_field=self.__FIELD_ROUTES, ) + _validate_fields_one_of( + [ + self.__FIELD_TYPE, + ], + raw_definition, + parent_name=self.__object_name, + ) self.__parse_raw_range_field(raw_definition, self.__FIELD_RANGES) self.__parse_raw_range_field( raw_definition, self.__FIELD_RANGES_IPV4, ip_version=4 @@ -1107,6 +1116,7 @@ def __parse_raw(self, raw_definition: typing.Dict[str, typing.Any]): self.__parse_raw_route_field( raw_definition, self.__FIELD_ROUTES_IPV6, ip_version=6 ) + self.__parse_raw_type_field(raw_definition, self.__FIELD_TYPE) def __parse_raw_range_field( self, @@ -1190,6 +1200,21 @@ def __parse_raw_route_field( if ipv6_route: self.__ipv6_routes.append(ipv6_route) + @property + def type(self) -> str: + """The type 
of the tool for multus.""" + return self.__type + + def __parse_raw_type_field(self, raw_definition, field_name: str): + if field_name in raw_definition: + type = _validate_parse_field_type( + field_name, + raw_definition, + str, + parent_name=self.__object_name, + ) + self.__type = type + class MultusNetworkDefinition(SubnetBasedNetworkToolDefinition): """Parses and holds Multus configuration for a given network.""" diff --git a/plugins/module_utils/net_map/networking_env_definitions.py b/plugins/module_utils/net_map/networking_env_definitions.py index b9943db84c..7435608c7e 100644 --- a/plugins/module_utils/net_map/networking_env_definitions.py +++ b/plugins/module_utils/net_map/networking_env_definitions.py @@ -136,6 +136,7 @@ class MappedMultusNetworkConfig: ipv6_ranges: IPv6 ranges assigned to Multus. ipv4_routes: IPv4 routes assigned to Multus. ipv6_routes: IPv6 routes assigned to Multus. + multus_type: The type of the multus network. """ @@ -143,6 +144,7 @@ class MappedMultusNetworkConfig: ipv6_ranges: typing.List[MappedIpv6NetworkRange] ipv4_routes: typing.List[MappedIpv4NetworkRoute] ipv6_routes: typing.List[MappedIpv6NetworkRoute] + multus_type: typing.Optional[str] = None @dataclasses.dataclass(frozen=True) diff --git a/plugins/module_utils/net_map/networking_mapper.py b/plugins/module_utils/net_map/networking_mapper.py index c4fc222297..307975e69d 100644 --- a/plugins/module_utils/net_map/networking_mapper.py +++ b/plugins/module_utils/net_map/networking_mapper.py @@ -678,12 +678,15 @@ def __build_network_tool_common( for ip_route in tool_net_def.routes_ipv6 ], ] + multus_type = [] + if tool_type.__name__ == "MappedMultusNetworkConfig": + multus_type.append(tool_net_def.type) if any( route_field in tool_type.__dataclass_fields__ for route_field in ["ipv4_routes", "ipv6_routes"] ): - args_list = args_list + route_args_list + args_list = args_list + route_args_list + multus_type return tool_type(*args_list) diff --git a/roles/ci_multus/README.md 
b/roles/ci_multus/README.md index dbf302246a..f32f349303 100644 --- a/roles/ci_multus/README.md +++ b/roles/ci_multus/README.md @@ -10,7 +10,7 @@ Creates additional networks in a OCP cluster using NetworkAttachmentDefinition * `cifmw_ci_multus_namespace`: (String) The namespace where OCP resources will be installed. Defaults to `openstack`. * `cifmw_ci_multus_ocp_hostname`: (String) The OCP inventory hostname. Used to gather network information specific to those nodes, mostly the interfaces. Defaults to `crc`. * `cifmw_ci_multus_cniversion`: (String) The CNI specification version used when creating the resource. Defaults to `0.3.1`. -* `cifmw_ci_multus_default_nad_type`: (String) Default NAD type used when not specified by the network configuration. Defaults to `macvlan`. +* `cifmw_ci_multus_default_nad_type`: (String) Default NAD type used when not specified by the network configuration. Defaults to `macvlan`. You can select the type of each NAD by "multus_type" * `cifmw_ci_multus_default_nad_ipam_type`: (String) Default NAD IPAM type to be used when not specified by the network configuration. Defaults to `whereabouts`. * `cifmw_ci_multus_default_nad_ipam_type_ip_version``: (String) Default IP version to use in IPAM config. Defaults to `v4`. * `cifmw_ci_multus_dryrun`: (Bool) When enabled, tasks that require an OCP environment are skipped. Defaults to `false`. 
@@ -36,6 +36,7 @@ cifmw_ci_multus_net_info_patch_1: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + type: bridge ``` ## Limitations @@ -70,6 +71,7 @@ cifmw_ci_multus_net_info_patch_1: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + type: macvlan ansible.builtin.include_role: name: "ci_multus" ``` diff --git a/roles/ci_multus/molecule/default/converge.yml b/roles/ci_multus/molecule/default/converge.yml index 01fbfaf3a8..a216ed8bb0 100644 --- a/roles/ci_multus/molecule/default/converge.yml +++ b/roles/ci_multus/molecule/default/converge.yml @@ -44,17 +44,6 @@ ansible.builtin.include_vars: file: ../resources/vars/shared_vars.yml - - name: Override interface name in cifmw_networking_env_definition - vars: - _cifmw_networking_env_definition_patch: - instances: - crc: - networks: - default: - interface_name: "{{ hostvars.crc.ansible_default_ipv4.interface }}" - ansible.builtin.set_fact: - cifmw_networking_env_definition: "{{ cifmw_networking_env_definition | combine(_cifmw_networking_env_definition_patch, recursive=True) }}" - - name: Call ci_multus role ansible.builtin.include_role: name: "ci_multus" diff --git a/roles/ci_multus/molecule/default/molecule.yml b/roles/ci_multus/molecule/default/molecule.yml index 0e9c7db50e..4e06ba3303 100644 --- a/roles/ci_multus/molecule/default/molecule.yml +++ b/roles/ci_multus/molecule/default/molecule.yml @@ -26,6 +26,7 @@ provisioner: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + multus_type: macvlan cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" cifmw_openshift_kubeconfig: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" diff --git a/roles/ci_multus/molecule/default/nads_output.yml b/roles/ci_multus/molecule/default/nads_output.yml new file mode 100644 index 0000000000..a82492df46 --- /dev/null +++ b/roles/ci_multus/molecule/default/nads_output.yml @@ -0,0 +1,44 @@ +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: 
NetworkAttachmentDefinition +metadata: + labels: + osp/net: default + name: default + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "default", + "type": "bridge", + "bridge": "eth0", + "ipam": { + "type": "whereabouts", + "range": "192.168.122.0/24", + "range_start": "192.168.122.30", + "range_end": "192.168.122.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: patchnetwork + name: patchnetwork + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "patchnetwork", + "type": "macvlan", + "master": "eth2", + "ipam": { + "type": "whereabouts", + "range": "192.168.122.0/24", + "range_start": "192.168.122.30", + "range_end": "192.168.122.70" + } + } diff --git a/roles/ci_multus/molecule/default/verify_crc.yml b/roles/ci_multus/molecule/default/verify_crc.yml index 6f8e17e078..ce032a848f 100644 --- a/roles/ci_multus/molecule/default/verify_crc.yml +++ b/roles/ci_multus/molecule/default/verify_crc.yml @@ -35,6 +35,20 @@ (_ci_multus_molecule_nads_out is failed) or (_ci_multus_molecule_nads_out.resources | length == 0) + - name: Store output spec + ansible.builtin.set_fact: + _ci_multus_out_spec: >- + {{ + _ci_multus_molecule_nads_out.resources | + map(attribute='spec.config') + }} + + - name: Assert expected Network Attachment Definitions content spec with the expected + vars: + _ci_multus_expected_spec: "{{ lookup('file', 'nads_output.yml', rstrip=True) | from_yaml_all | map(attribute='spec.config') }}" + ansible.builtin.assert: + that: _ci_multus_out_spec == _ci_multus_expected_spec + - name: Create a test pod to attach a network kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" diff --git a/roles/ci_multus/molecule/resources/vars/shared_vars.yml b/roles/ci_multus/molecule/resources/vars/shared_vars.yml index ba0c56b73c..22fc2bb660 100644 --- a/roles/ci_multus/molecule/resources/vars/shared_vars.yml +++ 
b/roles/ci_multus/molecule/resources/vars/shared_vars.yml @@ -20,7 +20,7 @@ cifmw_networking_env_definition: name: crc networks: default: - interface_name: "eth1" + interface_name: "eth0" network_name: default networks: default: @@ -32,6 +32,7 @@ cifmw_networking_env_definition: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + multus_type: bridge deny_network: gw_v4: 192.168.122.1 network_name: deny_network @@ -41,6 +42,7 @@ cifmw_networking_env_definition: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + multus_type: bridge not_allowed_network: gw_v4: 192.168.122.1 network_name: not_allowed_network @@ -50,6 +52,7 @@ cifmw_networking_env_definition: ipv4_ranges: - start: 192.168.122.30 end: 192.168.122.70 + multus_type: bridge no_multus_network: gw_v4: 192.168.122.1 network_name: patchnetwork diff --git a/roles/ci_multus/templates/nad.yml.j2 b/roles/ci_multus/templates/nad.yml.j2 index 10324ec080..44d4ae2efc 100644 --- a/roles/ci_multus/templates/nad.yml.j2 +++ b/roles/ci_multus/templates/nad.yml.j2 @@ -1,4 +1,9 @@ {% for network_name, network_details in _cifmw_ci_multus_net_info.items() %} +{% if network_details.tools.get('multus', {}).get('multus_type', None) %} +{% set multus_type = network_details.tools.multus.multus_type %} +{% else %} +{% set multus_type = cifmw_ci_multus_default_nad_type %} +{% endif %} --- apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition @@ -12,12 +17,11 @@ spec: { "cniVersion": "{{ cifmw_ci_multus_cniversion }}", "name": "{{ network_name }}", -{% if cifmw_ci_multus_default_nad_type == "macvlan" %} - "type": "macvlan", + "type": "{{ multus_type }}", +{% if multus_type == "macvlan" %} "master": "{{ network_details.interface_name }}", {% endif %} -{% if cifmw_ci_multus_default_nad_type == "bridge" %} - "type": "bridge", +{% if multus_type == "bridge" %} "bridge": "{{ network_details.interface_name }}", {% endif %} "ipam": { diff --git a/roles/networking_mapper/molecule/default/converge.yml 
b/roles/networking_mapper/molecule/default/converge.yml index da5656c793..5bbf155d6a 100644 --- a/roles/networking_mapper/molecule/default/converge.yml +++ b/roles/networking_mapper/molecule/default/converge.yml @@ -191,3 +191,4 @@ ansible.builtin.assert: that: - "_content.networks['internalapi'].vlan_id == 100" + - "_content.networks['internalapi'].tools.multus.multus_type == 'bridge'" diff --git a/roles/networking_mapper/molecule/default/vars/input.yml b/roles/networking_mapper/molecule/default/vars/input.yml index 08da4566f3..c7059dba76 100644 --- a/roles/networking_mapper/molecule/default/vars/input.yml +++ b/roles/networking_mapper/molecule/default/vars/input.yml @@ -39,6 +39,7 @@ networks: ranges: - start: 50 end: 59 + type: "bridge" storage: network: "172.18.0.0/24" vlan: 21 diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json index 76afade0d5..eb7b3c328e 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json @@ -28,7 +28,8 @@ ], "ipv6_ranges": [], "ipv4_routes": [], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "macvlan" }, "netconfig": { "ipv4_ranges": [ @@ -88,7 +89,8 @@ "gateway": "192.168.122.1" } ], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json index 8f7ad6f134..5135be2f1d 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json @@ -27,7 +27,8 @@ ], "ipv6_ranges": 
[], "ipv4_routes": [], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "macvlan" }, "netconfig": { "ipv4_ranges": [ @@ -87,7 +88,8 @@ "gateway": "192.168.122.1" } ], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json index 64e8e70cdc..1fde484ed9 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json @@ -28,7 +28,8 @@ ], "ipv6_ranges": [], "ipv4_routes": [], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "macvlan" }, "netconfig": { "ipv4_ranges": [ @@ -88,7 +89,8 @@ "gateway": "192.168.122.1" } ], - "ipv6_routes": [] + "ipv6_routes": [], + "multus_type": "bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml index e981c5fb38..a8a12a88ea 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml @@ -4,6 +4,7 @@ networks: mtu: 1500 tools: multus: + type: macvlan ranges: - start: 30 end: 39 @@ -26,6 +27,7 @@ networks: ranges: - start: 30 end: 39 + type: bridge routes: - destination: "192.168.121.0/24" gateway: "192.168.122.1" From c6054654bb5e5d2b603329ec121e47f41f20ff07 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Mon, 23 Jun 2025 09:38:46 +0000 Subject: [PATCH 189/480] Reduce comps and disks count for multi-ns VA --- scenarios/reproducers/va-multi.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/scenarios/reproducers/va-multi.yml 
b/scenarios/reproducers/va-multi.yml index 467b3ef0d1..158d6c5a43 100644 --- a/scenarios/reproducers/va-multi.yml +++ b/scenarios/reproducers/va-multi.yml @@ -95,7 +95,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" @@ -103,15 +103,13 @@ cifmw_libvirt_manager_configuration: disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" - extra_disks_num: 3 - extra_disks_size: 30G nets: - ocpbm - osp_trunk compute2: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" @@ -119,8 +117,6 @@ cifmw_libvirt_manager_configuration: disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" - extra_disks_num: 3 - extra_disks_size: 30G nets: - ocpbm - osptrunk2 From 7e5498dcaee6888e0b7b689bc2fb2acc2c53c7e8 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Sat, 21 Jun 2025 11:12:14 +0530 Subject: [PATCH 190/480] Fix undefined ansible_user_dir var in inject success flag task https://github.com/openstack-k8s-operators/ci-framework/pull/3036 Separate deployment from testing. 
As it also moves `inject success flag` task to post-deployment playbook. The play where `inject success flag` task runs, gather_facts is set to false. ansible_user_dir is coming undefined. In order to fix that, we are adding a task to collect ansible_user_dir fact. This will make sure ansible_user_dir exists. Note: This code is generated by Gemini 2.5 Flash with following prompt ``` what gather_facts I can run to collect ansible_user_dir ``` Generated-By: Gemini 2.5 Flash Signed-off-by: Chandan Kumar (raukadah) --- post-deployment.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/post-deployment.yml b/post-deployment.yml index 2160c4e092..6dce5588d3 100644 --- a/post-deployment.yml +++ b/post-deployment.yml @@ -54,6 +54,12 @@ ansible.builtin.import_role: name: run_hook + - name: Gather minimal facts for ansible_user_dir + ansible.builtin.setup: + gather_subset: + - min + filter: "ansible_user_dir" + - name: Inject success flag ansible.builtin.file: path: "{{ ansible_user_dir }}/cifmw-success" From ad6505a6417e9ab40ad91542ba86eb2eaae26e46 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 18 Jun 2025 08:41:02 +0200 Subject: [PATCH 191/480] Fix the default value of cifmw_nfs_target The target var should be "computes", not "compute". It raises an issue that later the host is skipped by the Ansible. This commit fixes porting issue raised in Pull Request [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/3038 Signed-off-by: Daniel Pawlik --- roles/cifmw_nfs/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/cifmw_nfs/defaults/main.yml b/roles/cifmw_nfs/defaults/main.yml index 3d1a27cb63..20fd6734c6 100644 --- a/roles/cifmw_nfs/defaults/main.yml +++ b/roles/cifmw_nfs/defaults/main.yml @@ -19,4 +19,4 @@ # All variables within this role should have a prefix of "cifmw_nfs" cifmw_nfs_network: "storage" -cifmw_nfs_target: "compute" +cifmw_nfs_target: "computes" From 23c1cb6b0074a821dc1a7a598d5326845bcd8132 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Fri, 30 May 2025 06:21:33 -0400 Subject: [PATCH 192/480] Create hook for adding custom CA certs This patch enables adding custom CA certs using a hook. The intended usage is in downstream jobs that want to add certificates into the combined-ca-bundle, e.g. internal certificates. --- hooks/playbooks/install_custom_ca_certs.yaml | 40 ++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 hooks/playbooks/install_custom_ca_certs.yaml diff --git a/hooks/playbooks/install_custom_ca_certs.yaml b/hooks/playbooks/install_custom_ca_certs.yaml new file mode 100644 index 0000000000..dbcd4a6d41 --- /dev/null +++ b/hooks/playbooks/install_custom_ca_certs.yaml @@ -0,0 +1,40 @@ +--- +- name: Set up custom CA secret for OpenStack control plane + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + vars: + _custom_ca_cert_filepath: "{{ custom_ca_cert_filepath | mandatory }}" + _namespace: "{{ namespace | default('openstack') }}" + _controlplane_name: "{{ controlplane_name | default('controlplane') }}" + tasks: + - name: Read custom CA certificate file + ansible.builtin.slurp: + src: "{{ _custom_ca_cert_filepath }}" + register: custom_ca_certs + + - name: Create custom CA secret + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + type: Opaque + 
metadata: + name: custom-ca-certs + namespace: "{{ _namespace }}" + data: + CustomCACerts: "{{ custom_ca_certs.content }}" + + - name: Patch OpenStack control plane to use custom CA secret + kubernetes.core.k8s: + state: patched + kind: OpenStackControlPlane + api_version: core.openstack.org/v1beta1 + name: "{{ _controlplane_name }}" + namespace: "{{ _namespace }}" + definition: + spec: + tls: + podLevel: + enabled: true + caBundleSecretName: custom-ca-certs From e66ca1a2c46e01a1230d57eb832f75870976afde Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Wed, 14 May 2025 16:33:07 +0200 Subject: [PATCH 193/480] Add multus_attach to ci_multus bridge multus type We're adding a new variable attach at the networking_mapper to map its value to the output variable multus_attach. This is used then to generate NAD with information about where to attach the multus type bridge, if in a interface or a linux-bridge. --- .../net_map/networking_definition.py | 26 ++++++++++++++++++- .../net_map/networking_env_definitions.py | 2 ++ .../module_utils/net_map/networking_mapper.py | 4 ++- roles/ci_multus/README.md | 2 ++ roles/ci_multus/defaults/main.yml | 1 + roles/ci_multus/molecule/default/molecule.yml | 14 ++++++++++ .../molecule/default/nads_output.yml | 22 ++++++++++++++++ .../ci_multus/molecule/default/verify_crc.yml | 15 ++++++++--- .../molecule/resources/vars/shared_vars.yml | 1 + roles/ci_multus/templates/nad.yml.j2 | 9 +++++++ .../molecule/default/converge.yml | 1 + .../molecule/default/vars/input.yml | 1 + ...finition-valid-all-tools-full-map-out.json | 3 ++- ...finition-valid-all-tools-networks-out.json | 3 ++- ...ition-valid-all-tools-partial-map-out.json | 3 ++- .../networking-definition-valid-all-tools.yml | 1 + 16 files changed, 99 insertions(+), 9 deletions(-) diff --git a/plugins/module_utils/net_map/networking_definition.py b/plugins/module_utils/net_map/networking_definition.py index 3897831b77..e355fff159 100644 --- 
a/plugins/module_utils/net_map/networking_definition.py +++ b/plugins/module_utils/net_map/networking_definition.py @@ -1045,6 +1045,7 @@ class SubnetBasedNetworkToolDefinition: __FIELD_ROUTES_IPV4 = "routes-v4" __FIELD_ROUTES_IPV6 = "routes-v6" __FIELD_TYPE = "type" + __FIELD_ATTACH = "attach" def __init__( self, @@ -1069,6 +1070,7 @@ def __init__( self.__ipv4_routes: typing.List[HostNetworkRoute] = [] self.__ipv6_routes: typing.List[HostNetworkRoute] = [] self.__type: typing.Optional[str] = None + self.__attach: typing.Optional[str] = None self.__parse_raw(raw_config) @@ -1093,7 +1095,6 @@ def __parse_raw(self, raw_definition: typing.Dict[str, typing.Any]): parent_name=self.__object_name, alone_field=self.__FIELD_ROUTES, ) - _validate_fields_one_of( [ self.__FIELD_TYPE, @@ -1101,6 +1102,13 @@ def __parse_raw(self, raw_definition: typing.Dict[str, typing.Any]): raw_definition, parent_name=self.__object_name, ) + _validate_fields_one_of( + [ + self.__FIELD_ATTACH, + ], + raw_definition, + parent_name=self.__object_name, + ) self.__parse_raw_range_field(raw_definition, self.__FIELD_RANGES) self.__parse_raw_range_field( raw_definition, self.__FIELD_RANGES_IPV4, ip_version=4 @@ -1117,6 +1125,7 @@ def __parse_raw(self, raw_definition: typing.Dict[str, typing.Any]): raw_definition, self.__FIELD_ROUTES_IPV6, ip_version=6 ) self.__parse_raw_type_field(raw_definition, self.__FIELD_TYPE) + self.__parse_raw_type_attach(raw_definition, self.__FIELD_ATTACH) def __parse_raw_range_field( self, @@ -1215,6 +1224,21 @@ def __parse_raw_type_field(self, raw_definition, field_name: str): ) self.__type = type + @property + def attach(self) -> str: + """Where to attach the multus bridge""" + return self.__attach + + def __parse_raw_type_attach(self, raw_definition, field_name: str): + if field_name in raw_definition: + attach = _validate_parse_field_type( + field_name, + raw_definition, + str, + parent_name=self.__object_name, + ) + self.__attach = attach + class 
MultusNetworkDefinition(SubnetBasedNetworkToolDefinition): """Parses and holds Multus configuration for a given network.""" diff --git a/plugins/module_utils/net_map/networking_env_definitions.py b/plugins/module_utils/net_map/networking_env_definitions.py index 7435608c7e..8df24b0d07 100644 --- a/plugins/module_utils/net_map/networking_env_definitions.py +++ b/plugins/module_utils/net_map/networking_env_definitions.py @@ -137,6 +137,7 @@ class MappedMultusNetworkConfig: ipv4_routes: IPv4 routes assigned to Multus. ipv6_routes: IPv6 routes assigned to Multus. multus_type: The type of the multus network. + multus_attach: The type of the multus network. """ @@ -145,6 +146,7 @@ class MappedMultusNetworkConfig: ipv4_routes: typing.List[MappedIpv4NetworkRoute] ipv6_routes: typing.List[MappedIpv6NetworkRoute] multus_type: typing.Optional[str] = None + multus_attach: typing.Optional[str] = None @dataclasses.dataclass(frozen=True) diff --git a/plugins/module_utils/net_map/networking_mapper.py b/plugins/module_utils/net_map/networking_mapper.py index 307975e69d..4f9c3ffed5 100644 --- a/plugins/module_utils/net_map/networking_mapper.py +++ b/plugins/module_utils/net_map/networking_mapper.py @@ -679,14 +679,16 @@ def __build_network_tool_common( ], ] multus_type = [] + multus_attach = [] if tool_type.__name__ == "MappedMultusNetworkConfig": multus_type.append(tool_net_def.type) + multus_attach.append(tool_net_def.attach) if any( route_field in tool_type.__dataclass_fields__ for route_field in ["ipv4_routes", "ipv6_routes"] ): - args_list = args_list + route_args_list + multus_type + args_list = args_list + route_args_list + multus_type + multus_attach return tool_type(*args_list) diff --git a/roles/ci_multus/README.md b/roles/ci_multus/README.md index f32f349303..a7d112d6ef 100644 --- a/roles/ci_multus/README.md +++ b/roles/ci_multus/README.md @@ -11,6 +11,7 @@ Creates additional networks in a OCP cluster using NetworkAttachmentDefinition * `cifmw_ci_multus_ocp_hostname`: 
(String) The OCP inventory hostname. Used to gather network information specific to those nodes, mostly the interfaces. Defaults to `crc`. * `cifmw_ci_multus_cniversion`: (String) The CNI specification version used when creating the resource. Defaults to `0.3.1`. * `cifmw_ci_multus_default_nad_type`: (String) Default NAD type used when not specified by the network configuration. Defaults to `macvlan`. You can select the type of each NAD by "multus_type" +* `cifmw_ci_multus_default_bridge_attach`: (String) Set place to attach the bridge when NAD type is bridge. Defaults to `interface`. You can select the place to attach it by "multus_attach". * `cifmw_ci_multus_default_nad_ipam_type`: (String) Default NAD IPAM type to be used when not specified by the network configuration. Defaults to `whereabouts`. * `cifmw_ci_multus_default_nad_ipam_type_ip_version``: (String) Default IP version to use in IPAM config. Defaults to `v4`. * `cifmw_ci_multus_dryrun`: (Bool) When enabled, tasks that require an OCP environment are skipped. Defaults to `false`. 
@@ -37,6 +38,7 @@ cifmw_ci_multus_net_info_patch_1: - start: 192.168.122.30 end: 192.168.122.70 type: bridge + attach: linux-bridge ``` ## Limitations diff --git a/roles/ci_multus/defaults/main.yml b/roles/ci_multus/defaults/main.yml index 24b9166223..31a96ce913 100644 --- a/roles/ci_multus/defaults/main.yml +++ b/roles/ci_multus/defaults/main.yml @@ -24,6 +24,7 @@ cifmw_ci_multus_namespace: "openstack" cifmw_ci_multus_ocp_hostname: "crc" cifmw_ci_multus_cniversion: "0.3.1" cifmw_ci_multus_default_nad_type: "macvlan" +cifmw_ci_multus_default_bridge_attach: "interface" cifmw_ci_multus_default_nad_ipam_type: "whereabouts" cifmw_ci_multus_default_nad_ipam_type_ip_version: "v4" # Input configuration for ci_multus role diff --git a/roles/ci_multus/molecule/default/molecule.yml b/roles/ci_multus/molecule/default/molecule.yml index 4e06ba3303..6f2dcb39b9 100644 --- a/roles/ci_multus/molecule/default/molecule.yml +++ b/roles/ci_multus/molecule/default/molecule.yml @@ -15,6 +15,7 @@ provisioner: _expected_multus_networks: - default - patchnetwork + - bridge-to-linux-bridge cifmw_ci_multus_net_info_patch_1: patchnetwork: gw_v4: 192.168.122.1 @@ -27,6 +28,19 @@ provisioner: - start: 192.168.122.30 end: 192.168.122.70 multus_type: macvlan + cifmw_ci_multus_net_info_patch_2: + bridge-to-linux-bridge: + gw_v4: 192.168.122.1 + network_name: bridge-to-linux-bridge + network_v4: 192.168.122.0/24 + interface_name: eth1 + tools: + multus: + ipv4_ranges: + - start: 192.168.122.30 + end: 192.168.122.70 + multus_type: bridge + multus_attach: linux-bridge cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" cifmw_openshift_kubeconfig: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" diff --git a/roles/ci_multus/molecule/default/nads_output.yml b/roles/ci_multus/molecule/default/nads_output.yml index a82492df46..e8dbd98e0f 100644 --- a/roles/ci_multus/molecule/default/nads_output.yml +++ 
b/roles/ci_multus/molecule/default/nads_output.yml @@ -1,6 +1,28 @@ --- apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: bridge-to-linux-bridge + name: bridge-to-linux-bridge + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "bridge-to-linux-bridge", + "type": "bridge", + "bridge": "bridge-to-linux-bridge", + "ipam": { + "type": "whereabouts", + "range": "192.168.122.0/24", + "range_start": "192.168.122.30", + "range_end": "192.168.122.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition metadata: labels: osp/net: default diff --git a/roles/ci_multus/molecule/default/verify_crc.yml b/roles/ci_multus/molecule/default/verify_crc.yml index ce032a848f..fec2a7825b 100644 --- a/roles/ci_multus/molecule/default/verify_crc.yml +++ b/roles/ci_multus/molecule/default/verify_crc.yml @@ -17,6 +17,8 @@ - name: Verify CRC hosts: all gather_facts: true + vars: + _ci_multus_expected_spec: "{{ lookup('file', 'nads_output.yml', rstrip=True) | from_yaml_all | map(attribute='spec.config') }}" tasks: - name: Include default vars ansible.builtin.include_vars: @@ -43,11 +45,16 @@ map(attribute='spec.config') }} - - name: Assert expected Network Attachment Definitions content spec with the expected - vars: - _ci_multus_expected_spec: "{{ lookup('file', 'nads_output.yml', rstrip=True) | from_yaml_all | map(attribute='spec.config') }}" + - name: Ensure both lists have the same length ansible.builtin.assert: - that: _ci_multus_out_spec == _ci_multus_expected_spec + that: + - _ci_multus_out_spec | length == _ci_multus_expected_spec | length + + - name: Compare each corresponding element in the lists + ansible.builtin.assert: + that: + - (item.0 | replace('\n', '')) == (item.1 | replace('\n', '')) + loop: "{{ _ci_multus_out_spec | zip(_ci_multus_expected_spec) | list }}" - name: Create a test pod to attach a network kubernetes.core.k8s: diff --git 
a/roles/ci_multus/molecule/resources/vars/shared_vars.yml b/roles/ci_multus/molecule/resources/vars/shared_vars.yml index 22fc2bb660..4c9bd788b0 100644 --- a/roles/ci_multus/molecule/resources/vars/shared_vars.yml +++ b/roles/ci_multus/molecule/resources/vars/shared_vars.yml @@ -63,3 +63,4 @@ cifmw_ci_multus_deny_list: cifmw_ci_multus_allow_list: - default - patchnetwork + - bridge-to-linux-bridge diff --git a/roles/ci_multus/templates/nad.yml.j2 b/roles/ci_multus/templates/nad.yml.j2 index 44d4ae2efc..6a57a32cda 100644 --- a/roles/ci_multus/templates/nad.yml.j2 +++ b/roles/ci_multus/templates/nad.yml.j2 @@ -4,6 +4,11 @@ {% else %} {% set multus_type = cifmw_ci_multus_default_nad_type %} {% endif %} +{% if network_details.tools.get('multus', {}).get('multus_attach', None) %} +{% set multus_attach = network_details.tools.multus.multus_attach %} +{% else %} +{% set multus_attach = cifmw_ci_multus_default_bridge_attach %} +{% endif %} --- apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition @@ -22,7 +27,11 @@ spec: "master": "{{ network_details.interface_name }}", {% endif %} {% if multus_type == "bridge" %} +{% if multus_attach == "interface" %} "bridge": "{{ network_details.interface_name }}", +{% elif multus_attach == "linux-bridge" %} + "bridge": "{{ network_name }}", +{% endif %} {% endif %} "ipam": { "type": "{{ cifmw_ci_multus_default_nad_ipam_type }}", diff --git a/roles/networking_mapper/molecule/default/converge.yml b/roles/networking_mapper/molecule/default/converge.yml index 5bbf155d6a..6b9b7dee75 100644 --- a/roles/networking_mapper/molecule/default/converge.yml +++ b/roles/networking_mapper/molecule/default/converge.yml @@ -192,3 +192,4 @@ that: - "_content.networks['internalapi'].vlan_id == 100" - "_content.networks['internalapi'].tools.multus.multus_type == 'bridge'" + - "_content.networks['internalapi'].tools.multus.multus_attach == 'linux-bridge'" diff --git a/roles/networking_mapper/molecule/default/vars/input.yml 
b/roles/networking_mapper/molecule/default/vars/input.yml index c7059dba76..05ed78ea31 100644 --- a/roles/networking_mapper/molecule/default/vars/input.yml +++ b/roles/networking_mapper/molecule/default/vars/input.yml @@ -40,6 +40,7 @@ networks: - start: 50 end: 59 type: "bridge" + attach: "linux-bridge" storage: network: "172.18.0.0/24" vlan: 21 diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json index eb7b3c328e..94e4e9746b 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-full-map-out.json @@ -90,7 +90,8 @@ } ], "ipv6_routes": [], - "multus_type": "bridge" + "multus_type": "bridge", + "multus_attach": "linux-bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json index 5135be2f1d..8e5a040981 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-networks-out.json @@ -89,7 +89,8 @@ } ], "ipv6_routes": [], - "multus_type": "bridge" + "multus_type": "bridge", + "multus_attach": "linux-bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json index 1fde484ed9..dbcaa4be89 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools-partial-map-out.json @@ -90,7 +90,8 @@ } ], 
"ipv6_routes": [], - "multus_type": "bridge" + "multus_type": "bridge", + "multus_attach": "linux-bridge" }, "netconfig": { "ipv4_ranges": [ diff --git a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml index a8a12a88ea..8d3d8b2f88 100644 --- a/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml +++ b/tests/unit/module_utils/test_files/networking-definition-valid-all-tools.yml @@ -28,6 +28,7 @@ networks: - start: 30 end: 39 type: bridge + attach: linux-bridge routes: - destination: "192.168.121.0/24" gateway: "192.168.122.1" From 97a8022dbd7ca3ed3af8d1bea79e51b3d76dba3c Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Tue, 24 Jun 2025 12:22:29 +0200 Subject: [PATCH 194/480] Remove test case name from class name The validations framework currently puts test case name as part of the class name field, which in the end means that the test case name is duplicated, e.g. as it is processed by Polarion or other JUnit parser. This change alters the class name field to be just `validations`. 
--- .../filter_plugins/cifmw_validations_xml_filter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/validations/filter_plugins/cifmw_validations_xml_filter.py b/roles/validations/filter_plugins/cifmw_validations_xml_filter.py index 96367f4f28..bae6f3827a 100755 --- a/roles/validations/filter_plugins/cifmw_validations_xml_filter.py +++ b/roles/validations/filter_plugins/cifmw_validations_xml_filter.py @@ -39,8 +39,8 @@ - - + + @@ -80,7 +80,7 @@ def __map_xml_results(cls, test_results): }, ) for name, data in test_results.items(): - attributes = {"name": name, "classname": f"validations.{name}"} + attributes = {"name": name, "classname": "validations"} if "time" in data: attributes["time"] = cls.__float_conversion(data["time"]) tc_elm = ET.SubElement(ts_elm, "testcase", attrib=attributes) From 69aef1accb61b76c8fa6c8d367e04262e50f5a51 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Tue, 24 Jun 2025 13:00:39 +0200 Subject: [PATCH 195/480] Remove YAML extension from test case name The validations are specified as Ansible playbooks to be executed. Hence, in output, we see things like `invoke_tlse_playbooks.yml` as case name in generated XML. This change alters it to be presented as `invoke_tlse_playbooks`. 
--- .../validations/filter_plugins/cifmw_validations_xml_filter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/validations/filter_plugins/cifmw_validations_xml_filter.py b/roles/validations/filter_plugins/cifmw_validations_xml_filter.py index bae6f3827a..01bbbcd1d9 100755 --- a/roles/validations/filter_plugins/cifmw_validations_xml_filter.py +++ b/roles/validations/filter_plugins/cifmw_validations_xml_filter.py @@ -21,7 +21,7 @@ _internal_results: test-1: time: 2.54512 - test-case-2: + test-2.yml: time: 4.5450345 error: "error message" ansible.builtin.set_fact: @@ -80,6 +80,7 @@ def __map_xml_results(cls, test_results): }, ) for name, data in test_results.items(): + name = name.replace(".yml", "").replace(".yaml", "") attributes = {"name": name, "classname": "validations"} if "time" in data: attributes["time"] = cls.__float_conversion(data["time"]) From 42e2dc474ec5bdb2d671f469a046e7c1ad2f9d48 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 28 Mar 2025 15:57:52 +0530 Subject: [PATCH 196/480] Use transparent vlan in ci bootstrap vexxhost and IBM cloud both have transparent vlan enabled now, so let's use it. This will avoid creation and deletion of all vlan and trunk ports. Also will avoid env breakage due to cleanup of ports for autohold nodes. 
Depends-On: https://review.rdoproject.org/r/c/config/+/57549 Depends-On: https://review.rdoproject.org/r/c/config/+/57614 Related-Issue: OSPCIX-771 --- zuul.d/adoption.yaml | 2 ++ zuul.d/base.yaml | 1 + zuul.d/edpm_multinode.yaml | 4 ++++ zuul.d/kuttl_multinode.yaml | 1 + zuul.d/podified_multinode.yaml | 1 + zuul.d/tempest_multinode.yaml | 1 + 6 files changed, 10 insertions(+) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index f9e7aa10da..cdce1c15bf 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -48,6 +48,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 @@ -222,6 +223,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 2f4b8baced..a41ad6c290 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -166,6 +166,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index ded4fae462..06b61e2c80 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -13,6 +13,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 @@ -76,6 +77,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 @@ -153,6 +155,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 @@ -245,6 +248,7 @@ default: mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + 
transparent: true range: 192.168.122.0/24 internal-api: vlan: 20 diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index 4c86308c40..7d40c19edc 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -15,6 +15,7 @@ range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true router: false internal-api: vlan: 20 diff --git a/zuul.d/podified_multinode.yaml b/zuul.d/podified_multinode.yaml index da4a3d5b75..0948b1f0e4 100644 --- a/zuul.d/podified_multinode.yaml +++ b/zuul.d/podified_multinode.yaml @@ -23,6 +23,7 @@ range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true internal-api: vlan: 20 range: 172.17.0.0/24 diff --git a/zuul.d/tempest_multinode.yaml b/zuul.d/tempest_multinode.yaml index 2d88eafc58..1b7619013b 100644 --- a/zuul.d/tempest_multinode.yaml +++ b/zuul.d/tempest_multinode.yaml @@ -23,6 +23,7 @@ range: 192.168.122.0/24 mtu: "{{ ('ibm' in nodepool.cloud) | ternary('1440', '1500') }}" router_net: "" + transparent: true internal-api: vlan: 20 range: 172.17.0.0/24 From 0652b7b9039589e38a80e0164a05b119f5b75795 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 16 Jun 2025 11:39:09 +0530 Subject: [PATCH 197/480] Use role instead of playbooks - 06-deploy-edpm.yml It is continuation of simplification job execution [1]. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- deploy-edpm.yml | 63 +++++++++++++++++++-- playbooks/06-deploy-edpm.yml | 4 ++ playbooks/validations.yml | 2 +- roles/cifmw_setup/tasks/deploy_edpm.yml | 73 +++++++++++++++++++++++++ roles/cifmw_setup/tasks/hci_deploy.yml | 32 +++++++++++ 5 files changed, 169 insertions(+), 5 deletions(-) create mode 100644 roles/cifmw_setup/tasks/deploy_edpm.yml create mode 100644 roles/cifmw_setup/tasks/hci_deploy.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 3a5f55c113..894bcc5715 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -90,10 +90,65 @@ tags: - build-operators -- name: Import deploy edpm playbook - ansible.builtin.import_playbook: playbooks/06-deploy-edpm.yml - tags: - - edpm +- name: Deploy EDPM + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Deploy EDPM + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: deploy_edpm.yml + tags: + - edpm + +- name: Deploy NFS server on target nodes + become: true + hosts: "{{ groups[cifmw_nfs_target | default('computes')][0] | default([]) }}" + tasks: + - name: Run cifmw_nfs role + vars: + nftables_path: /etc/nftables + nftables_conf: /etc/sysconfig/nftables.conf + when: + - cifmw_edpm_deploy_nfs | default('false') | bool + ansible.builtin.import_role: + name: cifmw_nfs + +- name: Clear ceph target hosts facts to force refreshing in HCI deployments + hosts: "{{ cifmw_ceph_target | default('computes') }}" + tasks: + # end_play will end only current play, not the main edpm-deploy.yml + - name: Early end if architecture deploy + when: + - cifmw_architecture_scenario is defined + ansible.builtin.meta: end_play + + - name: Clear ceph target hosts facts + when: cifmw_edpm_deploy_hci | default('false') | bool + ansible.builtin.meta: clear_facts + +# TODO: replace this import_playbook with cifmw_ceph role +- name: Deploy Ceph on target nodes + vars: + _deploy_ceph: >- + {{ + 
(cifmw_edpm_deploy_hci | default('false') | bool) and + cifmw_architecture_scenario is undefined + }} + storage_network_range: 172.18.0.0/24 + storage_mgmt_network_range: 172.20.0.0/24 + ansible.builtin.import_playbook: playbooks/ceph.yml + +- name: Continue HCI deploy + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Continue HCI deploy + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: hci_deploy.yml + tags: + - edpm - name: Import VA deployment playbook ansible.builtin.import_playbook: playbooks/06-deploy-architecture.yml diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 9a691c162d..c3f210514f 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/deploy-edpm.yml. +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# - name: Deploy podified control plane hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false diff --git a/playbooks/validations.yml b/playbooks/validations.yml index 9c65cfbf97..613eb77bfb 100644 --- a/playbooks/validations.yml +++ b/playbooks/validations.yml @@ -1,5 +1,5 @@ # -# NOTE: Playbook migrated to: 06-deploy-edpm.yml & 06-deploy-architecture.yml. +# NOTE: Playbook migrated to: roles/cifmw_setup/tasks/hci_deploy.yml:L29-35 & 06-deploy-architecture.yml. # This migration is temporary, and will be further migrated to role. # DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. 
# diff --git a/roles/cifmw_setup/tasks/deploy_edpm.yml b/roles/cifmw_setup/tasks/deploy_edpm.yml new file mode 100644 index 0000000000..34de902f43 --- /dev/null +++ b/roles/cifmw_setup/tasks/deploy_edpm.yml @@ -0,0 +1,73 @@ +--- +- name: Deploy EDPM + when: cifmw_architecture_scenario is not defined + block: + - name: Run pre_deploy hooks + vars: + step: pre_deploy + ansible.builtin.import_role: + name: run_hook + + - name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Configure Storage Class + ansible.builtin.include_role: + name: ci_local_storage + when: not cifmw_use_lvms | default(false) + + - name: Configure LVMS Storage Class + ansible.builtin.include_role: + name: ci_lvms_storage + when: cifmw_use_lvms | default(false) + + - name: Run edpm_prepare + ansible.builtin.include_role: + name: edpm_prepare + + - name: Run post_ctlplane_deploy hooks + when: + - cifmw_architecture_scenario is undefined + vars: + step: post_ctlplane_deploy + ansible.builtin.import_role: + name: run_hook + + - name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Create virtual baremetal and deploy EDPM + when: + - cifmw_edpm_deploy_baremetal | default('false') | bool + - cifmw_deploy_edpm | default('false') | bool + ansible.builtin.import_role: + name: edpm_deploy_baremetal + + - name: Load parameters files + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Create VMs and Deploy EDPM + when: + - not cifmw_edpm_deploy_baremetal | default('false') | bool + - cifmw_deploy_edpm | default('false') | bool + block: + - name: Create and provision external computes + when: + - cifmw_use_libvirt is defined + - cifmw_use_libvirt | bool + ansible.builtin.import_role: + name: libvirt_manager + tasks_from: deploy_edpm_compute.yml + + - name: Prepare for HCI deploy phase 1 + when: cifmw_edpm_deploy_hci | default('false') 
| bool + ansible.builtin.include_role: + name: hci_prepare + tasks_from: phase1.yml + + - name: Deploy EDPM + ansible.builtin.import_role: + name: edpm_deploy diff --git a/roles/cifmw_setup/tasks/hci_deploy.yml b/roles/cifmw_setup/tasks/hci_deploy.yml new file mode 100644 index 0000000000..0279f5587a --- /dev/null +++ b/roles/cifmw_setup/tasks/hci_deploy.yml @@ -0,0 +1,32 @@ +--- +- name: Continue HCI deploy + when: cifmw_architecture_scenario is not defined + block: + - name: Create Ceph secrets and retrieve FSID info + when: cifmw_edpm_deploy_hci | default('false') | bool + block: + - name: Prepare for HCI deploy phase 2 + ansible.builtin.include_role: + name: hci_prepare + tasks_from: phase2.yml + + - name: Continue HCI deployment + ansible.builtin.include_role: + name: edpm_deploy + vars: + cifmw_edpm_deploy_prepare_run: false + + - name: Run post_deploy hooks + vars: + step: post_deploy + ansible.builtin.import_role: + name: run_hook + +# If we're doing an architecture deployment, we need to skip validations here. +# Instead, they will be executed in the 06-deploy-architecture.yml playbook. 
+- name: Run validations + ansible.builtin.include_role: + name: validations + when: + - cifmw_architecture_scenario is not defined + - cifmw_execute_validations | default(false) | bool From 0fe5e74f0be40d3a8ee9b91f3c7cc076e43b5a58 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Wed, 25 Jun 2025 09:55:01 +0000 Subject: [PATCH 198/480] Fix 2-compute setting for multi-namespace VA --- scenarios/reproducers/va-multi.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scenarios/reproducers/va-multi.yml b/scenarios/reproducers/va-multi.yml index 158d6c5a43..f4f68a12c3 100644 --- a/scenarios/reproducers/va-multi.yml +++ b/scenarios/reproducers/va-multi.yml @@ -31,6 +31,9 @@ cifmw_networking_mapper_interfaces_info_translations: osptrunk2: - ctlplane2 +# Override the default 3-compute VA setting, since 3 computes in both namespaces is too expensive +cifmw_libvirt_manager_compute_amount: 2 + cifmw_libvirt_manager_configuration: networks: osp_trunk: | From d7bf8dd68cba709d91cc631929c8578387f03d1a Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 20 Jun 2025 14:07:08 +0200 Subject: [PATCH 199/480] Add empty adoption scenario uni07eta Just to make ci-framework happy, following the pattern already established by the other scenarios. The configuration will be set through variable files by the users of this code. 
--- scenarios/adoption/uni07eta.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 scenarios/adoption/uni07eta.yml diff --git a/scenarios/adoption/uni07eta.yml b/scenarios/adoption/uni07eta.yml new file mode 100644 index 0000000000..4e9e5200e7 --- /dev/null +++ b/scenarios/adoption/uni07eta.yml @@ -0,0 +1,2 @@ +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From 3a5e162d4c0152722bd267868e95205fe86bf037 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Wed, 25 Jun 2025 13:06:25 +0300 Subject: [PATCH 200/480] Set gather_facts to true for post deployment some issues are caused by missing facts after new post deployment seperation was introduced, setting gather_facts to true for post deployment to avoid those issues --- post-deployment.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/post-deployment.yml b/post-deployment.yml index 6dce5588d3..cf82ad98fe 100644 --- a/post-deployment.yml +++ b/post-deployment.yml @@ -1,6 +1,6 @@ - name: Run Post-deployment admin setup steps, test, and compliance scan hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false + gather_facts: true tasks: - name: Run cifmw_setup admin_setup.yml ansible.builtin.import_role: @@ -44,7 +44,7 @@ - name: Run hooks and inject status flag hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false + gather_facts: true tasks: - name: Run pre_end hooks tags: @@ -54,12 +54,6 @@ ansible.builtin.import_role: name: run_hook - - name: Gather minimal facts for ansible_user_dir - ansible.builtin.setup: - gather_subset: - - min - filter: "ansible_user_dir" - - name: Inject success flag ansible.builtin.file: path: "{{ ansible_user_dir }}/cifmw-success" From df3138a6149963363bcd35fbfc4cbe4c9e5e0ef2 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Wed, 25 Jun 2025 15:33:46 +0200 Subject: [PATCH 201/480] Allow to disable telemetry exporters in ci-framework jobs There are certain cases where we want to 
disable specific telemetry exporters in ci-framework jobs, i.e. watcher jobs where the required metrics to test watcher features are injected into prometheus directly. This patch is adding a variable cifmw_edpm_telemetry_enabled_exporters which can contain the list of desired exporters in the telemetry enabled deployment. Co-Authored-By: Chandan Kumar --- hooks/playbooks/fetch_compute_facts.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index bc2d94d7c7..b96c3dee14 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -273,6 +273,15 @@ {{ cifmw_hook_fetch_compute_facts_edpm_cmd | indent( width=8) }} {% endif %} + {% if cifmw_edpm_telemetry_enabled_exporters is defined and cifmw_edpm_telemetry_enabled_exporters | length > 0 %} + - op: replace + path: /spec/nodeTemplate/ansible/ansibleVars/edpm_telemetry_enabled_exporters + value: + {% for exporter in cifmw_edpm_telemetry_enabled_exporters %} + - "{{ exporter }}" + {% endfor %} + {% endif %} + - name: Ensure we know about the private host keys ansible.builtin.shell: cmd: | From a9584a96ba44aa1c8fa52c90adc1d11535d0ecbc Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 26 Jun 2025 14:24:19 +0200 Subject: [PATCH 202/480] Allow setting test-operator namespace per stage This will allow us to run the testing with test-operator in environments where multiple OpenStack namespaces exist on a single OpenShift cluster. 
--- roles/test_operator/README.md | 2 +- roles/test_operator/defaults/main.yml | 10 +++++----- roles/test_operator/tasks/cleanup-run.yaml | 2 +- roles/test_operator/tasks/collect-logs.yaml | 6 +++--- roles/test_operator/tasks/run-test-operator-job.yml | 6 +++--- roles/test_operator/tasks/stages.yml | 2 +- roles/test_operator/tasks/tempest-tests.yml | 2 +- roles/test_operator/tasks/tobiko-tests.yml | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 5b87b85b5d..3aa5e8783e 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -53,7 +53,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `type`: (String) The framework name you would like to call, currently the options are: tempest, ansibletest, horizontest, tobiko. * `test_vars_file`: (String) Path to the file used for testing, this file should contain the testing params for this stage. Only parameters specific for the controller can be used (Tempest, Ansibletest, Horizontest and Tobiko). * `test_vars`: (String) Testing parameters for this specific stage if a `test_vars` is used the specified parameters would override the ones in the `test_vars_file`. Only parameters specific for the controller can be used (Tempest, Ansibletest, Horizontest and Tobiko). - > Important note! Only variables with the following structure can be used to override inside a stage: `cifmw_test_operator_[test-operator CR name]_[parameter name]`. For example, these variables cannot be overridden per stage: `cifmw_test_operator_default_registry`, `cifmw_test_operator_default_namespace`, `cifmw_test_operator_default_image_tag`. + > Important note! Generally only the variables with the following structure can be used to override inside a stage: `cifmw_test_operator_[test-operator CR name]_[parameter name]`. 
For example, these variables cannot be overridden per stage: `cifmw_test_operator_default_registry`, `cifmw_test_operator_default_namespace`, `cifmw_test_operator_default_image_tag`. One exception is `cifmw_test_operator_namespace`, which allows running the testing frameworks in multiple namespaces. * `pre_test_stage_hooks`: (List) List of pre hooks to run as described [hooks README](https://github.com/openstack-k8s-operators/ci-framework/tree/main/roles/run_hook#hooks-expected-format). * `post_test_stage_hooks`: (List) List of post hooks to run as described [hooks README](https://github.com/openstack-k8s-operators/ci-framework/tree/main/roles/run_hook#hooks-expected-format). Default value: diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index bfa978eeb0..703dc2f1c0 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -49,7 +49,7 @@ cifmw_test_operator_log_pod_definition: kind: Pod metadata: name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" spec: containers: - name: test-operator-logs-container @@ -135,7 +135,7 @@ cifmw_test_operator_tempest_config: kind: Tempest metadata: name: "{{ stage_vars_dict.cifmw_test_operator_tempest_name }}-{{ _stage_vars.name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" spec: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" containerImage: "{{ stage_vars_dict.cifmw_test_operator_tempest_image }}:{{ stage_vars_dict.cifmw_test_operator_tempest_image_tag }}" @@ -194,7 +194,7 @@ cifmw_test_operator_tobiko_config: kind: Tobiko metadata: name: "{{ stage_vars_dict.cifmw_test_operator_tobiko_name }}-{{ _stage_vars.name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ 
stage_vars_dict.cifmw_test_operator_namespace }}" spec: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" kubeconfigSecretName: "{{ stage_vars_dict.cifmw_test_operator_tobiko_kubeconfig_secret }}" @@ -242,7 +242,7 @@ cifmw_test_operator_ansibletest_config: kind: AnsibleTest metadata: name: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_name }}-{{ _stage_vars.name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" spec: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" containerImage: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_image }}:{{ stage_vars_dict.cifmw_test_operator_ansibletest_image_tag }}" @@ -292,7 +292,7 @@ cifmw_test_operator_horizontest_config: kind: HorizonTest metadata: name: "{{ stage_vars_dict.cifmw_test_operator_horizontest_name }}-{{ _stage_vars.name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" spec: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" storageClass: "{{ cifmw_test_operator_storage_class }}" diff --git a/roles/test_operator/tasks/cleanup-run.yaml b/roles/test_operator/tasks/cleanup-run.yaml index f3f8c2e5e9..ffe6fa27fb 100644 --- a/roles/test_operator/tasks/cleanup-run.yaml +++ b/roles/test_operator/tasks/cleanup-run.yaml @@ -17,7 +17,7 @@ state: absent api_version: v1 name: "{{ test_operator_crd_name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" wait: true wait_timeout: 600 diff --git a/roles/test_operator/tasks/collect-logs.yaml b/roles/test_operator/tasks/collect-logs.yaml index a778d4923b..cbb0c6b289 100644 --- a/roles/test_operator/tasks/collect-logs.yaml +++ b/roles/test_operator/tasks/collect-logs.yaml @@ -8,7 +8,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit)}}" - 
namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" kind: PersistentVolumeClaim label_selectors: - "instanceName={{ test_operator_instance_name }}" @@ -57,7 +57,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit) }}" context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" kind: Pod name: "test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}" wait: true @@ -73,7 +73,7 @@ vars: pod_path: mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} ansible.builtin.shell: > - oc cp -n {{ cifmw_test_operator_namespace }} + oc cp -n {{ stage_vars_dict.cifmw_test_operator_namespace }} test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} {{ cifmw_test_operator_artifacts_basedir }} loop: "{{ logsPVCs.resources }}" diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 658ee5970f..3078a45da1 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -56,7 +56,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit) }}" context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" kind: Pod label_selectors: - "workflowStep={{ [(test_operator_workflow | length) - 1, 0] | max }}" @@ -84,7 +84,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" kind: Pod register: pod_list @@ 
-94,7 +94,7 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit) }}" context: "{{ cifmw_openshift_context | default(omit) }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" kind: Pod label_selectors: - "instanceName={{ test_operator_instance_name }}" diff --git a/roles/test_operator/tasks/stages.yml b/roles/test_operator/tasks/stages.yml index b67e948fcf..ce0d1031ba 100644 --- a/roles/test_operator/tasks/stages.yml +++ b/roles/test_operator/tasks/stages.yml @@ -32,7 +32,7 @@ - name: Overwrite global_vars with stage_vars with_dict: "{{ vars | combine(_stage_test_vars) }}" vars: - start_with: cifmw_test_operator_{{ _stage_vars.type }} + start_with: cifmw_test_operator_ when: item.key.startswith(start_with) ansible.builtin.set_fact: stage_vars_dict: "{{ stage_vars_dict | combine({item.key: _stage_test_vars[item.key] | default(lookup('vars', item.key, default=omit)) }) }}" diff --git a/roles/test_operator/tasks/tempest-tests.yml b/roles/test_operator/tasks/tempest-tests.yml index aecb9e591b..676ea84881 100644 --- a/roles/test_operator/tasks/tempest-tests.yml +++ b/roles/test_operator/tasks/tempest-tests.yml @@ -105,7 +105,7 @@ type: Opaque metadata: name: "{{ cifmw_test_operator_controller_priv_key_secret_name }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" data: ssh-privatekey: >- {{ diff --git a/roles/test_operator/tasks/tobiko-tests.yml b/roles/test_operator/tasks/tobiko-tests.yml index 100b62d502..ea389404d8 100644 --- a/roles/test_operator/tasks/tobiko-tests.yml +++ b/roles/test_operator/tasks/tobiko-tests.yml @@ -101,7 +101,7 @@ type: Opaque metadata: name: "{{ stage_vars_dict.cifmw_test_operator_tobiko_kubeconfig_secret }}" - namespace: "{{ cifmw_test_operator_namespace }}" + namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" data: config: "{{ lookup('file', 
cifmw_openshift_kubeconfig) | b64encode }}" when: not cifmw_test_operator_dry_run | bool From 3a9127276ef04ec22ba0e1f6f668dbc9a1629e89 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Fri, 27 Jun 2025 10:44:04 +0200 Subject: [PATCH 203/480] Set namespace for oc calls Some calls in `roles/cifmw_cephadm/tasks/configure_object.yml` assume that oc commands will be running in the `openstack` namespace, but it is not true in day2 job, during the 2nd reproducer run. Looking at other tasks in this file, it should be executed explicitly in namespace defined via `cifmw_cephadm_ns` variable. --- roles/cifmw_cephadm/tasks/configure_object.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/cifmw_cephadm/tasks/configure_object.yml b/roles/cifmw_cephadm/tasks/configure_object.yml index b06d266e71..87b49f4ed7 100644 --- a/roles/cifmw_cephadm/tasks/configure_object.yml +++ b/roles/cifmw_cephadm/tasks/configure_object.yml @@ -98,7 +98,7 @@ KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" ansible.builtin.shell: | set -euo pipefail - oc exec -t openstackclient -- \ + oc -n {{ cifmw_cephadm_ns }} exec -t openstackclient -- \ openstack endpoint list -f json | \ jq -r '.[] | select(.["Service Name"] == "swift" and .Interface == "public") | .ID' register: uuid_swift_public_ep @@ -109,7 +109,7 @@ KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" ansible.builtin.shell: | set -euo pipefail - oc exec -t openstackclient -- \ + oc -n {{ cifmw_cephadm_ns }} exec -t openstackclient -- \ openstack endpoint list -f json | \ jq -r '.[] | select(.["Service Name"] == "swift" and .Interface == "internal") | .ID' register: uuid_swift_internal_ep From cd09974fbdc3ae6dfb5c31d8ed4a8ba29eb63122 Mon Sep 17 00:00:00 2001 From: bshewale Date: Mon, 16 Jun 2025 16:49:11 +0530 Subject: [PATCH 204/480] Allow setting up 8 OCP nodes So with this change by default it will allow setting up 8 OCP nodes and 512 pods per node(previously it was 1024), previously was limited to 4 nodes. 
Basically below two parameters responsible here https://docs.redhat.com/en/documentation/openshift_dedicated/4/html/networking/cidr-range-definitions cluster_subnet_v4: "192.168.16.0/20" cluster_host_prefix_v4: "23" 2^(32 - 23) = 512 2^(23-20) = 8 Also it will not increase OCP nodes in a cluster just allow setting up 8 nodes by default and that should cover most of the dev cases by default. If more nodes are needed user can override these vars as per need. Fix: #OSPRH-15223 --- roles/devscripts/vars/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/devscripts/vars/main.yml b/roles/devscripts/vars/main.yml index 3cb58f4872..7b9870b5bb 100644 --- a/roles/devscripts/vars/main.yml +++ b/roles/devscripts/vars/main.yml @@ -41,7 +41,7 @@ cifmw_devscripts_config_defaults: provisioning_network_profile: "Managed" provisioning_network: "172.22.0.0/24" cluster_subnet_v4: "192.168.16.0/20" - cluster_host_prefix_v4: "22" + cluster_host_prefix_v4: "23" service_subnet_v4: "172.30.0.0/16" external_subnet_v4: "192.168.111.0/24" num_masters: 3 From fe9955e773dcb39ed958cd7c17aaf6ad5b914a0f Mon Sep 17 00:00:00 2001 From: Chandan Kumar Date: Fri, 27 Jun 2025 17:12:32 +0530 Subject: [PATCH 205/480] Add pre playbook to copy tcib container config file In watcher-operator, we use custom containers.yamli[1] file to build containers in meta content provider. This pr moves the bits from watcher-operator to ci-framework so that other projects can reuse it. Links: [1]. 
https://github.com/openstack-k8s-operators/watcher-operator/commit/9e4c3951f7b28c4c175149d675351e0dc3fd882e#diff-57c6783bf06af1bbc1106fee66b30f0808322d79a77006567860245ddcd98d1b Signed-off-by: Chandan Kumar --- .../meta_content_provider/copy_container_files.yaml | 10 ++++++++++ zuul.d/content_provider.yaml | 2 ++ 2 files changed, 12 insertions(+) create mode 100644 ci/playbooks/meta_content_provider/copy_container_files.yaml diff --git a/ci/playbooks/meta_content_provider/copy_container_files.yaml b/ci/playbooks/meta_content_provider/copy_container_files.yaml new file mode 100644 index 0000000000..25ff7a7ee8 --- /dev/null +++ b/ci/playbooks/meta_content_provider/copy_container_files.yaml @@ -0,0 +1,10 @@ +--- +- name: Copy watcher containers.yaml file + hosts: all + tasks: + - name: Copy containers.yaml file + when: cifmw_build_containers_config_file is defined + ansible.builtin.copy: + src: "{{ zuul_project_container_path }}" + dest: "{{ cifmw_build_containers_config_file }}" + remote_src: true diff --git a/zuul.d/content_provider.yaml b/zuul.d/content_provider.yaml index 764d95c686..1171c4ee1c 100644 --- a/zuul.d/content_provider.yaml +++ b/zuul.d/content_provider.yaml @@ -10,6 +10,8 @@ A zuul job to build content (rpms, openstack services containers, operators) from opendev and github changes. timeout: 5000 + pre-run: + - ci/playbooks/meta_content_provider/copy_container_files.yaml run: - ci/playbooks/meta_content_provider/run.yml From 2d94c384a114a070554c94e732b2a1145260eb3f Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Thu, 19 Jun 2025 13:47:13 +0530 Subject: [PATCH 206/480] Add cifmw_cleanup_architecture var to enable/disable cleanup It will help to reuse the ocp environment when architecture deployment failed in pre step. 
Signed-off-by: Chandan Kumar (raukadah) --- deploy-edpm-reuse.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy-edpm-reuse.yaml b/deploy-edpm-reuse.yaml index d6a3f0cfc1..1550d4390d 100644 --- a/deploy-edpm-reuse.yaml +++ b/deploy-edpm-reuse.yaml @@ -29,6 +29,7 @@ no_log: "{{ cifmw_nolog | default(true) | bool }}" async: "{{ 7200 + cifmw_test_operator_timeout | default(3600) }}" # 2h should be enough to deploy EDPM and rest for tests. poll: 20 + when: cifmw_cleanup_architecture | default(true) | bool delegate_to: controller-0 ansible.builtin.command: cmd: "/home/zuul/cleanup-architecture.sh" From 7fb4b62daab630706ccde1ee1f0aac2efdb717fa Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 3 Jun 2025 12:28:34 +0200 Subject: [PATCH 207/480] Add variable to overwrite NNCP_DNS_SERVER Earlier Ansible was relay on variable that was taken from /etc/ci/env/networking-info.yml which was generated during base job, but it does not include value with "ip4". This commit gives possibility to set custom NNCP_DNS_SERVER value. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/kuttl/deploy-deps.yaml | 2 +- hooks/playbooks/fetch_compute_facts.yml | 2 +- hooks/playbooks/kuttl_openstack_prep.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/playbooks/kuttl/deploy-deps.yaml b/ci/playbooks/kuttl/deploy-deps.yaml index 4b5109db9c..9036df4090 100644 --- a/ci/playbooks/kuttl/deploy-deps.yaml +++ b/ci/playbooks/kuttl/deploy-deps.yaml @@ -80,7 +80,7 @@ NETWORK_MTU: "{{ crc_ci_bootstrap_networks_out.crc.default.mtu }}" NNCP_DNS_SERVER: >- {{ - crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | + cifmw_nncp_dns_server | default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | split('/') | first }} diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index b96c3dee14..92a2acd176 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -68,7 +68,7 @@ NNCP_INTERFACE: "{{ crc_ci_bootstrap_networks_out[_crc_hostname].default.iface }}" NNCP_DNS_SERVER: >- {{ - crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | + cifmw_nncp_dns_server | default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | split('/') | first }} diff --git a/hooks/playbooks/kuttl_openstack_prep.yml b/hooks/playbooks/kuttl_openstack_prep.yml index 4488225ace..5e75b904f0 100644 --- a/hooks/playbooks/kuttl_openstack_prep.yml +++ b/hooks/playbooks/kuttl_openstack_prep.yml @@ -29,7 +29,7 @@ NNCP_INTERFACE: "{{ crc_ci_bootstrap_networks_out[_crc_hostname].default.iface }}" NNCP_DNS_SERVER: >- {{ - crc_ci_bootstrap_networks_out[_crc_hostname].default.ip4 | + cifmw_nncp_dns_server | default(crc_ci_bootstrap_networks_out[_crc_hostname].default.ip) | split('/') | first }} From b9417dd6c64ee8d82a30fb3f76448effcc916476 Mon Sep 17 00:00:00 2001 From: Chandan Kumar Date: Fri, 27 Jun 2025 16:19:24 +0530 Subject: [PATCH 208/480] Add missing conditional for Update the go.mod file for no pr case Without this 
conditional, the meta content provider is failing with following error: ``` msg: | 2025-06-27 06:20:22.612671 | controller | The task includes an option with an undefined variable. The error was: 'operator_api_path' is undefined. 'operator_api_path' is undefined 2025-06-27 06:20:22.612685 | controller | 2025-06-27 06:20:22.612699 | controller | The error appears to be in '/home/zuul-worker/src/github.com/openstack-k8s-operators/ci-framework/roles/operator_build/tasks/build.yml': line 70, column 3, but may 2025-06-27 06:20:22.612713 | controller | be elsewhere in the file depending on the exact syntax problem. 2025-06-27 06:20:22.612740 | controller | 2025-06-27 06:20:22.612753 | controller | The offending line appears to be: 2025-06-27 06:20:22.612766 | controller | 2025-06-27 06:20:22.612779 | controller | 2025-06-27 06:20:22.612792 | controller | - name: "{{ operator.name }} - Update the go.mod file using latest commit if no PR is provided" # noqa: name[template] 2025-06-27 06:20:22.612805 | controller | ^ here 2025-06-27 06:20:22.612818 | controller | We could be wrong, but this one looks like it might be an issue with 2025-06-27 06:20:22.612830 | controller | missing quotes. Always quote template expression brackets when they 2025-06-27 06:20:22.612843 | controller | start a value. For instance: 2025-06-27 06:20:22.612856 | controller | ``` By adding the conditional fixes the issue. Note: it also fixes the same conditional for other tasks. 
Signed-off-by: Chandan Kumar --- roles/operator_build/tasks/build.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/roles/operator_build/tasks/build.yml b/roles/operator_build/tasks/build.yml index 2588caec77..27eb610d04 100644 --- a/roles/operator_build/tasks/build.yml +++ b/roles/operator_build/tasks/build.yml @@ -29,12 +29,12 @@ operator_base_module: "{{ go_mod_out['content'] | b64decode | regex_search(cifmw_operator_build_org + '/' + operator.name + '/(\\w*)\\s', '\\1') }}" - name: Get the base module name not empty operator_base_module - when: operator_base_module is not none + when: operator_base_module ansible.builtin.set_fact: operator_base_module_name: "{{ operator_base_module | first }}" - name: "{{ operator.name }} - Set default api path" # noqa: name[template] - when: operator_base_module is not none + when: operator_base_module ansible.builtin.set_fact: operator_api_path: "github.com/{{ cifmw_operator_build_org }}/{{ operator.name }}/{{ operator_base_module_name }}" @@ -55,7 +55,7 @@ - operator.name != cifmw_operator_build_meta_name - operator.pr_owner is defined - operator.pr_sha is defined - - operator_base_module is not none + - operator_base_module - name: "{{ operator.name }} - Get latest commit when no PR is provided" # noqa: name[template] command-instead-of-module ansible.builtin.command: @@ -83,6 +83,7 @@ - cifmw_operator_build_meta_build - operator.name != cifmw_operator_build_meta_name - operator.pr_owner is not defined + - operator_base_module - name: Get container image block: From b18133a77cbc9cd0c30245293ce25db1e203bfa9 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 12 Jun 2025 09:10:10 +0200 Subject: [PATCH 209/480] Use role instead of playbooks - 06-deploy-architecture.yml Reason why we move playbook execution to roles was described in pull request [1], but in few words: it would be easier for mainteners to understand playbook/role execution, it is just better to control variables, easier to debug, 
etc. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/2930 Signed-off-by: Daniel Pawlik --- deploy-edpm.yml | 41 ++- playbooks/06-deploy-architecture.yml | 4 + playbooks/validations.yml | 4 +- roles/ci_lvms_storage/README.md | 14 +- roles/cifmw_setup/defaults/main.yml | 1 + .../cifmw_setup/tasks/deploy_architecture.yml | 269 ++++++++++++++++++ 6 files changed, 320 insertions(+), 13 deletions(-) create mode 100644 roles/cifmw_setup/tasks/deploy_architecture.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 894bcc5715..e2ab177375 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -150,7 +150,40 @@ tags: - edpm -- name: Import VA deployment playbook - ansible.builtin.import_playbook: playbooks/06-deploy-architecture.yml - tags: - - edpm +- name: Deploy VA and validate workflow + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run pre_deploy hooks + when: cifmw_architecture_scenario is defined + vars: + step: pre_deploy + ansible.builtin.import_role: + name: run_hook + + # FIXME:Earlier, where we were using import_playbook, the cifmw_architecture_scenario + # variable was not available in playbooks/06-deploy-architecture.yml, + # but by using import_playbook, the variables are parsed in different way, + # so instead of cifmw_architecture_scenario not being defined, it is defined + # and it is executing additional tasks, which should not. + # Temporary move the end_play here and let's improve the tasks execution + # where tasks execution would be merged into one if the tasks should + # be done on same host. 
+ - name: Early end if not architecture deploy + tags: + - always + when: cifmw_architecture_scenario is not defined + ansible.builtin.meta: end_play + + - name: Run cifmw_setup deploy_architecture + when: cifmw_architecture_scenario is defined + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: deploy_architecture.yml + tags: + - edpm + + - name: Run validations + ansible.builtin.include_role: + name: validations + when: cifmw_execute_validations | default(false) | bool diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index 0d2e708a01..302e060281 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -1,4 +1,8 @@ --- +# +# NOTE: Playbook migrated to: roles/cifmw_setup/tasks/deploy_architecture.yml +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. +# - name: Deploy VA hosts: "{{ cifmw_target_host | default('localhost') }}" tasks: diff --git a/playbooks/validations.yml b/playbooks/validations.yml index 613eb77bfb..6c0a6d063c 100644 --- a/playbooks/validations.yml +++ b/playbooks/validations.yml @@ -1,5 +1,6 @@ # -# NOTE: Playbook migrated to: roles/cifmw_setup/tasks/hci_deploy.yml:L29-35 & 06-deploy-architecture.yml. +# NOTE: Playbook migrated to: roles/cifmw_setup/tasks/hci_deploy.yml & +# 06-deploy-architecture.yml. # This migration is temporary, and will be further migrated to role. # DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. # @@ -7,7 +8,6 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: - - name: Run validations ansible.builtin.include_role: name: validations diff --git a/roles/ci_lvms_storage/README.md b/roles/ci_lvms_storage/README.md index 16a895fee6..ca81babfa9 100644 --- a/roles/ci_lvms_storage/README.md +++ b/roles/ci_lvms_storage/README.md @@ -30,18 +30,18 @@ clean and adds to an LVMS cluster. 
* `cifmw_use_lvms`: (Boolean) Whether or not to use LVMS (default: `false`) If the ci-framework is called and `cifmw_use_lvms` is true, then -the playbooks `06-deploy-architecture.yml` and `06-deploy-edpm.yml` -call the `ci_lvms_storage` role to create a storage class called -`lvms-local-storage` and the `ci_gen_kustomize_values` role will -set the `storageClass` to `lvms-local-storage` in the generated +the tasks in role `roles/cifmw_setup/tasks/deploy_architecture.yml` +and playbook `06-deploy-edpm.yml` call the `ci_lvms_storage` role to create +a storage class called `lvms-local-storage` and the `ci_gen_kustomize_values` +role will set the `storageClass` to `lvms-local-storage` in the generated values.yaml files used to build architecture CRs. The Tempest CR file, created by the `test_operator` role, will also set its `storageClass` value to `lvms-local-storage`. If the ci-framework is called and `cifmw_use_lvms` is false, then the -playbooks `06-deploy-architecture.yml` and `06-deploy-edpm.yml` -call the `ci_local_storage` role to create a storage class called -`local-storage` and the `ci_gen_kustomize_values` role will set +tasks in role `roles/cifmw_setup/tasks/deploy_architecture.yml` and playbook +`06-deploy-edpm.yml` call the `ci_local_storage` role to create a storage class +called `local-storage` and the `ci_gen_kustomize_values` role will set the `storageClass` to `local-storage` in the generated values.yaml files used to build architecture CRs. 
The Tempest CR file, created by the `test_operator` role, will also set its `storageClass` value to diff --git a/roles/cifmw_setup/defaults/main.yml b/roles/cifmw_setup/defaults/main.yml index 159eaa9a78..74cdfebb7a 100644 --- a/roles/cifmw_setup/defaults/main.yml +++ b/roles/cifmw_setup/defaults/main.yml @@ -1,2 +1,3 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" +openstack_namespace: openstack diff --git a/roles/cifmw_setup/tasks/deploy_architecture.yml b/roles/cifmw_setup/tasks/deploy_architecture.yml new file mode 100644 index 0000000000..946f13f89a --- /dev/null +++ b/roles/cifmw_setup/tasks/deploy_architecture.yml @@ -0,0 +1,269 @@ +--- +- name: Load Networking Environment Definition + tags: + - always + ansible.builtin.import_role: + name: networking_mapper + tasks_from: load_env_definition.yml + +- name: Fetch network facts + tags: + - always + when: + - "not item.startswith('ocp-')" + ansible.builtin.setup: + gather_subset: network + delegate_facts: true + delegate_to: "{{ item }}" + loop: "{{ groups['all'] }}" + loop_control: + label: "{{ item }}" + +- name: Look for nova migration keypair file + tags: + - edpm_bootstrap + register: _nova_key_file + ansible.builtin.stat: + path: "{{ cifmw_basedir }}/artifacts/nova_migration_key" + +- name: Ensure nova migration keypair details are propagated + tags: + - always + vars: + _ssh_file: >- + {{ + _nova_key_file.stat.path | + default( + (cifmw_basedir, 'artifacts', 'nova_migration_key') | + ansible.builtin.path_join + ) + }} + block: + - name: Create nova migration keypair if does not exists + when: + - not _nova_key_file.stat.exists | default(false) + community.crypto.openssh_keypair: + comment: "nova migration" + path: "{{ _ssh_file }}" + type: "{{ cifmw_ssh_keytype | default('ecdsa') }}" + size: "{{ cifmw_ssh_keysize | default(521) }}" + + - name: Try/catch block + vars: + # We want to match anything like: + # - controller (in Zuul) + # - controller-0.foo.com (FQDN) + # - controller-0 (no FQDN) - 
compatibility match + _ctl_data: >- + {{ + hostvars | dict2items | + selectattr('key', 'match', '^(controller-0.*|controller)') | + map(attribute='value') | first + }} + _ifaces_vars: >- + {{ + _ctl_data.ansible_interfaces | + map('regex_replace', '^(.*)$', 'ansible_\1') + }} + _controller_host: "{{ _ctl_data.ansible_host }}" + block: + - name: Generate needed facts out of local files + vars: + _ctl_ifaces_vars: >- + {{ + _ctl_data | dict2items | selectattr('key', 'in', _ifaces_vars) + }} + _ipv4_network_data: >- + {{ + _ctl_ifaces_vars | + selectattr('value.ipv4.address', 'defined') | + selectattr('value.ipv4.address', 'equalto', _controller_host) | + map(attribute='value.ipv4') | first | default({}) + }} + _ipv6_network_data: >- + {{ + _ctl_ifaces_vars | + selectattr('value.ipv6.address', 'defined') | + selectattr('value.ipv6.address', 'equalto', _controller_host) | + map(attribute='value.ipv6') | first | default({}) + }} + _ipv4_sshd_ranges: >- + {{ + ( + [cifmw_networking_env_definition.networks.ctlplane.network_v4] + if cifmw_networking_env_definition.networks.ctlplane.network_v4 is defined else [] + ) + + ( + [ + _ipv4_network_data.network + '/' + _ipv4_network_data.prefix + ] + ) if (_ipv4_network_data | length > 0) else [] + }} + _ipv6_sshd_ranges: >- + {{ + ( + [cifmw_networking_env_definition.networks.ctlplane.network_v6] + if cifmw_networking_env_definition.networks.ctlplane.network_v6 is defined else [] + ) + + ( + [ + _ipv6_network_data.network + '/' + _ipv6_network_data.prefix + ] + ) if (_ipv6_network_data | length > 0) else [] + }} + ansible.builtin.set_fact: + cifmw_ci_gen_kustomize_values_ssh_authorizedkeys: >- + {{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }} + cifmw_ci_gen_kustomize_values_ssh_private_key: >- + {{ lookup('file', '~/.ssh/id_cifw', rstrip=False) }} + cifmw_ci_gen_kustomize_values_ssh_public_key: >- + {{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }} + cifmw_ci_gen_kustomize_values_migration_pub_key: >- + {{ 
lookup('file', _ssh_file ~ '.pub', rstrip=False)}} + cifmw_ci_gen_kustomize_values_migration_priv_key: >- + {{ lookup('file', _ssh_file, rstrip=False) }} + cifmw_ci_gen_kustomize_values_sshd_ranges: >- + {{ + _ipv4_sshd_ranges + _ipv6_sshd_ranges + }} + rescue: + - name: Debug _ctl_data + ansible.builtin.debug: + var: _ctl_data + + - name: Debug _ifaces_vars + ansible.builtin.debug: + var: _ifaces_vars + + - name: Fail for good + ansible.builtin.fail: + msg: >- + Error detected. Check debugging output above. + +- name: Set cifmw_architecture_automation_file if not set before + when: cifmw_architecture_automation_file is not defined + ansible.builtin.set_fact: + cifmw_architecture_automation_file: >- + {{ + ( + cifmw_architecture_repo | default(ansible_user_dir+'/src/github.com/openstack-k8s-operators/architecture'), + 'automation/vars', + cifmw_architecture_scenario~'.yaml' + ) | ansible.builtin.path_join + }} + +- name: Load architecture automation file + tags: + - edpm_deploy + register: _automation + ansible.builtin.slurp: + path: "{{ cifmw_architecture_automation_file }}" + +- name: Prepare automation data + tags: + - edpm_deploy + vars: + _parsed: "{{ _automation.content | b64decode | from_yaml }}" + ansible.builtin.set_fact: + cifmw_deploy_architecture_steps: >- + {{ _parsed['vas'][cifmw_architecture_scenario] }} + +- name: Check requirements + tags: + - edpm_bootstrap + ansible.builtin.import_role: + name: kustomize_deploy + tasks_from: check_requirements.yml + +- name: Reduce OCP cluster size in architecture + when: + - groups['ocps'] | length == 1 + ansible.builtin.import_role: + name: kustomize_deploy + tasks_from: reduce_ocp_cluster.yml + tags: + - edpm_bootstrap + +- name: Configure Storage Class + ansible.builtin.import_role: + name: ci_local_storage + when: not cifmw_use_lvms | default(false) + tags: + - storage + - edpm_bootstrap + +- name: Deploy OSP operators + ansible.builtin.import_role: + name: kustomize_deploy + tasks_from: 
install_operators.yml + tags: + - operator + - edpm_bootstrap + +- name: Update containers in deployed OSP operators + vars: + cifmw_update_containers_metadata: controlplane + ansible.builtin.include_role: + name: update_containers + tags: + - update_containers + - edpm_bootstrap + when: cifmw_ci_gen_kustomize_values_deployment_version is not defined + +- name: Update containers in deployed OSP operators using set_openstack_containers role + when: + - cifmw_set_openstack_containers | default(false) | bool + - cifmw_ci_gen_kustomize_values_deployment_version is not defined + ansible.builtin.include_role: + name: set_openstack_containers + tags: + - set_openstack_containers + - edpm_bootstrap + +- name: Configure LVMS Storage Class + ansible.builtin.include_role: + name: ci_lvms_storage + when: cifmw_use_lvms | default(false) + tags: + - storage + - edpm_bootstrap + +- name: Execute deployment steps + tags: + - edpm_deploy + ansible.builtin.include_role: + name: kustomize_deploy + tasks_from: execute_step.yml + apply: + tags: + - edpm_deploy + loop: "{{ cifmw_deploy_architecture_steps.stages }}" + loop_control: + label: "{{ stage.path }}" + loop_var: stage + index_var: stage_id + +- name: Extract and install OpenStackControlplane CA + ansible.builtin.import_role: + role: install_openstack_ca + tags: + - openstack_ca + - edpm_post + +- name: Run nova host discover process + tags: + - edpm_post + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + nova-cell0-conductor-0 + nova-manage cell_v2 discover_hosts --verbose + +- name: Run post_deploy hooks + vars: + step: post_deploy + ansible.builtin.import_role: + name: run_hook From 16899b14c977acbf5066d35ff1d5161b1c034e0b Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 8 Jul 2025 12:57:18 +0200 Subject: [PATCH 210/480] Trigger documentation Github action job when "recheck" provided Currently, when the 
documentation job fails, it is not possible to re-trigger the GH action to verify again the commit. Let's add a condition, that would trigger the CI job when "recheck" comment is provided. Signed-off-by: Daniel Pawlik --- .github/workflows/documentation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 2382fa2cf9..c51e6c6367 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -14,6 +14,7 @@ on: # noqa: yaml[truthy] jobs: build-and-check: runs-on: ubuntu-latest + if: github.event_name == 'pull_request_target' || github.event_name == 'pull_request' || github.event.comment.body == 'recheck' steps: - name: Checkout uses: actions/checkout@v4 From 08bb82e738fdade894cf146befa04fdb3ca14dc2 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Thu, 3 Jul 2025 09:25:17 +0300 Subject: [PATCH 211/480] Allow setting container name prefix Allows setting a prefix for container names in the update_containers role. This is useful for deploying older versions of containers that may have different naming conventions. --- roles/update_containers/README.md | 1 + roles/update_containers/defaults/main.yml | 1 + .../templates/update_containers.j2 | 148 +++++++++--------- 3 files changed, 76 insertions(+), 74 deletions(-) diff --git a/roles/update_containers/README.md b/roles/update_containers/README.md index 07920bcb8d..0df26d7fe2 100644 --- a/roles/update_containers/README.md +++ b/roles/update_containers/README.md @@ -13,6 +13,7 @@ If apply, please explain the privilege escalation done in this role. * `cifmw_update_containers_base_dir`: The base directory of update_containers role. Default is "ansible_user_dir ~ '/ci-framework-data')". * `cifmw_update_containers_dest_path`: The destination file path to create update containers CR file. * `cifmw_update_containers_registry`: The container registry to pull containers from. Default to "quay.io". 
+* `cifmw_update_containers_name_prefix`: The container name prefix. Default to "openstack". * `cifmw_update_containers_org`: The container registry namespace to pull container from. Default to `podified-antelope-centos9` * `cifmw_update_containers_tag`: The container tag. Default to "current-podified". * `cifmw_update_containers_cindervolumes`: The names of the cinder volumes prefix. Default to `[]`. diff --git a/roles/update_containers/defaults/main.yml b/roles/update_containers/defaults/main.yml index 95142c4136..e7eeccefde 100644 --- a/roles/update_containers/defaults/main.yml +++ b/roles/update_containers/defaults/main.yml @@ -37,6 +37,7 @@ cifmw_update_containers_dest_path: >- cifmw_update_containers_registry: "quay.io" cifmw_update_containers_org: "podified-antelope-centos9" cifmw_update_containers_tag: "current-podified" +cifmw_update_containers_name_prefix: "openstack" cifmw_update_containers_openstack: false cifmw_update_containers_rollback: false cifmw_update_containers_cindervolumes: diff --git a/roles/update_containers/templates/update_containers.j2 b/roles/update_containers/templates/update_containers.j2 index 6daeda06fa..1b73aa774c 100644 --- a/roles/update_containers/templates/update_containers.j2 +++ b/roles/update_containers/templates/update_containers.j2 @@ -6,87 +6,87 @@ metadata: spec: customContainerImages: {% if cifmw_update_containers_openstack | bool %} - aodhAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-aodh-api:{{ cifmw_update_containers_tag }} - aodhEvaluatorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-aodh-evaluator:{{ cifmw_update_containers_tag }} - aodhListenerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-aodh-listener:{{ cifmw_update_containers_tag }} - aodhNotifierImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-aodh-notifier:{{ cifmw_update_containers_tag 
}} - barbicanAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-barbican-api:{{ cifmw_update_containers_barbican_custom_tag | default(cifmw_update_containers_tag) }} - barbicanKeystoneListenerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-barbican-keystone-listener:{{ cifmw_update_containers_tag }} - barbicanWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-barbican-worker:{{ cifmw_update_containers_barbican_custom_tag | default(cifmw_update_containers_tag) }} - ceilometerCentralImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ceilometer-central:{{ cifmw_update_containers_tag }} - ceilometerComputeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ceilometer-compute:{{ cifmw_update_containers_tag }} - ceilometerIpmiImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ceilometer-ipmi:{{ cifmw_update_containers_tag }} - ceilometerNotificationImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ceilometer-notification:{{ cifmw_update_containers_tag }} - cinderAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cinder-api:{{ cifmw_update_containers_tag }} - cinderBackupImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cinder-backup:{{ cifmw_update_containers_tag }} - cinderSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cinder-scheduler:{{ cifmw_update_containers_tag }} - cinderVolumeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cinder-volume:{{ cifmw_update_containers_tag }} - designateAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-api:{{ 
cifmw_update_containers_tag }} - designateBackendbind9Image: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-backend-bind9:{{ cifmw_update_containers_tag }} - designateCentralImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-central:{{ cifmw_update_containers_tag }} - designateMdnsImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-mdns:{{ cifmw_update_containers_tag }} - designateProducerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-producer:{{ cifmw_update_containers_tag }} - designateUnboundImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-unbound:{{ cifmw_update_containers_tag }} - designateWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-designate-worker:{{ cifmw_update_containers_tag }} - edpmFrrImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-frr:{{ cifmw_update_containers_tag }} - edpmIscsidImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-iscsid:{{ cifmw_update_containers_tag }} - edpmLogrotateCrondImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cron:{{ cifmw_update_containers_tag }} - edpmMultipathdImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-multipathd:{{ cifmw_update_containers_tag }} - edpmNeutronDhcpAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-neutron-dhcp-agent:{{ cifmw_update_containers_tag }} - edpmNeutronMetadataAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-neutron-metadata-agent-ovn:{{ cifmw_update_containers_tag }} - edpmNeutronOvnAgentImage: {{ cifmw_update_containers_registry }}/{{ 
cifmw_update_containers_org }}/openstack-neutron-ovn-agent:{{ cifmw_update_containers_tag }} - edpmNeutronSriovAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-neutron-sriov-agent:{{ cifmw_update_containers_tag }} - edpmOvnBgpAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-bgp-agent:{{ cifmw_update_containers_tag }} - glanceAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-glance-api:{{ cifmw_update_containers_tag }} - heatAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-heat-api:{{ cifmw_update_containers_tag }} - heatCfnapiImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-heat-api-cfn:{{ cifmw_update_containers_tag }} - heatEngineImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-heat-engine:{{ cifmw_update_containers_tag }} - horizonImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-horizon:{{ cifmw_update_containers_tag }} - infraDnsmasqImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-neutron-server:{{ cifmw_update_containers_tag }} - infraMemcachedImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-memcached:{{ cifmw_update_containers_tag }} - ironicAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ironic-api:{{ cifmw_update_containers_tag }} - ironicConductorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ironic-conductor:{{ cifmw_update_containers_tag }} - ironicInspectorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ironic-inspector:{{ cifmw_update_containers_tag }} - ironicNeutronAgentImage: {{ cifmw_update_containers_registry }}/{{ 
cifmw_update_containers_org }}/openstack-ironic-neutron-agent:{{ cifmw_update_containers_tag }} - ironicPxeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ironic-pxe:{{ cifmw_update_containers_tag }} - keystoneAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-keystone:{{ cifmw_update_containers_tag }} - manilaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-manila-api:{{ cifmw_update_containers_tag }} - manilaSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-manila-scheduler:{{ cifmw_update_containers_tag }} - manilaShareImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-manila-share:{{ cifmw_update_containers_tag }} - mariadbImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-mariadb:{{ cifmw_update_containers_tag }} - neutronAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-neutron-server:{{ cifmw_update_containers_tag }} - novaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-nova-api:{{ cifmw_update_containers_tag }} - novaComputeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-nova-compute:{{ cifmw_update_containers_tag }} - novaConductorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-nova-conductor:{{ cifmw_update_containers_tag }} - novaNovncImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-nova-novncproxy:{{ cifmw_update_containers_tag }} - novaSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-nova-scheduler:{{ cifmw_update_containers_tag }} - octaviaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org 
}}/openstack-octavia-api:{{ cifmw_update_containers_tag }} - octaviaHealthmanagerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-octavia-health-manager:{{ cifmw_update_containers_tag }} - octaviaHousekeepingImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-octavia-housekeeping:{{ cifmw_update_containers_tag }} - octaviaWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-octavia-worker:{{ cifmw_update_containers_tag }} - openstackClientImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-openstackclient:{{ cifmw_update_containers_tag }} - ovnControllerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-controller:{{ cifmw_update_containers_tag }} - ovnControllerOvsImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-base:{{ cifmw_update_containers_tag }} - ovnNbDbclusterImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-nb-db-server:{{ cifmw_update_containers_tag }} - ovnNorthdImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-northd:{{ cifmw_update_containers_tag }} - ovnSbDbclusterImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-ovn-sb-db-server:{{ cifmw_update_containers_tag }} - placementAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-placement-api:{{ cifmw_update_containers_tag }} - rabbitmqImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-rabbitmq:{{ cifmw_update_containers_tag }} - swiftAccountImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-swift-account:{{ cifmw_update_containers_tag }} - swiftContainerImage: {{ cifmw_update_containers_registry }}/{{ 
cifmw_update_containers_org }}/openstack-swift-container:{{ cifmw_update_containers_tag }} - swiftObjectImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-swift-object:{{ cifmw_update_containers_tag }} - swiftProxyImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-swift-proxy-server:{{ cifmw_update_containers_tag }} - testTempestImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-tempest-all:{{ cifmw_update_containers_tag }} + aodhAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-aodh-api:{{ cifmw_update_containers_tag }} + aodhEvaluatorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-aodh-evaluator:{{ cifmw_update_containers_tag }} + aodhListenerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-aodh-listener:{{ cifmw_update_containers_tag }} + aodhNotifierImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-aodh-notifier:{{ cifmw_update_containers_tag }} + barbicanAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-barbican-api:{{ cifmw_update_containers_barbican_custom_tag | default(cifmw_update_containers_tag) }} + barbicanKeystoneListenerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-barbican-keystone-listener:{{ cifmw_update_containers_tag }} + barbicanWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-barbican-worker:{{ cifmw_update_containers_barbican_custom_tag | default(cifmw_update_containers_tag) }} + ceilometerCentralImage: {{ 
cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ceilometer-central:{{ cifmw_update_containers_tag }} + ceilometerComputeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ceilometer-compute:{{ cifmw_update_containers_tag }} + ceilometerIpmiImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ceilometer-ipmi:{{ cifmw_update_containers_tag }} + ceilometerNotificationImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ceilometer-notification:{{ cifmw_update_containers_tag }} + cinderAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-api:{{ cifmw_update_containers_tag }} + cinderBackupImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-backup:{{ cifmw_update_containers_tag }} + cinderSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-scheduler:{{ cifmw_update_containers_tag }} + cinderVolumeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-volume:{{ cifmw_update_containers_tag }} + designateAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-designate-api:{{ cifmw_update_containers_tag }} + designateBackendbind9Image: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-designate-backend-bind9:{{ cifmw_update_containers_tag }} + designateCentralImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ 
cifmw_update_containers_name_prefix }}-designate-central:{{ cifmw_update_containers_tag }} + designateMdnsImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-designate-mdns:{{ cifmw_update_containers_tag }} + designateProducerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-designate-producer:{{ cifmw_update_containers_tag }} + designateUnboundImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-unbound:{{ cifmw_update_containers_tag }} + designateWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-designate-worker:{{ cifmw_update_containers_tag }} + edpmFrrImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-frr:{{ cifmw_update_containers_tag }} + edpmIscsidImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-iscsid:{{ cifmw_update_containers_tag }} + edpmLogrotateCrondImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cron:{{ cifmw_update_containers_tag }} + edpmMultipathdImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-multipathd:{{ cifmw_update_containers_tag }} + edpmNeutronDhcpAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-dhcp-agent:{{ cifmw_update_containers_tag }} + edpmNeutronMetadataAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-metadata-agent-ovn:{{ cifmw_update_containers_tag }} + edpmNeutronOvnAgentImage: {{ 
cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-ovn-agent:{{ cifmw_update_containers_tag }} + edpmNeutronSriovAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-sriov-agent:{{ cifmw_update_containers_tag }} + edpmOvnBgpAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-bgp-agent:{{ cifmw_update_containers_tag }} + glanceAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-glance-api:{{ cifmw_update_containers_tag }} + heatAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-heat-api:{{ cifmw_update_containers_tag }} + heatCfnapiImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-heat-api-cfn:{{ cifmw_update_containers_tag }} + heatEngineImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-heat-engine:{{ cifmw_update_containers_tag }} + horizonImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-horizon:{{ cifmw_update_containers_tag }} + infraDnsmasqImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-server:{{ cifmw_update_containers_tag }} + infraMemcachedImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-memcached:{{ cifmw_update_containers_tag }} + ironicAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ironic-api:{{ cifmw_update_containers_tag }} + 
ironicConductorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ironic-conductor:{{ cifmw_update_containers_tag }} + ironicInspectorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ironic-inspector:{{ cifmw_update_containers_tag }} + ironicNeutronAgentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ironic-neutron-agent:{{ cifmw_update_containers_tag }} + ironicPxeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ironic-pxe:{{ cifmw_update_containers_tag }} + keystoneAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-keystone:{{ cifmw_update_containers_tag }} + manilaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-manila-api:{{ cifmw_update_containers_tag }} + manilaSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-manila-scheduler:{{ cifmw_update_containers_tag }} + manilaShareImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-manila-share:{{ cifmw_update_containers_tag }} + mariadbImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-mariadb:{{ cifmw_update_containers_tag }} + neutronAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-neutron-server:{{ cifmw_update_containers_tag }} + novaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-nova-api:{{ 
cifmw_update_containers_tag }} + novaComputeImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-nova-compute:{{ cifmw_update_containers_tag }} + novaConductorImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-nova-conductor:{{ cifmw_update_containers_tag }} + novaNovncImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-nova-novncproxy:{{ cifmw_update_containers_tag }} + novaSchedulerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-nova-scheduler:{{ cifmw_update_containers_tag }} + octaviaAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-octavia-api:{{ cifmw_update_containers_tag }} + octaviaHealthmanagerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-octavia-health-manager:{{ cifmw_update_containers_tag }} + octaviaHousekeepingImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-octavia-housekeeping:{{ cifmw_update_containers_tag }} + octaviaWorkerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-octavia-worker:{{ cifmw_update_containers_tag }} + openstackClientImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-openstackclient:{{ cifmw_update_containers_tag }} + ovnControllerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-controller:{{ cifmw_update_containers_tag }} + ovnControllerOvsImage: {{ cifmw_update_containers_registry }}/{{ 
cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-base:{{ cifmw_update_containers_tag }} + ovnNbDbclusterImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-nb-db-server:{{ cifmw_update_containers_tag }} + ovnNorthdImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-northd:{{ cifmw_update_containers_tag }} + ovnSbDbclusterImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-ovn-sb-db-server:{{ cifmw_update_containers_tag }} + placementAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-placement-api:{{ cifmw_update_containers_tag }} + rabbitmqImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-rabbitmq:{{ cifmw_update_containers_tag }} + swiftAccountImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-account:{{ cifmw_update_containers_tag }} + swiftContainerImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-container:{{ cifmw_update_containers_tag }} + swiftObjectImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-object:{{ cifmw_update_containers_tag }} + swiftProxyImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-proxy-server:{{ cifmw_update_containers_tag }} + testTempestImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-tempest-all:{{ cifmw_update_containers_tag }} {% if cifmw_update_containers_cindervolumes | 
length > 0 %} cinderVolumeImages: {% for vol in cifmw_update_containers_cindervolumes %} - {{ vol }}: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-cinder-volume:{{ cifmw_update_containers_tag }} + {{ vol }}: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-volume:{{ cifmw_update_containers_tag }} {% endfor %} {% endif %} {% if cifmw_update_containers_manilashares | length > 0 %} manilaShareImages: {% for shares in cifmw_update_containers_manilashares %} - {{ shares }}: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-manila-share:{{ cifmw_update_containers_tag }} + {{ shares }}: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-manila-share:{{ cifmw_update_containers_tag }} {% endfor %} {% endif %} {% endif %} @@ -106,5 +106,5 @@ spec: edpmNodeExporterImage: {{ cifmw_update_containers_edpmnodeexporterimage }} {% endif %} {% if cifmw_update_containers_agentimage is defined %} - agentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/openstack-baremetal-operator-agent:{{ cifmw_update_containers_tag }} + agentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-baremetal-operator-agent:{{ cifmw_update_containers_tag }} {% endif %} From f7088229079ee566232c6a492fa8f66f01c15efa Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 7 Jul 2025 17:36:06 +0200 Subject: [PATCH 212/480] Add docs for Ansible unit tests Docs will describe how to run unit tests for our modules using ansible-test tool. 
Signed-off-by: Daniel Pawlik --- docs/source/development/03_ansible_test.md | 33 ++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 docs/source/development/03_ansible_test.md diff --git a/docs/source/development/03_ansible_test.md b/docs/source/development/03_ansible_test.md new file mode 100644 index 0000000000..c7dbf54e86 --- /dev/null +++ b/docs/source/development/03_ansible_test.md @@ -0,0 +1,33 @@ +# Run ansible-tests + +Most of the modules have unit jobs to verify if functions +returns what they should to avoid potential errors after modification. + +## Testing + +The Ansible units job tests are located in `tests/unit/modules/`. +To run the tests, follow the guide: + +```shell +podman run -it centos:stream9 bash + +### inside the container ### + +# install basic deps +yum install -y git make sudo python3.11-pip + +# clone CI framework +git clone https://github.com/openstack-k8s-operators/ci-framework && cd ci-framework + +# prepare venv dir +make setup_tests + +# source venv +source $HOME/test-python/bin/activate + +# install test-requirements.txt via pip +pip3 install -r test-requirements.txt + +# run script that execute ansible tests +bash scripts/run_ansible_test +``` From b7db8d232d45af3c82537ae1fd4af6e93e1f0115 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 30 Jun 2025 13:42:22 +0200 Subject: [PATCH 213/480] Improve warning comment in playbooks The comment should better warn of future plans. 
Signed-off-by: Daniel Pawlik --- playbooks/01-bootstrap.yml | 2 +- playbooks/02-infra.yml | 2 +- playbooks/03-build-packages.yml | 2 +- playbooks/04-build-containers.yml | 2 +- playbooks/05-build-operators.yml | 2 +- playbooks/06-deploy-architecture.yml | 2 +- playbooks/06-deploy-edpm.yml | 2 +- playbooks/07-admin-setup.yml | 2 +- playbooks/08-run-tests.yml | 2 +- playbooks/09-compliance.yml | 2 +- playbooks/nfs.yml | 2 +- playbooks/validations.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/playbooks/01-bootstrap.yml b/playbooks/01-bootstrap.yml index 42697c2ceb..912aefd9a2 100644 --- a/playbooks/01-bootstrap.yml +++ b/playbooks/01-bootstrap.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/bootstrap.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE. # - name: Bootstrap playbook hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/02-infra.yml b/playbooks/02-infra.yml index 61b66abff1..7042de3211 100644 --- a/playbooks/02-infra.yml +++ b/playbooks/02-infra.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/infra.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Run pre_infra hooks hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/03-build-packages.yml b/playbooks/03-build-packages.yml index 44fdc2dda2..32ab5def16 100644 --- a/playbooks/03-build-packages.yml +++ b/playbooks/03-build-packages.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/build_packages.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. 
# - name: Build package playbook hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/04-build-containers.yml b/playbooks/04-build-containers.yml index e990a0b8ad..d943089a67 100644 --- a/playbooks/04-build-containers.yml +++ b/playbooks/04-build-containers.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/build_containers.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Build container playbook hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/05-build-operators.yml b/playbooks/05-build-operators.yml index 1de74ad195..a058ebd0ef 100644 --- a/playbooks/05-build-operators.yml +++ b/playbooks/05-build-operators.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/build_operators.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Build operators playbook hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index 302e060281..41e997c97b 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: roles/cifmw_setup/tasks/deploy_architecture.yml -# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE. # - name: Deploy VA hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index c3f210514f..75eb8c9f9b 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/deploy-edpm.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. 
IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Deploy podified control plane hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/07-admin-setup.yml b/playbooks/07-admin-setup.yml index b3f67cee77..03ed112a32 100644 --- a/playbooks/07-admin-setup.yml +++ b/playbooks/07-admin-setup.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/admin_setup.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Post-deployment admin setup steps hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/08-run-tests.yml b/playbooks/08-run-tests.yml index f087a12338..4004d66299 100644 --- a/playbooks/08-run-tests.yml +++ b/playbooks/08-run-tests.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: cifmw_setup/tasks/run_tests.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: "Test playbook" hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/09-compliance.yml b/playbooks/09-compliance.yml index 6378a51fd2..2cb51525e4 100644 --- a/playbooks/09-compliance.yml +++ b/playbooks/09-compliance.yml @@ -1,7 +1,7 @@ --- # # NOTE: Playbook migrated to: deploy-edpm.yml#L96-119. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Run operators compliance scans hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 72932bb8bf..3789b0a763 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -16,7 +16,7 @@ # # NOTE: Playbook migrated to: roles/cifmw_nfs/tasks/main.yml. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. 
# - name: Deploy an NFS server diff --git a/playbooks/validations.yml b/playbooks/validations.yml index 6c0a6d063c..9d115404e5 100644 --- a/playbooks/validations.yml +++ b/playbooks/validations.yml @@ -2,7 +2,7 @@ # NOTE: Playbook migrated to: roles/cifmw_setup/tasks/hci_deploy.yml & # 06-deploy-architecture.yml. # This migration is temporary, and will be further migrated to role. -# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE.. # - name: Execute the validations role hosts: "{{ cifmw_target_host | default('localhost') }}" From 436d25544cdd275b43033a2e816491eb2ab377cc Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 24 Jun 2025 14:11:53 +0200 Subject: [PATCH 214/480] Improve molecule documentation The documentation before was very basic and it does not bring important information, how new community member can run the molecule tests locally. Signed-off-by: Daniel Pawlik --- docs/source/development/01_guidelines.md | 3 +- docs/source/development/01_nested_crc.md | 258 +++++++++++++++++++++++ docs/source/development/02_molecule.md | 109 +++++++++- 3 files changed, 358 insertions(+), 12 deletions(-) create mode 100644 docs/source/development/01_nested_crc.md diff --git a/docs/source/development/01_guidelines.md b/docs/source/development/01_guidelines.md index 4cf4f06552..b988072aff 100644 --- a/docs/source/development/01_guidelines.md +++ b/docs/source/development/01_guidelines.md @@ -57,7 +57,8 @@ module. For the rest, I'll use `import_*`. ### Ansible role Please take the time to ensure [molecule tests](./02_molecule.md) are present -and cover as many corner cases as possible. +and cover as many corner cases as possible. 
That would require to setup +your local environment, which can be created using [guide](./01_nested_crc.md) ### Ansible custom plugins diff --git a/docs/source/development/01_nested_crc.md b/docs/source/development/01_nested_crc.md new file mode 100644 index 0000000000..cd803611e3 --- /dev/null +++ b/docs/source/development/01_nested_crc.md @@ -0,0 +1,258 @@ +# Deploy local CRC VM + +## Local tests + +If you would like to run the molecule tests locally, you should have already +deployed VM with CRC. So far, in many places it is required to have `zuul` as a +main user. Below there would be an example how to deploy local CRC node +and a simply script, how to run example molecule test. + +### Setup the CRC node + +Here, we assume that you already create a VM that "fits" [CRC requirements](https://crc.dev/docs/installing/#_for_openshift_container_platform). +You should be aware, that some molecule tests would spawn few more virtual +machines on the same host (nested VMs), so it would be recommended to +deploy CRC on VM with minimum hardware: + +- 8 vCPUs +- 18 GB RAM +- 100 GB disk space +- CentOS 9 stream or RHEL 9 +- main user should be `zuul` (currently it is IMPORTANT) + +To setup a CRC on the node, you need to have [pull-secret.txt](https://cloud.redhat.com/openshift/create/local). + +You can continue deploy CRC using the [guide](https://crc.dev/docs/installing/), +or run Ansible tool described below. 
+ +#### Automated way to deploy CRC + +Set required variables then run below script to setup the CRC: + +```shell +# Set important variables +CRC_VM_IP='' +PULL_SECRET='' + +# Install required packages +sudo dnf install -y git ansible-core + +# Clone sf-infra repo +git clone https://github.com/softwarefactory-project/sf-infra +cd sf-infra + +# Setup inventory file +cat << EOF > inventory.yaml +--- +all: + hosts: + crc.dev: + ansible_port: 22 + ansible_host: ${CRC_VM_IP} + ansible_user: zuul + vars: + crc_parameters: "--memory 14336 --disk-size 80 --cpus 6" + openshift_pull_secret: | + ${PULL_SECRET} +EOF + +# Create playbook +cat << EOF > crc-deploy.yaml +- name: Deploy CRC + hosts: crc.dev + tasks: + - name: Fail when crc_version is not set or openshift_pull_secret is not set + ansible.builtin.fail: + when: + - crc_version is not defined + - openshift_pull_secret is not defined + + - name: Ensure cloud init is installed and is running + ansible.builtin.include_role: + name: next-gen/crc-image + tasks_from: prepare_vm.yaml + + - name: Enable nested virt, install other kernel and configure other packages + ansible.builtin.include_role: + name: next-gen/crc-image + tasks_from: configure_vm.yaml + + - name: "Run CRC {{ crc_version }} deployment" + ansible.builtin.include_role: + name: extra/crc + + - name: Ensure cloud init is installed and snapshot would be able to boot + ansible.builtin.include_role: + name: next-gen/crc-image + tasks_from: post_vm.yaml +EOF + +# Run Ansible to deploy CRC +ansible-playbook -i inventory.yaml \ + -e "modify_etcd=false" \ + -e "extracted_crc=false" \ + -e "nested_crc=true" \ + -e "crc_version=2.48.0" \ + crc-deploy.yaml + +``` + +Helpful tip: +CRC deployment took a while, so it is good to stop the virtual machine (VM), +make a backup of the VM disk qcow2 in safe place. It would be helpful when +you want to make a CRC VM from scratch, all necessary files would be already +downloaded, so you will save time. 
+ +To start CRC after VM "shutdown", just execute: + +```shell +# Set important variables +CRC_VM_IP='' + +# Setup inventory file +cat << EOF > inventory.yaml +--- +all: + hosts: + crc.dev: + ansible_port: 22 + ansible_host: ${CRC_VM_IP} + ansible_user: zuul + vars: + crc_parameters: "--memory 14336 --disk-size 80 --cpus 6" +EOF + +cat << EOF > start-crc.yaml +- hosts: crc.dev + tasks: + - name: Start crc + block: + - name: Execute crc start command + shell: | + /usr/local/bin/crc start {{ crc_parameters }} &> ~/crc-start.log + register: _crc_start_status + retries: 3 + delay: 30 + until: _crc_start_status.rc != 1 + + - name: Show available nodes + shell: | + /usr/bin/kubectl get nodes +EOF +``` + +#### Enable OpenShift Console + +Sometimes, it is needed to check how the OpenShift is working via Web interface. +In that case, we can enable such feature in CRC nested, but executing playbook: + +```shell +--- +# FROM: https://github.com/softwarefactory-project/sf-infra/blob/master/roles/extra/crc/tasks/console.yaml +- name: Enable console + hosts: crc.dev + tasks: + - name: Install required packages + become: true + ansible.builtin.package: + name: + - haproxy + - policycoreutils-python-utils + state: present + + - name: Get CRC ip address + ansible.builtin.shell: | + crc ip + register: _crc_ip + + - name: Get domain + ansible.builtin.shell: | + oc get ingresses.config/cluster -o jsonpath={.spec.domain} + register: _crc_domain + + # From https://crc.dev/crc/#setting-up-remote-server_gsg + - name: Set SELinux + become: true + community.general.seport: + ports: 6443 + proto: tcp + setype: http_port_t + state: present + + - name: Create haproxy config + become: true + ansible.builtin.copy: + content: | + global + log /dev/log local0 + + defaults + balance roundrobin + log global + maxconn 100 + mode tcp + timeout connect 5s + timeout client 500s + timeout server 500s + + listen apps + bind 0.0.0.0:80 + server crcvm {{ _crc_ip.stdout }}:80 check + + listen apps_ssl + bind 
0.0.0.0:443 + server crcvm {{ _crc_ip.stdout }}:443 check + + listen api + bind 0.0.0.0:6443 + server crcvm {{ _crc_ip.stdout }}:6443 check + dest: /etc/haproxy/haproxy.cfg + register: haproxy_status + + - name: Restart service + become: true + ansible.builtin.systemd: + name: haproxy + state: restarted + enabled: true + daemon_reload: true + when: haproxy_status.changed + + - name: Generate local machine etc hosts template + ansible.builtin.copy: + content: > + # Generate /etc/host entry. + + echo -e "Run this on your machine\n\n" + + echo "$(ip route get 1.2.3.4 | awk '{print $7}' | tr -d '\n') + console-openshift-console.{{ _crc_domain.stdout }} + api.crc.testing canary-openshift-ingress-canary.{{ _crc_domain.stdout }} + default-route-openshift-image-registry.{{ _crc_domain.stdout }} + downloads-openshift-console.{{ _crc_domain.stdout }} + oauth-openshift.{{ _crc_domain.stdout }} {{ _crc_domain.stdout }} | sudo tee -a /etc/hosts" + + echo -e "\nNow the console is available at this address: https://console-openshift-console.apps-crc.testing/" + dest: console-access.sh + +``` + +Then, execute a script on the `crc` VM: + +```shell +./console-access.sh +``` + +It should create entries in `/etc/hosts`. It is not needed on `CRC` VM, but +you need to copy it to your local (laptop) `/etc/hosts`. 
+Example how it should look like: + +```shell +CRC_VM_IP='' +cat << EOF | sudo tee -a /etc/hosts +$CRC_VM_IP console-openshift-console.apps-crc.testing api.crc.testing canary-openshift-ingress-canary.apps-crc.testing default-route-openshift-image-registry.apps-crc.testing downloads-openshift-console.apps-crc.testing oauth-openshift.apps-crc.testing apps-crc.testing +EOF +``` + +After that operation, the OpenShift console should be available on this +address: [https://console-openshift-console.apps-crc.testing/](https://console-openshift-console.apps-crc.testing/) diff --git a/docs/source/development/02_molecule.md b/docs/source/development/02_molecule.md index c21a8cb78c..638cdecfd4 100644 --- a/docs/source/development/02_molecule.md +++ b/docs/source/development/02_molecule.md @@ -22,18 +22,105 @@ For example if we need to set a timeout to the job `cifmw-molecule-rhol_crc` the These directives will be merged with the job definition created in the script [scripts/create_role_molecule.py](https://github.com/openstack-k8s-operators/ci-framework/blob/main/scripts/create_role_molecule.py) +## Regenerate molecule job + +Once you have edited the script, re-generate the molecule job: +`make role_molecule`. ## My test needs CRC -By default, molecule tests are configured to consume a simple CentOS Stream 9 -node in Zuul. But it may happen you need to talk to an OpenShift API within -your role. -In order to consume a CRC node, you have to edit the following file: -[ci/config/molecule.yaml](https://github.com/openstack-k8s-operators/ci-framework/blob/main/ci/config/molecule.yaml) -and add the directive `nodeset: centos-9-stream-crc-2-19-0-xl` under the related job. -For now, we "only" support the crc-xl nodeset. It should cover most of the -needs for molecule. It matches the **centos-9-stream-crc-2-19-0-xl** -[label in rdoproject](https://review.rdoproject.org/zuul/labels). +The guide how to setup CRC VM was described in [guide](./01_nested_crc.md). 
+This would be needed to start the molecule test. -Once you have edited the script, re-generate the molecule job: -`make role_molecule`. +## Start molecule + +Below would be an example, how to run `reproducer crc_layout` molecule job. +NOTE: make sure, it is executed as `zuul` user, otherwise it might fail (currently). + +Steps: + +```shell +# Install required packages +sudo yum install -y git vim golang ansible-core + +# Clone required repos +git clone https://github.com/openstack-k8s-operators/ci-framework src/github.com/openstack-k8s-operators/ci-framework +# optionally +git clone https://github.com/openstack-k8s-operators/install_yamls src/github.com/openstack-k8s-operators/install_yamls + +cd src/github.com/openstack-k8s-operators/ci-framework + +# workaround for old Go lang binary +go install github.com/mikefarah/yq/v4@v4.40.1 +export PATH=$PATH:~/go/bin + +# Add host key to authorized keys +if ! [ -f ~/.ssh/id_ed25519.pub ]; then + ssh-keygen -t ed25519 -a 200 -f ~/.ssh/id_ed25519 -N "" +fi +cat ~/.ssh/id_ed25519.pub >> ~/.ssh/authorized_keys + +# Create required directories +mkdir -p ~/ci-framework-data/artifacts/{parameters,roles} + +cat << EOF > custom-vars.yaml +--- +ansible_user_dir: /home/$(whoami) +zuul: + projects: + github.com/openstack-k8s-operators/ci-framework: + src_dir: "src/github.com/openstack-k8s-operators/ci-framework" +cifmw_internal_registry_login: false +cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" +cifmw_openshift_setup_skip_internal_registry: true +cifmw_artifacts_basedir: "{{ ansible_user_dir }}/ci-framework-data/artifacts " +cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" +nodepool: + cloud: "" +roles_dir: /home/$(whoami)/src/github.com/openstack-k8s-operators/ci-framework/roles +mol_config_dir: /home/$(whoami)/src/github.com/openstack-k8s-operators/ci-framework/.config/molecule/config_local.yml +cifmw_zuul_target_host: localhost +EOF + +ansible-galaxy install -r 
requirements.yml + +# Mock some roles, that are needed for Zuul CI, but not for local deployment +mkdir -p roles/mirror-info-fork/tasks +mkdir -p roles/prepare-workspace/tasks + +# Execute Ansible to prepare molecule environment +ansible-playbook -i inventory.yml \ + -e@custom-vars.yaml \ + ci/playbooks/molecule-prepare.yml + +########################## +### START MOLECULE JOB ### +########################## + +# Copy molecule job - example: crc_layout +mkdir -p roles/molecule/default/ +cp -a ./roles/reproducer/molecule/crc_layout/* roles/molecule/default/ + +# It can be done using: +# - Ansible + +ansible-playbook -i inventory.yml \ + -e@custom-vars.yaml \ + ci/playbooks/molecule-test.yml + +# - shell steps +ln -s roles/molecule . +pip3 install -r test-requirements.txt +molecule -c .config/molecule/config_local.yml test --all +``` + +### SSH to controller-0 - molecule VM + +Sometimes it is required to SSH to the controller-0 (or other VM, here is +just an example), to verify the env. To achieve that, you can do: + +```shell +ssh controller-0 +``` + +And that's it! From 95311c74f903a8a244d78ce9d8fd5fa4705bcb58 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 8 Jul 2025 09:43:55 +0200 Subject: [PATCH 215/480] Ignore ansible lint in few tasks The ansible-lint tool raises a warning message: roles/ci_dcn_site/tasks/ceph.yml:33: jinja[invalid]: An unhandled exception occurred while templating '{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr(_subnet_network_range) | first }}'. Error was a , original message: Unrecognized type <> for ipaddr filter roles/libvirt_manager/tasks/create_cloud_init_iso.yml:62: jinja[invalid]: An unhandled exception occurred while templating '{{ 99999999 | random(seed=vm) | to_uuid | lower }}'. Error was a , original message: Unexpected templating type error occurred on ({{ 99999999 | random(seed=vm) | to_uuid | lower }}): The only supported seed types are: None, int, float, str, bytes, and bytearray. 
which might be confusing for community members. Signed-off-by: Daniel Pawlik --- roles/ci_dcn_site/tasks/ceph.yml | 2 +- roles/libvirt_manager/tasks/create_cloud_init_iso.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index 835d64dcd5..f380a5c0ed 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -26,7 +26,7 @@ - name: Update the hosts file on the Ceph bootstrap host become: true vars: - ceph_boot_ssh_ip: "{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr(_subnet_network_range) | first }}" + ceph_boot_ssh_ip: "{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr(_subnet_network_range) | first }}" # noqa: jinja[invalid] delegate_to: "{{ _ceph_bootstrap_node }}" run_once: true ansible.builtin.lineinfile: diff --git a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml index b05fc30fa5..b2b4eff461 100644 --- a/roles/libvirt_manager/tasks/create_cloud_init_iso.yml +++ b/roles/libvirt_manager/tasks/create_cloud_init_iso.yml @@ -60,7 +60,7 @@ - name: "Call the config_drive role" vars: cifmw_config_drive_iso_image: "{{ _iso_path }}" - _default_uuid: "{{ 99999999 | random(seed=vm) | to_uuid | lower }}" + _default_uuid: "{{ 99999999 | random(seed=vm) | to_uuid | lower }}" # noqa: jinja[invalid] cifmw_config_drive_uuid: "{{ _uuid.stdout | default(_default_uuid) | trim}}" cifmw_config_drive_hostname: "{{ vm }}" cifmw_config_drive_networkconfig: "{{ _libvirt_manager_network_data | default(None) }}" From 295116b935244df4aabab86627131a43504d8109 Mon Sep 17 00:00:00 2001 From: Jiri Stransky Date: Fri, 4 Jul 2025 11:51:06 +0200 Subject: [PATCH 216/480] Update: fix broken registry login https://issues.redhat.com/browse/OSPCIX-945 --- .../tasks/create_local_openstackclient.yml | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git 
a/roles/update/tasks/create_local_openstackclient.yml b/roles/update/tasks/create_local_openstackclient.yml index 74557fba37..0866473b08 100644 --- a/roles/update/tasks/create_local_openstackclient.yml +++ b/roles/update/tasks/create_local_openstackclient.yml @@ -14,9 +14,13 @@ msg: "No OSDPNS resources found in the 'openstack' namespace!" when: _cifmw_update_osdpns_all_info.resources | length == 0 -- name: Choose the first OSDPNS resource +- name: Choose the first OSDPNS resource which has edpm_container_registry_logins defined ansible.builtin.set_fact: - _cifmw_update_osdpns_info: "{{ _cifmw_update_osdpns_all_info.resources[0] }}" + _cifmw_update_osdpns_info: >- + {{ + _cifmw_update_osdpns_all_info.resources + | community.general.json_query('[?spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins] | [0]') + }} - name: Display which osdpns we're using ansible.builtin.debug: @@ -24,7 +28,7 @@ - name: Determine registry ansible.builtin.set_fact: - cifmw_update_local_registry: >- + osc_podman_login_registry: >- {{ (cifmw_ci_gen_kustomize_values_ooi_image.split('/')[0]) if cifmw_ci_gen_kustomize_values_ooi_image is defined @@ -34,31 +38,30 @@ - name: Check if credentials exist ansible.builtin.set_fact: - brew_username: "{{ login_username }}" - brew_password: "{{ login_dict[login_username] }}" + osc_podman_login_username: "{{ login_username }}" + osc_podman_login_password: "{{ login_dict[login_username] }}" vars: login_dict: >- {{ _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars. 
- edpm_container_registry_logins[cifmw_update_local_registry] + edpm_container_registry_logins[osc_podman_login_registry] }} login_username: "{{ login_dict.keys()|list|first }}" when: - _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins is defined - login_dict is defined - login_dict|length > 0 - - cifmw_update_local_registry != 'quay.io' + - osc_podman_login_registry != 'quay.io' - name: Log in to registry when needed containers.podman.podman_login: - # Hardcoded for now - registry: "registry.redhat.io" - username: "{{ brew_username }}" - password: "{{ brew_password }}" + registry: "{{ osc_podman_login_registry }}" + username: "{{ osc_podman_login_username }}" + password: "{{ osc_podman_login_password }}" when: - - brew_username is defined - - brew_password is defined - - cifmw_update_local_registry != 'quay.io' + - osc_podman_login_username is defined + - osc_podman_login_password is defined + - osc_podman_login_registry != 'quay.io' - name: Retrieve the openstackclient Pod kubernetes.core.k8s_info: From 8c5e91fed36d3debbb0efe8534edc2ecb297e974 Mon Sep 17 00:00:00 2001 From: Mauricio Harley Date: Thu, 5 Jun 2025 16:49:44 +0000 Subject: [PATCH 217/480] Add playbook to configure Proteccio access for Barbican This work is analog to what has been done for Thales Luna. This playbook will configure the Barbican pods on the test system to use a Proteccio HSM as a crypto backend to store and generate keys. The involved steps are: 1. Create modified barbican-api and barbican-worker images that contain the HSM client software. The new images will be published locally on the CRC node with a special tag ("cifmw_update_barbican_custom_tag") appended. 2. Create a secret to store certificates to access the HSM (server certificate, client certificate and key). 3. Create a secret to store the password needed to access the HSM device. 4. Use the update-containers role to modify openstackversion to use the updated Barbican images. 5. 
Modify the control plane CR to add the needed config to Barbican to use the Proteccio HSM as a backend. Steps 1-3 are done by a separate Ansible role (https://github.com/openstack-k8s-operators/ansible-role-rhoso-proteccio-hsm/). This is useful because we'll be able to modify and branch this role as appropriate as the HSM software changes. Jira: https://issues.redhat.com/browse/OSPRH-14750 Signed-off-by: Mauricio Harley --- docs/dictionary/en-custom.txt | 1 + hooks/playbooks/barbican-enable-proteccio.yml | 96 +++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 hooks/playbooks/barbican-enable-proteccio.yml diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index c826798514..a21c3655b3 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -82,6 +82,7 @@ ciuser cjeanner ckcg cli +client clusterimageset clusterpool cmd diff --git a/hooks/playbooks/barbican-enable-proteccio.yml b/hooks/playbooks/barbican-enable-proteccio.yml new file mode 100644 index 0000000000..5bd6ff4fcb --- /dev/null +++ b/hooks/playbooks/barbican-enable-proteccio.yml @@ -0,0 +1,96 @@ +--- +- name: Create modified barbican image and get secrets + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Check out the role Git repository + ansible.builtin.git: + dest: "./rhoso_proteccio_hsm" + repo: "{{ cifmw_hsm_proteccio_ansible_role_repo | default('https://github.com/openstack-k8s-operators/ansible-role-rhoso-proteccio-hsm.git', true) }}" + version: "{{ cifmw_hsm_proteccio_ansible_role_version| default('main', true) }}" + + - name: Create and upload the new Barbican images + ansible.builtin.include_role: + name: rhoso_proteccio_hsm + tasks_from: create_image + vars: + barbican_src_api_image_name: "{{ cifmw_barbican_src_api_image_name }}" + barbican_src_worker_image_name: "{{ cifmw_barbican_src_worker_image_name }}" + barbican_src_image_registry: "{{ content_provider_registry_ip }}:5001" + 
barbican_src_image_namespace: "{{ cifmw_update_containers_org | default('podified-antelope-centos9') }}" + barbican_src_image_tag: "{{ cifmw_update_containers_tag | default('component-ci-testing') }}" + barbican_dest_api_image_name: "{{ cifmw_barbican_dest_api_image_name }}" + barbican_dest_worker_image_name: "{{ cifmw_barbican_dest_worker_image_name }}" + barbican_dest_image_registry: "{{ content_provider_registry_ip }}:5001" + barbican_dest_image_namespace: "{{ cifmw_update_containers_org | default('podified-antelope-centos9') }}" + barbican_dest_image_tag: "{{ cifmw_update_containers_barbican_custom_tag }}" + image_registry_verify_tls: "{{ cifmw_image_registry_verify_tls | default('false', true) }}" + proteccio_client_src: "{{ cifmw_hsm_proteccio_client_src }}" + proteccio_client_iso: "{{ cifmw_hsm_proteccio_client_iso | default('Proteccio3.06.05.iso') }}" + + - name: Create secrets with the HSM certificates and hsm-login credentials + ansible.builtin.include_role: + name: rhoso_proteccio_hsm + tasks_from: create_secrets + vars: + proteccio_conf_src: "{{ cifmw_hsm_proteccio_conf_src }}" + proteccio_client_crt_src: "{{ cifmw_hsm_proteccio_client_crt_src }}" + proteccio_client_key_src: "{{ cifmw_hsm_proteccio_client_key_src }}" + proteccio_server_crt_src: "{{ cifmw_hsm_proteccio_server_crt_src }}" + proteccio_password: "{{ cifmw_hsm_password }}" + kubeconfig_path: "{{ cifmw_openshift_kubeconfig }}" + oc_dir: "{{ cifmw_path }}" + proteccio_data_secret: "{{ cifmw_hsm_proteccio_client_data_secret | default('barbican-proteccio-client-data', true) }}" + proteccio_data_secret_namespace: "{{ cifmw_hsm_proteccio_client_data_secret_namespace }}" + login_secret: "{{ cifmw_hsm_login_secret | default('barbican-proteccio-login', true) }}" + login_secret_field: "{{ cifmw_hsm_login_secret_field | default('PKCS11Pin') }}" + +- name: Create kustomization to update Barbican to use proteccio + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Create 
file to customize barbican resource deployed in the control plane + vars: + client_data_secret: "{{ cifmw_hsm_proteccio_client_data_secret | default('barbican-proteccio-client-data', true) }}" + login_secret: "{{ cifmw_hsm_login_secret | default('barbican-proteccio-login', true) }}" + ansible.builtin.copy: + mode: '0644' + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/93-barbican-proteccio.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + namespace: {{ namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/barbican/template/globalDefaultSecretStore + value: pkcs11 + - op: add + path: /spec/barbican/template/enabledSecretStores + value: + - pkcs11 + - op: add + path: /spec/barbican/template/pkcs11 + value: + loginSecret: {{ login_secret }} + clientDataSecret: {{ client_data_secret }} + clientDataPath: /etc/proteccio + - op: add + path: /spec/barbican/template/customServiceConfig + value: | + [p11_crypto_plugin] + plugin_name = PKCS11 + library_path = {{ cifmw_hsm_proteccio_library_path | default('/usr/lib64/libnethsm.so', true) }} + token_labels = {{ cifmw_hsm_proteccio_partition }} + mkek_label = {{ cifmw_hsm_mkek_label }} + hmac_label = {{ cifmw_hsm_hmac_label }} + encryption_mechanism = CKM_AES_CBC + hmac_key_type = CKK_GENERIC_SECRET + hmac_keygen_mechanism = CKM_GENERIC_SECRET_KEY_GEN + hmac_mechanism = CKM_SHA256_HMAC + key_wrap_mechanism = {{ cifmw_hsm_key_wrap_mechanism }} + key_wrap_generate_iv = true + always_set_cka_sensitive = true + os_locking_ok = false From 818550c76f456beb1b73bd35db9e00e15aff9023 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 25 Jun 2025 14:33:44 +0200 Subject: [PATCH 218/480] Improve crawl_n_mask script The script have few issues that gets mostly timeout in the CI jobs. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/collect-logs.yml | 2 +- plugins/modules/crawl_n_mask.py | 230 +++++++++++------------- tests/unit/modules/test_crawl_n_mask.py | 69 +++---- 3 files changed, 126 insertions(+), 175 deletions(-) diff --git a/ci/playbooks/collect-logs.yml b/ci/playbooks/collect-logs.yml index aeabecdf1a..2677c33dc9 100644 --- a/ci/playbooks/collect-logs.yml +++ b/ci/playbooks/collect-logs.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/collect-logs.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/plugins/modules/crawl_n_mask.py b/plugins/modules/crawl_n_mask.py index 74c5619405..8470b9e5d8 100755 --- a/plugins/modules/crawl_n_mask.py +++ b/plugins/modules/crawl_n_mask.py @@ -63,10 +63,28 @@ import os import re -import yaml -from typing import Dict, Optional, Any, Union +import pathlib + from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_native + +# ### To debug ### +# # playbook: +# --- +# - name: test +# hosts: localhost +# tasks: +# - name: Mask secrets in yaml log files +# timeout: 3600 +# crawl_n_mask: +# path: "/tmp/logs/" +# isdir: true +# +# # args.json: +# {"ANSIBLE_MODULE_ARGS": {"path": "/tmp/logs/", "isdir": true}} +# +# # execute: +# python3 plugins/modules/crawl_n_mask.py ./args.json +################ # files which are yaml but do not end with .yaml or .yml ALLOWED_YAML_FILES = [ @@ -151,21 +169,9 @@ # Masking string MASK_STR = "**********" -# general and connection regexes are used to match the pattern that should  ̰be -# applied to both Protect keys and connection keys, which is the same thing -# done in SoS reports -gen_regex = r"(\w*(%s)\s*=\s*)(.*)" % "|".join(PROTECT_KEYS) -con_regex = r"((%s)\s*://)(\w*):(.*)(@(.*))" % "|".join(CONNECTION_KEYS) - # regex of excluded file extensions excluded_file_ext_regex = 
r"(^.*(%s).*)" % "|".join(EXCLUDED_FILE_EXT) -# regex of keys which will be checked against every key -# as in yaml files, we have data in format = -# if a key is sensitive, it will be found using this regex -key_regex = r"(%s)\d*$" % "|".join(PROTECT_KEYS) -regexes = [gen_regex, con_regex] - def handle_walk_errors(e): raise e @@ -177,22 +183,59 @@ def crawl(module, path) -> bool: and find eligible files for masking. """ changed = False - for root, _, files in os.walk(path, onerror=handle_walk_errors): - if any(excluded in root.split("/") for excluded in EXCLUDED_DIRS): + base_path = os.path.normpath(path) + for root, _, files in os.walk(base_path, onerror=handle_walk_errors): + # Get relative path from our base path + rel_path = os.path.relpath(root, base_path) + + # Check if any parent directory (not the root) is excluded + if any(part in EXCLUDED_DIRS for part in rel_path.split(os.sep)): continue for f in files: if not re.search(excluded_file_ext_regex, f): - file_changed = mask(module, os.path.join(root, f)) - # even if one file is masked, the final result will be True - if file_changed: + if mask(module, os.path.join(root, f)): + # even if one file is masked, the final result will be True changed = True return changed +def _get_masked_string(value): + if len(value) <= 4: + return value[:2] + MASK_STR + return value[:2] + MASK_STR + value[-2:] + + +def partial_mask(value): + """ + Check length of the string. If it is too long, take 2 chars + from beginning, then add mask string and add 2 chars from the + end. 
+ If value is short, take just 2 chars and add mask string + """ + if not value.strip(): + return + + if "'" in value: + parsed_value = value.split("'") + if len(parsed_value) > 2 and parsed_value[1] != "": + prefix = parsed_value[0] + value = _get_masked_string(parsed_value[1]) + suffix = parsed_value[2] + return f"{prefix}'{value}'{suffix}" + else: + match = re.match(r"^(\s*)(.*?)(\n?)$", value) + if match: + parts = list(match.groups()) + prefix = parts[0] + value = _get_masked_string(parts[1]) + suffix = parts[2] + return f"{prefix}'{value}'{suffix}" + + def mask(module, path: str) -> bool: """ - Method responsible to begin masking on a provided + Function responsible to begin masking on a provided log file. It checks for file type, and calls respective masking methods for that file. """ @@ -201,124 +244,61 @@ def mask(module, path: str) -> bool: path.endswith((tuple(["yaml", "yml"]))) or os.path.basename(path).split(".")[0] in ALLOWED_YAML_FILES ): - changed = mask_yaml(module, path) + extension = "yaml" + changed = mask_file(module, path, extension) return changed -def process_list(lst: list) -> None: - """ - For each list we get in our yaml dict, - this method will check the type of item. - If the item in list is dict, it will call - apply_mask method to process it, else if - we get nested list, process_list will be - recursively called. - We are not checking for string as secrets - are mainly in form : in dict, - not in list as item. - """ - for item in lst: - if isinstance(item, dict): - apply_mask(item) - elif isinstance(item, list): - process_list(item) - - -def apply_regex(value: str) -> str: +def mask_yaml(infile, outfile, changed) -> bool: """ - For each string value passed as argument, try - to match the pattern according to the provided - regexes and mask any potential sensitive info. 
+ Read the file, search for colon (':'), take value and + mask sensitive data """ - for pattern in regexes: - value = re.sub(pattern, r"\1{}".format(MASK_STR), value, flags=re.I) - return value - - -def apply_mask(yaml_dict: Dict[str, Any]) -> None: - """ - Check and mask value if key of dict matches - with key_regex, else perform action on data - type of value. Call _process_list if value - is of type list, call _apply_regex for strings, - recursively call _apply_mask in case value is - of type dict. - """ - for k, v in yaml_dict.items(): - if re.findall(key_regex, str(k)): - yaml_dict[k] = MASK_STR - - elif isinstance(v, str): - yaml_dict[k] = apply_regex(v) - - elif isinstance(v, list): - process_list(v) - - elif isinstance(v, dict): - apply_mask(v) + for line in infile: + # Skip lines without colon + if ":" not in line: + outfile.write(line) + continue + key, sep, value = line.partition(":") + masked_value = value + for word in PROTECT_KEYS: + if key.strip() == word: + masked = partial_mask(value) + if not masked: + continue + masked_value = masked_value.replace(value, masked) + changed = True -def mask_yaml(module, path) -> bool: - """ - Method to handle masking of yaml files. - Begin with reading yaml and storing in - list (check _read_yaml for return type - info), then process the list to mask - secrets, and then write the encoded - data back. - """ - yaml_content = read_yaml(module, path) - changed = False - if not yaml_content: - return changed - # we are directly calling _process_list as - # yaml.safe_load_all returns an Iterator of - # dictionaries which we have converted into - # a list (return type of _read_yaml) - process_list(yaml_content) - - changed = write_yaml(module, path, yaml_content) + outfile.write(f"{key}{sep}{masked_value}") return changed -def read_yaml(module, file_path: str) -> Optional[Union[list, None]]: - """ - Read and Load the yaml file for - processing. 
Using yaml.safe_load_all - to handle all documents within a - single yaml file stream. Return - type (Iterator) is parsed to list - to make in-place change easy. - """ - try: - assert file_path is not None - with open(file_path, "r") as f: - return list(yaml.safe_load_all(f)) - except (FileNotFoundError, yaml.YAMLError) as e: - module.warn("Error opening file: %s" % e) - return +def replace_file(temp_path, file_path, changed): + if changed: + temp_path.replace(file_path) + else: + temp_path.unlink(missing_ok=True) -def write_yaml(module, path, encoded_secret: Any) -> bool: +def mask_file(module, path, extension) -> bool: """ - Re-write the processed yaml file in - the same path. - Writing will occur only if there are - changes to the content. + Create temporary file, replace sensitive string with masked, + then replace the tmp file with original. """ + changed = False + file_path = pathlib.Path(path) + temp_path = file_path.with_suffix(".tmp") try: - assert path is not None - if read_yaml(module, path) != encoded_secret: - with open(path, "w") as f: - yaml.safe_dump_all(encoded_secret, f, default_flow_style=False) - changed = True - except (IOError, yaml.YAMLError) as e: - module.fail_json( - msg=f"Error writing to file: {to_native(e, nonstring='simplerepr')}", - path=path, - ) - return changed + with file_path.open("r", encoding="utf-8") as infile: + with temp_path.open("w", encoding="utf-8") as outfile: + if extension == "yaml": + changed = mask_yaml(infile, outfile, changed) + replace_file(temp_path, file_path, changed) + return changed + except Exception as e: + print(f"An unexpected error occurred on masking file {file_path}: {e}") def run_module(): @@ -348,9 +328,9 @@ def run_module(): # validate if the path exists and no wrong value of isdir and path is # provided if not os.path.exists(path): - module.fail_json(msg=f"Provided path doesn't exist", path=path) + module.fail_json(msg="Provided path doesn't exist", path=path) if os.path.isdir(path) != isdir: - 
module.fail_json(msg=f"Value of isdir/path is incorrect. Please check it") + module.fail_json(msg="Value of isdir/path is incorrect. Please check it") # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current diff --git a/tests/unit/modules/test_crawl_n_mask.py b/tests/unit/modules/test_crawl_n_mask.py index e3eb65fea0..519bdd2792 100644 --- a/tests/unit/modules/test_crawl_n_mask.py +++ b/tests/unit/modules/test_crawl_n_mask.py @@ -1,8 +1,5 @@ -from xmlrpc.client import Fault - import pytest -from unittest.mock import patch, MagicMock, mock_open - +from unittest.mock import patch, MagicMock from plugins.modules import crawl_n_mask as cnm @@ -44,51 +41,25 @@ def test_crawl_false(self, test_dir, expected_files): changed = cnm.crawl(module, test_dir) assert not changed - @patch("builtins.open", new_callable=mock_open, read_data="key: value") - @patch("yaml.safe_load_all") - def test_read_yaml_success(self, mock_load, mock_open_file): - mock_load.return_value = [{"key": "value"}] - module = MagicMock() - result = cnm.read_yaml(module, "/fake/file.yaml") - assert result == [{"key": "value"}] - - def test_apply_regex(self): - value = "password=supersecret" - masked = cnm.apply_regex(value) - assert cnm.MASK_STR in masked - - def test_apply_regex_no_match(self): - value = "normal=stuff" - result = cnm.apply_regex(value) - assert result == value + def test_partial_mask_scenario_1(self): + example_value = " 'test1234'\n" + expected_value = " 'te**********34'\n" + test_value = cnm.partial_mask(example_value) + assert expected_value == test_value - @pytest.mark.parametrize( - "data, ismasked", - [ - ([{"password": "secret"}], True), - ([{"secret": "value"}], True), - ([{True: "test_bool_key"}], False), - ([{1: "int_key"}], False), - ([{1.1: "float_key"}], False), - ], - ) - def test_process_list(self, data, ismasked): - cnm.process_list(data) - if ismasked: - assert cnm.MASK_STR in 
[list(item.values())[0] for item in data] - else: - assert cnm.MASK_STR not in [list(item.values())[0] for item in data] + def test_partial_mask_scenario_2(self): + example_value = " osp_ci_framework_keytab\n" + expected_value = " 'os**********ab'\n" + test_value = cnm.partial_mask(example_value) + assert expected_value == test_value - def test_apply_mask(self): - data = {"password": "secret", "normal": "data"} - cnm.apply_mask(data) - assert data["password"] == cnm.MASK_STR + def test_partial_mask_scenario_3(self): + example_value = " ''\n" + test_value = cnm.partial_mask(example_value) + assert test_value is None - @patch("plugins.modules.crawl_n_mask.read_yaml") - @patch("plugins.modules.crawl_n_mask.write_yaml") - def test_mask_yaml(self, mock_write, mock_read): - mock_read.return_value = [{"password": "secret"}] - mock_write.return_value = True - module = MagicMock() - changed = cnm.mask_yaml(module, "/fake/file.yaml") - assert changed + def test_partial_mask_scenario_4(self): + example_value = "tet" + expected_value = "'te**********'" + test_value = cnm.partial_mask(example_value) + assert expected_value == test_value From 592e9d20ade53d06abd5df99db7ec6c6abf0fbd2 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 9 Jul 2025 13:49:39 +0200 Subject: [PATCH 219/480] update: fix naming convention. All variables in the update role should start with `cifmw_update`. 
--- .../tasks/create_local_openstackclient.yml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/update/tasks/create_local_openstackclient.yml b/roles/update/tasks/create_local_openstackclient.yml index 0866473b08..7a731745cf 100644 --- a/roles/update/tasks/create_local_openstackclient.yml +++ b/roles/update/tasks/create_local_openstackclient.yml @@ -28,7 +28,7 @@ - name: Determine registry ansible.builtin.set_fact: - osc_podman_login_registry: >- + cifmw_update_login_registry: >- {{ (cifmw_ci_gen_kustomize_values_ooi_image.split('/')[0]) if cifmw_ci_gen_kustomize_values_ooi_image is defined @@ -38,30 +38,30 @@ - name: Check if credentials exist ansible.builtin.set_fact: - osc_podman_login_username: "{{ login_username }}" - osc_podman_login_password: "{{ login_dict[login_username] }}" + cifmw_update_login_username: "{{ login_username }}" + cifmw_update_login_password: "{{ login_dict[login_username] }}" vars: login_dict: >- {{ _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars. 
- edpm_container_registry_logins[osc_podman_login_registry] + edpm_container_registry_logins[cifmw_update_login_registry] }} login_username: "{{ login_dict.keys()|list|first }}" when: - _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins is defined - login_dict is defined - login_dict|length > 0 - - osc_podman_login_registry != 'quay.io' + - cifmw_update_login_registry != 'quay.io' - name: Log in to registry when needed containers.podman.podman_login: - registry: "{{ osc_podman_login_registry }}" - username: "{{ osc_podman_login_username }}" - password: "{{ osc_podman_login_password }}" + registry: "{{ cifmw_update_login_registry }}" + username: "{{ cifmw_update_login_username }}" + password: "{{ cifmw_update_login_password }}" when: - - osc_podman_login_username is defined - - osc_podman_login_password is defined - - osc_podman_login_registry != 'quay.io' + - cifmw_update_login_username is defined + - cifmw_update_login_password is defined + - cifmw_update_login_registry != 'quay.io' - name: Retrieve the openstackclient Pod kubernetes.core.k8s_info: From 445a66815abb4018be09e1a16308e4812b12f803 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 12 May 2025 12:07:06 +0200 Subject: [PATCH 220/480] Add IPA role to ci-framework The role is used to set up IPA using the FreeIPA opeator and then test configuring LDAP with domain-specific drivers. 
--- docs/dictionary/en-custom.txt | 2 + hooks/playbooks/ipa-controlplane-config.yml | 91 +++++++++ hooks/playbooks/ipa-post-deploy.yml | 28 +++ hooks/playbooks/ipa-pre-deploy.yml | 28 +++ roles/ipa/README.md | 4 + roles/ipa/defaults/main.yml | 11 ++ roles/ipa/tasks/run_ipa_setup.yml | 208 ++++++++++++++++++++ roles/ipa/tasks/run_ipa_user_setup.yml | 55 ++++++ roles/ipa/tasks/run_openstack_ldap_test.yml | 187 ++++++++++++++++++ roles/ipa/tasks/run_openstack_setup.yml | 36 ++++ roles/ipa/tasks/run_osp_cmd.yml | 30 +++ roles/ipa/templates/get-token.sh.j2 | 3 + roles/ipa/templates/ipa.yaml.j2 | 20 ++ roles/ipa/templates/ipauser1.j2 | 6 + 14 files changed, 709 insertions(+) create mode 100644 hooks/playbooks/ipa-controlplane-config.yml create mode 100644 hooks/playbooks/ipa-post-deploy.yml create mode 100644 hooks/playbooks/ipa-pre-deploy.yml create mode 100644 roles/ipa/README.md create mode 100644 roles/ipa/defaults/main.yml create mode 100644 roles/ipa/tasks/run_ipa_setup.yml create mode 100644 roles/ipa/tasks/run_ipa_user_setup.yml create mode 100644 roles/ipa/tasks/run_openstack_ldap_test.yml create mode 100644 roles/ipa/tasks/run_openstack_setup.yml create mode 100644 roles/ipa/tasks/run_osp_cmd.yml create mode 100644 roles/ipa/templates/get-token.sh.j2 create mode 100644 roles/ipa/templates/ipa.yaml.j2 create mode 100644 roles/ipa/templates/ipauser1.j2 diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index a21c3655b3..65819be42f 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -227,6 +227,7 @@ https ic icjbuue icokicagy +IDM IdP idrac iface @@ -286,6 +287,7 @@ kuttl kvm lacp lajly +LDAP ldp libguestfs libvirt diff --git a/hooks/playbooks/ipa-controlplane-config.yml b/hooks/playbooks/ipa-controlplane-config.yml new file mode 100644 index 0000000000..75a02cca85 --- /dev/null +++ b/hooks/playbooks/ipa-controlplane-config.yml @@ -0,0 +1,91 @@ +--- +- name: Create kustomization to update Keystone to use LDAP + 
hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Create file to customize keystone for IPA deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_ldap.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/keystone/template/extraMounts + value: + - name: v1 + region: r1 + extraVol: + - propagation: + - Keystone + extraVolType: Conf + volumes: + - name: keystone-domains + secret: + secretName: keystone-domains + mounts: + - name: keystone-domains + mountPath: "/etc/keystone/domains" + readOnly: true + - op: add + path: /spec/keystone/template/customServiceConfig + value: | + [DEFAULT] + insecure_debug = true + debug = true + [identity] + domain_specific_drivers_enabled = true + mode: "0644" + + - name: Get ipa route + kubernetes.core.k8s_info: + api_version: route.openshift.io/v1 + kind: Route + name: idm + namespace: "{{ cifmw_ipa_namespace | default('cert-manager') }}" + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + register: idm_route + + - name: Set IPA BaseDN, hostname and secret config key vars + ansible.builtin.set_fact: + cifmw_ipa_hostname: "{{ idm_route.resources.0.spec.host }}" + cifmw_ipa_basedn: "dc={{ idm_route.resources.0.spec.host.split('.')[1:] | join(',dc=') }}" + keystone_conf_key: "keystone.{{ cifmw_ipa_domain | default('REDHAT') }}.conf" + + - name: Create Keystone domain config secret for LDAP + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: keystone-domains + namespace: openstack + type: Opaque + stringData: "{{ {keystone_conf_key: keystone_ldap_config_content} }}" + vars: + keystone_ldap_config_content: | + [identity] + driver = ldap + [ldap] + 
url = ldap://ipa-directory-service.{{ cifmw_ipa_namespace | default('cert-manager') }}.svc.cluster.local + user = uid=admin,cn=users,cn=accounts,{{ cifmw_ipa_basedn }} + password = {{ cifmw_ipa_admin_password | default('nomoresecrets') }} + suffix = {{ cifmw_ipa_basedn }} + user_tree_dn = cn=users,cn=accounts,{{ cifmw_ipa_basedn }} + user_objectclass = person + user_id_attribute = uid + user_name_attribute = uid + user_mail_attribute = mail + group_tree_dn = cn=groups,cn=accounts,{{ cifmw_ipa_basedn }} + group_objectclass = groupOfNames + group_id_attribute = cn + group_name_attribute = cn + group_member_attribute = member + group_desc_attribute = description diff --git a/hooks/playbooks/ipa-post-deploy.yml b/hooks/playbooks/ipa-post-deploy.yml new file mode 100644 index 0000000000..276616424d --- /dev/null +++ b/hooks/playbooks/ipa-post-deploy.yml @@ -0,0 +1,28 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Run federation setup on openstack post reproducer deploy + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Run LDAP setup on OSP + ansible.builtin.import_role: + name: ipa + tasks_from: run_openstack_setup.yml + + - name: Run LDAP OSP User Auth test + ansible.builtin.import_role: + name: ipa + tasks_from: run_openstack_ldap_test.yml diff --git a/hooks/playbooks/ipa-pre-deploy.yml b/hooks/playbooks/ipa-pre-deploy.yml new file mode 100644 index 0000000000..fc06f9fe9e --- /dev/null +++ b/hooks/playbooks/ipa-pre-deploy.yml @@ -0,0 +1,28 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Run IPA setup on reproducer + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Run IPA pod setup on Openshift + ansible.builtin.import_role: + name: ipa + tasks_from: run_ipa_setup.yml + + - name: Run IPA realm setup for OSP + ansible.builtin.import_role: + name: ipa + tasks_from: run_ipa_user_setup.yml diff --git a/roles/ipa/README.md b/roles/ipa/README.md new file mode 100644 index 0000000000..524eb7b61c --- /dev/null +++ b/roles/ipa/README.md @@ -0,0 +1,4 @@ +IPA +========= + +This role will setup IPA with LDAP. The IDM system will be used for the LDAP domain-specific backend. 
diff --git a/roles/ipa/defaults/main.yml b/roles/ipa/defaults/main.yml new file mode 100644 index 0000000000..099993c623 --- /dev/null +++ b/roles/ipa/defaults/main.yml @@ -0,0 +1,11 @@ +--- +cifmw_ipa_deploy_type: crc +cifmw_ipa_namespace: cert-manager +cifmw_ipa_realm: openstack +cifmw_ipa_admin_username: admin +cifmw_ipa_admin_password: nomoresecrets +cifmw_ipa_user_password: nomoresecrets +cifmw_ipa_url_validate_certs: false +cifmw_ipa_run_osp_cmd_namespace: openstack +cifmw_ipa_domain: REDHAT +cifmw_ipa_operator_version: "d5951bd27be04e06952c1510bfd6f96c2b12a052" diff --git a/roles/ipa/tasks/run_ipa_setup.yml b/roles/ipa/tasks/run_ipa_setup.yml new file mode 100644 index 0000000000..d3e7739c1a --- /dev/null +++ b/roles/ipa/tasks/run_ipa_setup.yml @@ -0,0 +1,208 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Create namespace + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + name: "{{ cifmw_ipa_namespace }}" + api_version: v1 + kind: Namespace + state: present + +- name: Get IPA operator deployment config from repository + ansible.builtin.git: + dest: "{{ ansible_user_dir }}/ci-framework-data/tmp/freeipa-operator" + repo: "https://github.com/freeipa/freeipa-operator" + version: "{{ cifmw_ipa_operator_version }}" + force: true + +- name: Wait for SecurityContextConstraints API to be available + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: oc api-resources + register: api_resources_check + until: > + api_resources_check.rc == 0 and + 'securitycontextconstraints' in api_resources_check.stdout + retries: 60 + delay: 10 + changed_when: false + check_mode: false + ignore_errors: true + +- name: Fail if SCCs did not become available + ansible.builtin.fail: + msg: "Timeout: SecurityContextConstraints API (securitycontextconstraints) did not become available after waiting." + when: "'securitycontextconstraints' not in api_resources_check.stdout" + +- name: Report success + ansible.builtin.debug: + msg: "SecurityContextConstraints API is available." 
+ when: "'securitycontextconstraints' in api_resources_check.stdout" + +- name: Install IPA operator + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.shell: + cmd: |- + set -eo pipefail + cd "{{ ansible_user_dir }}/ci-framework-data/tmp/freeipa-operator" + oc create -f config/rbac/scc.yaml + (cd config/default && kustomize edit set namespace "{{ cifmw_ipa_namespace }}") + (cd config/manager && kustomize edit set image controller=quay.io/freeipa/freeipa-operator:nightly) + kustomize build config/default | kubectl apply -f - + +- name: Wait for it to be deployed + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: apps/v1 + kind: Deployment + name: idm-operator-controller-manager + namespace: "{{ cifmw_ipa_namespace }}" + wait: true + wait_condition: + type: "Available" + reason: "MinimumReplicasAvailable" + wait_timeout: 60 + +- name: Add IDM admin password secret + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + type: Opaque + metadata: + name: idm-secret + namespace: "{{ cifmw_ipa_namespace }}" + data: + IPA_DM_PASSWORD: "{{ cifmw_ipa_admin_password | b64encode }}" + IPA_ADMIN_PASSWORD: "{{ cifmw_ipa_admin_password | b64encode }}" + +- name: Read IPA instance template + ansible.builtin.template: + src: ipa.yaml.j2 + dest: "{{ ansible_user_dir }}/ci-framework-data/tmp/ipa.yaml" + mode: "0644" + +- name: Install IPA pod + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: |- + oc apply -n {{ cifmw_ipa_namespace }} -f {{ ansible_user_dir }}/ci-framework-data/tmp/ipa.yaml + +- name: Wait on pod to be ready + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + kind: Pod + name: idm-main-0 + namespace: "{{ cifmw_ipa_namespace }}" + wait: true + wait_timeout: 300 + +- name: Get ipa route + 
kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: route.openshift.io/v1 + kind: Route + name: idm + namespace: "{{ cifmw_ipa_namespace }}" + register: idm_route + +- name: Wait for IPA pod to be avalable + ansible.builtin.uri: + url: "https://{{ idm_route.resources.0.spec.host }}" + follow_redirects: true + method: GET + validate_certs: "{{ cifmw_ipa_url_validate_certs }}" + register: _result + until: _result.status == 200 + retries: 100 + delay: 10 + +- name: Ensure IPA LDAP/LDAPS service is exposed + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Service + metadata: + name: ipa-directory-service + namespace: "{{ cifmw_ipa_namespace | default('ipa') }}" + spec: + selector: + app: idm + ports: + - name: ldap + protocol: TCP + port: 389 + targetPort: 389 + - name: ldaps + protocol: TCP + port: 636 + targetPort: 636 + +- name: Wait or fail + block: + - name: Wait for FreeIPA server install completion in pod logs + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: "oc logs idm-main-0 -n cert-manager --tail=-1" + register: ipa_pod_logs + until: > + ipa_pod_logs.rc == 0 and + ("The ipa-server-install command was successful" in ipa_pod_logs.stdout or + "The ipa-server-install command failed" in ipa_pod_logs.stdout) + retries: 60 + delay: 10 + changed_when: false + check_mode: false + + - name: Fail if IPA install reported an error in logs + ansible.builtin.fail: + msg: | + FreeIPA installation failed according to pod logs. Last 50 lines: + {{ (ipa_pod_logs.stdout_lines | default([]))[-50:] | join('\n') }} + when: "'The ipa-server-install command failed' in ipa_pod_logs.stdout" + + - name: Report success if IPA install completed + ansible.builtin.debug: + msg: "FreeIPA installation appears successful in pod logs." 
+ when: "'The ipa-server-install command was successful' in ipa_pod_logs.stdout" + + rescue: + - name: Get the last 100 lines from IPA pod logs on failure/timeout + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: "oc logs idm-main-0 -n cert-manager --tail=100" + register: pod_log_tail_on_failure + changed_when: false + ignore_errors: true + + - name: Print logs and fail task due to timeout or error + ansible.builtin.fail: + msg: | + Timeout or unexpected error waiting for FreeIPA server installation. + Last 100 log lines from 'idm-main-0': + {{ pod_log_tail_on_failure.stdout | default("Could not retrieve pod logs.") }} diff --git a/roles/ipa/tasks/run_ipa_user_setup.yml b/roles/ipa/tasks/run_ipa_user_setup.yml new file mode 100644 index 0000000000..fd6d666f6c --- /dev/null +++ b/roles/ipa/tasks/run_ipa_user_setup.yml @@ -0,0 +1,55 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Copy user setup script into idm pod + kubernetes.core.k8s_cp: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: to_pod + pod: idm-main-0 + namespace: "{{ cifmw_ipa_namespace }}" + remote_path: /tmp/user_setup.sh + content: | + export IPAADMINPW="{{ cifmw_ipa_admin_password }}" + export USERPW="{{ cifmw_ipa_user_password }}" + echo $IPAADMINPW|kinit admin + ipa user-add svc-ldap --first=Openstack --last=LDAP + echo $IPAADMINPW | ipa passwd svc-ldap + ipa user-add ipauser1 --first=ipa1 --last=user1 + echo $IPAADMINPW | ipa passwd ipauser1 + ipa user-add ipauser2 --first=ipa2 --last=user2 + echo $IPAADMINPW | ipa passwd ipauser2 + ipa user-add ipauser3 --first=ipa3 --last=user3 + echo $IPAADMINPW | ipa passwd ipauser3 + ipa group-add --desc="OpenStack Users" grp-openstack + ipa group-add --desc="OpenStack Admin Users" grp-openstack-admin + ipa group-add --desc="OpenStack Demo Users" grp-openstack-demo + ipa group-add-member --users=svc-ldap grp-openstack + ipa group-add-member --users=ipauser1 grp-openstack + ipa group-add-member --users=ipauser1 grp-openstack-admin + ipa group-add-member --users=ipauser2 grp-openstack + ipa group-add-member --users=ipauser2 grp-openstack-demo + ipa group-add-member --users=ipauser3 grp-openstack + echo -e "$IPAADMINPW\n$USERPW\n$USERPW"|/usr/bin/kinit svc-ldap + echo -e "$IPAADMINPW\n$USERPW\n$USERPW"|/usr/bin/kinit ipauser1 + echo -e "$IPAADMINPW\n$USERPW\n$USERPW"|/usr/bin/kinit ipauser2 + echo -e "$IPAADMINPW\n$USERPW\n$USERPW"|/usr/bin/kinit ipauser3 + +- name: Setup openstack test users and groups in IPA + kubernetes.core.k8s_exec: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + pod: idm-main-0 + namespace: "{{ cifmw_ipa_namespace }}" + command: bash /tmp/user_setup.sh diff --git a/roles/ipa/tasks/run_openstack_ldap_test.yml b/roles/ipa/tasks/run_openstack_ldap_test.yml new file mode 100644 index 0000000000..3f90f5d408 --- /dev/null +++ b/roles/ipa/tasks/run_openstack_ldap_test.yml @@ -0,0 +1,187 @@ +--- +# 
Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Set test output filename + ansible.builtin.set_fact: + output_file: "{{ cifmw_basedir }}/artifacts/run_openstack_ldap_test_result.out" + +- name: Delete old file if existing + ansible.builtin.file: + path: "{{ output_file }}" + state: absent + ignore_errors: true # noqa: ignore-errors + +- name: Create output file + ansible.builtin.file: + path: "{{ output_file }}" + mode: "u=rw,g=r,o=r" + state: touch + +- name: Get keystone route + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: route.openshift.io/v1 + kind: Route + name: keystone-public + namespace: "{{ cifmw_ipa_run_osp_cmd_namespace }}" + register: keystone_route + +- name: Wait for Keystone API to be avalable + ansible.builtin.uri: + url: "https://{{ keystone_route.resources.0.spec.host }}/v3" + follow_redirects: true + method: GET + register: _result + until: _result.status == 200 + retries: 100 + delay: 10 + +- name: Read ipa test user1 cloudrc template + ansible.builtin.template: + src: ipauser1.j2 + dest: "{{ ansible_user_dir }}/ci-framework-data/tmp/ipauser1" + mode: "0644" + +- name: Copy ipa test user1 cloudrc file into pod + kubernetes.core.k8s_cp: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + namespace: "{{ cifmw_ipa_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/ipauser1" + local_path: "{{ ansible_user_dir 
}}/ci-framework-data/tmp/ipauser1" + + +- name: RHELOSP-53684 - Security - List IPA ldap users + vars: + _osp_cmd: "openstack user list --domain REDHAT" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53684 - Security - List IPA ldap users - success + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53684=passed" + when: "'ipauser1' in ipa_run_osc_cmd.stdout and 'ipauser2' in ipa_run_osc_cmd.stdout and 'ipauser3' in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53684 - Security - List IPA ldap users - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53684=failed" + when: "'ipauser1' not in ipa_run_osc_cmd.stdout and 'ipauser2' not in ipa_run_osc_cmd.stdout and 'ipauser3' not in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53685 - Security - List IPA ldap groups + vars: + _osp_cmd: "openstack group list --domain REDHAT" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53685 - Security - List IPA ldap groups - success + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53685=passed" + when: "'grp-openstack' in ipa_run_osc_cmd.stdout and 'grp-openstack-admin' in ipa_run_osc_cmd.stdout and 'grp-openstack-demo' in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53685 - Security - List IPA ldap groups - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53685=failed" + when: "'grp-openstack' not in ipa_run_osc_cmd.stdout and 'grp-openstack-admin' not in ipa_run_osc_cmd.stdout and 'grp-openstack-demo' not in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53932 - Security - Check ipauser1 in ldap group grp-openstack-admin + vars: + _osp_cmd: "openstack group contains user --group-domain REDHAT --user-domain REDHAT grp-openstack-admin ipauser1" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53932 - Security - Check ipauser1 in ldap group grp-openstack-admin - success + ansible.builtin.lineinfile: + path: "{{ output_file 
}}" + line: "RHELOSP-53932=passed" + when: "'ipauser1 in group grp-openstack-admin' in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53932 - Security - Check ipauser1 in ldap group grp-openstack-admin - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53932=failed" + when: "'ipauser1 in group grp-openstack-admin' not in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53933 - Security - Check ipauser2 in ldap group grp-openstack-demo + vars: + _osp_cmd: "openstack group contains user --group-domain REDHAT --user-domain REDHAT grp-openstack-demo ipauser2" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53933 - Security - Check ipauser2 in ldap group grp-openstack-demo - success + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53933=passed" + when: "'ipauser2 in group grp-openstack-demo' in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53933 - Security - Check ipauser2 in ldap group grp-openstack-demo - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53933=failed" + when: "'ipauser2 in group grp-openstack-demo' not in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53934 - Security - Check ipauser3 ldap in group grp-openstack + vars: + _osp_cmd: "openstack group contains user --group-domain REDHAT --user-domain REDHAT grp-openstack ipauser3" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53934 - Security - Check ipauser3 in ldap group grp-openstack - success + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53934=passed" + when: "'ipauser3 in group grp-openstack' in ipa_run_osc_cmd.stdout" + +- name: RHELOSP-53934 - Security - Check ipauser3 in ldap group grp-openstack - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53934=failed" + when: "'ipauser3 in group grp-openstack' not in ipa_run_osc_cmd.stdout" + +- name: Template get-token.sh script + ansible.builtin.template: + src: get-token.sh.j2 
+ dest: "{{ ansible_user_dir }}/ci-framework-data/tmp/get-token.sh" + mode: "0755" + +- name: Copy get-token.sh script into openstackclient pod + kubernetes.core.k8s_cp: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + namespace: "{{ cifmw_ipa_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/get-token.sh" + local_path: "{{ ansible_user_dir }}/ci-framework-data/tmp/get-token.sh" + +- name: RHELOSP-53935 - Security - Get token with ipauser1 user + vars: + _osp_cmd: "/home/cloud-admin/get-token.sh ipauser1" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: RHELOSP-53935 - Security - Get token with ipauser1 user - success + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53935=passed" + when: ipa_run_osc_cmd.stdout|length >= 180 + +- name: RHELOSP-53935 - Security - Get token with ipauser1 user - failed + ansible.builtin.lineinfile: + path: "{{ output_file }}" + line: "RHELOSP-53935=failed" + when: ipa_run_osc_cmd.stdout|length < 180 + +- name: Fail in case one of the above tests failed + ansible.builtin.command: "grep failed {{ output_file }}" + changed_when: false + register: grep_cmd + failed_when: grep_cmd.rc != 1 diff --git a/roles/ipa/tasks/run_openstack_setup.yml b/roles/ipa/tasks/run_openstack_setup.yml new file mode 100644 index 0000000000..c3ebdc9b9d --- /dev/null +++ b/roles/ipa/tasks/run_openstack_setup.yml @@ -0,0 +1,36 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Run create domain + vars: + _osp_cmd: "openstack domain create {{ cifmw_ipa_domain }}" + ansible.builtin.include_tasks: run_osp_cmd.yml + +- name: Restart keystone + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec + -n {{ cifmw_ipa_run_osp_cmd_namespace }} + deploy/keystone + -- + kill 1 + +- name: Wait for a couple of seconds for keystone to start restarting + ansible.builtin.pause: + seconds: 10 diff --git a/roles/ipa/tasks/run_osp_cmd.yml b/roles/ipa/tasks/run_osp_cmd.yml new file mode 100644 index 0000000000..c3723fb56f --- /dev/null +++ b/roles/ipa/tasks/run_osp_cmd.yml @@ -0,0 +1,30 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Run OpenStack Command + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: >- + oc exec + -n {{ cifmw_ipa_run_osp_cmd_namespace }} + -t openstackclient + -- + {{ _osp_cmd }} + register: ipa_run_osc_cmd + retries: 10 + delay: 10 diff --git a/roles/ipa/templates/get-token.sh.j2 b/roles/ipa/templates/get-token.sh.j2 new file mode 100644 index 0000000000..346d904e9e --- /dev/null +++ b/roles/ipa/templates/get-token.sh.j2 @@ -0,0 +1,3 @@ +#!/bin/bash +source /home/cloud-admin/$1 +openstack token issue -c id -f value diff --git a/roles/ipa/templates/ipa.yaml.j2 b/roles/ipa/templates/ipa.yaml.j2 new file mode 100644 index 0000000000..88a7194eae --- /dev/null +++ b/roles/ipa/templates/ipa.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: idmocp.redhat.com/v1alpha1 +kind: IDM +metadata: + name: idm +spec: + # Add fields here + # Update this value for your cluster ingress + # host: this-is.64-char-length-12345678-freeipa-invalid.apps-crc.testing + # host: this-is.65-char-length-123456789-freeipa-invalid.apps-crc.testing + # host: ipa.apps-crc.testing + realm: EXAMPLE.TESTING + passwordSecret: idm-secret + resources: + requests: + cpu: "2000m" + memory: "3Gi" + limits: + cpu: "3000m" + memory: "4Gi" diff --git a/roles/ipa/templates/ipauser1.j2 b/roles/ipa/templates/ipauser1.j2 new file mode 100644 index 0000000000..2a72f1947c --- /dev/null +++ b/roles/ipa/templates/ipauser1.j2 @@ -0,0 +1,6 @@ +unset OS_CLOUD +export OS_IDENTITY_API_VERSION=3 +export OS_AUTH_URL="https://{{ keystone_route.resources.0.spec.host }}/v3" +export OS_USER_DOMAIN_NAME="{{ cifmw_ipa_domain }}" +export OS_USERNAME=ipauser1 +export OS_PASSWORD="{{ cifmw_ipa_user_password }}" From 55a697905874b45da0e6c93fc1fb6ab009014c28 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Wed, 9 Jul 2025 17:26:31 +0200 Subject: [PATCH 221/480] Add ipa and federation roles owners Both of these roles are used to test Keystone which is in scope of 
the security group. --- .github/CODEOWNERS | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 55387f6cd0..8b188e8fc8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,9 +11,6 @@ playbooks/bgp-l3-computes-ready.yml @openstack-k8s-operators/bgp playbooks/bgp @openstack-k8s-operators/bgp scenarios/reproducers/bgp-l3-xl.yml @openstack-k8s-operators/bgp -# Compliance -roles/compliance @openstack-k8s-operators/security - # DCN roles/ci_dcn_site @openstack-k8s-operators/dcn playbooks/dcn.yml @openstack-k8s-operators/dcn @@ -38,6 +35,11 @@ roles/polarion @tosky @jparoly @jirimacku # Report portal roles/reportportal @jirimacku @dsariel @sdatko +# Security +roles/compliance @openstack-k8s-operators/security +roles/federation @openstack-k8s-operators/security +roles/ipa @openstack-k8s-operators/security + # Shiftstack roles/shiftstack @rlobillo @eurijon roles/ci_gen_kustomize_values/templates/shiftstack @rlobillo @eurijon From 3cef05fbe650b85282be3eccf1731920cd171fef Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Fri, 11 Jul 2025 12:42:58 +0200 Subject: [PATCH 222/480] Set ovn-egress-iface=true for br-ex physical NIC In order to fix OSPRH-17551 and make BW limits properly applied to physical ports (ports from neutron VLAN and flat networks), the br-ex member interfaces need to be configured with ovn-egress-iface=true. This applies when os-net-config is used with the nmstate driver. 
--- hooks/playbooks/fetch_compute_facts.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 92a2acd176..8be29fdf61 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -234,6 +234,11 @@ mtu: {{ min_viable_mtu }} # force the MAC address of the bridge to this interface primary: true + {% if edpm_network_config_nmstate | bool %} + # this ovs_extra configuration fixes OSPRH-17551, but it will not be needed when FDP-1472 is resolved + ovs_extra: + - "set interface eth1 external-ids:ovn-egress-iface=true" + {% endif %} {% for network in nodeset_networks %} - type: vlan mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} From 9ce7251a6b69e0f773cc33e44b1cf2ae5b104c30 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 10 Jul 2025 13:08:22 +0200 Subject: [PATCH 223/480] Remove non-ascii characters; create log dir before executing DLRN Harmonization to ASCII can prevent errors due to encoding inconsistencies. Executed command: LC_ALL=C find . -type f -name '*.yml' -exec grep -l '[^[:print:]]' {} \;| grep -vE '\.git|_build|\.tox' Also fix an issue, where "logs" dir does not exist, where stdout of the DLRN execution is pushing logs to a file inside the dir. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/architecture/run.yml | 2 +- .../architecture/validate-architecture.yml | 16 ++++++++-------- ci/playbooks/build_runner_image.yml | 2 +- ci/playbooks/content_provider/pre.yml | 2 +- ci/playbooks/content_provider/run.yml | 2 +- ci/playbooks/e2e-collect-logs.yml | 2 +- ci/playbooks/edpm/run.yml | 2 +- ci/playbooks/edpm/update.yml | 2 +- ci/playbooks/edpm_baremetal_deployment/run.yml | 2 +- ci/playbooks/edpm_build_images/run.yml | 2 +- ci/playbooks/meta_content_provider/run.yml | 2 +- ci/playbooks/tcib/run.yml | 2 +- ci/playbooks/test-base-job/test-run.yml | 2 +- playbooks/99-logs.yml | 2 +- .../build_openstack_packages/tasks/run_dlrn.yml | 6 ++++++ roles/libvirt_manager/defaults/main.yml | 2 +- .../molecule/login_token_based/prepare.yml | 8 ++++---- roles/operator_deploy/defaults/main.yml | 2 +- .../molecule/default/converge.yml | 2 +- roles/operator_deploy/tasks/main.yml | 4 ++-- roles/pkg_build/tasks/build.yml | 2 +- roles/repo_setup/tasks/ci_mirror.yml | 2 +- roles/repo_setup/tasks/component_repo.yml | 6 +++--- roles/repo_setup/tasks/configure.yml | 2 +- roles/repo_setup/tasks/populate_gating_repo.yml | 2 +- roles/reproducer/tasks/push_code.yml | 2 +- roles/run_hook/tasks/playbook.yml | 2 +- scenarios/reproducers/3-nodes.yml | 2 +- scenarios/reproducers/bgp-4-racks-3-ocps.yml | 2 +- scenarios/reproducers/dt-dcn.yml | 2 +- scenarios/reproducers/dt-osasinfra.yml | 2 +- scenarios/reproducers/external-ceph.yml | 2 +- scenarios/reproducers/va-hci.yml | 2 +- scenarios/reproducers/va-multi.yml | 4 ++-- scenarios/reproducers/va-pidone.yml | 2 +- 35 files changed, 54 insertions(+), 48 deletions(-) diff --git a/ci/playbooks/architecture/run.yml b/ci/playbooks/architecture/run.yml index 3f4cbcadc7..0b45fd5cdb 100644 --- a/ci/playbooks/architecture/run.yml +++ b/ci/playbooks/architecture/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/architecture/validate-architecture.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') 
}}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/architecture/validate-architecture.yml b/ci/playbooks/architecture/validate-architecture.yml index 2f1c5a1c4d..283fab317a 100644 --- a/ci/playbooks/architecture/validate-architecture.yml +++ b/ci/playbooks/architecture/validate-architecture.yml @@ -33,7 +33,7 @@ cifmw_path: >- {{ ['~/bin', - ansible_env.PATH] | join(':') + ansible_env.PATH] | join(':') }} _mock_file: >- {{ @@ -120,8 +120,8 @@ ansible.builtin.set_fact: vas: >- {{ - vas | default({}) | - combine(item.content | b64decode | from_yaml, recursive=true) + vas | default({}) | + combine(item.content | b64decode | from_yaml, recursive=true) }} loop: "{{ _automation_contents.results }}" loop_control: @@ -165,23 +165,23 @@ ansible.builtin.set_fact: cifmw_ci_gen_kustomize_values_ssh_authorizedkeys: >- {{ - _pub_keys.results[1].content | b64decode + _pub_keys.results[1].content | b64decode }} cifmw_ci_gen_kustomize_values_ssh_private_key: >- {{ - _priv_keys.results[1].content | b64decode + _priv_keys.results[1].content | b64decode }} cifmw_ci_gen_kustomize_values_ssh_public_key: >- {{ - _pub_keys.results[1].content | b64decode + _pub_keys.results[1].content | b64decode }} cifmw_ci_gen_kustomize_values_migration_pub_key: >- {{ - _pub_keys.results[0].content | b64decode + _pub_keys.results[0].content | b64decode }} cifmw_ci_gen_kustomize_values_migration_priv_key: >- {{ - _priv_keys.results[0].content | b64decode + _priv_keys.results[0].content | b64decode }} cifmw_ci_gen_kustomize_values_sshd_ranges: >- {{ diff --git a/ci/playbooks/build_runner_image.yml b/ci/playbooks/build_runner_image.yml index e8b06d9b02..6be5d12244 100644 --- a/ci/playbooks/build_runner_image.yml +++ b/ci/playbooks/build_runner_image.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/build_runner_image.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host 
| default('all') }}" tasks: - name: Filter out host if needed when: diff --git a/ci/playbooks/content_provider/pre.yml b/ci/playbooks/content_provider/pre.yml index 83f4d6fa30..35aa5f712e 100644 --- a/ci/playbooks/content_provider/pre.yml +++ b/ci/playbooks/content_provider/pre.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/content_provider/pre.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" tasks: - name: Filter out host if needed when: diff --git a/ci/playbooks/content_provider/run.yml b/ci/playbooks/content_provider/run.yml index ec779012a1..81788f7e03 100644 --- a/ci/playbooks/content_provider/run.yml +++ b/ci/playbooks/content_provider/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/content_provider/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index b7733626a9..63371fe3ba 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/e2e-collect-logs.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/edpm/run.yml b/ci/playbooks/edpm/run.yml index 53504af261..028bbd4755 100644 --- a/ci/playbooks/edpm/run.yml +++ b/ci/playbooks/edpm/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/edpm/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/edpm/update.yml b/ci/playbooks/edpm/update.yml index 719af3fb31..cc663f7ccc 100644 --- a/ci/playbooks/edpm/update.yml +++ b/ci/playbooks/edpm/update.yml 
@@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/edpm/update.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/edpm_baremetal_deployment/run.yml b/ci/playbooks/edpm_baremetal_deployment/run.yml index 79e76a2b4d..ed388ed081 100644 --- a/ci/playbooks/edpm_baremetal_deployment/run.yml +++ b/ci/playbooks/edpm_baremetal_deployment/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/edpm_baremetal_deployment/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/edpm_build_images/run.yml b/ci/playbooks/edpm_build_images/run.yml index 044382a941..2d1b0aef74 100644 --- a/ci/playbooks/edpm_build_images/run.yml +++ b/ci/playbooks/edpm_build_images/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/edpm_build_images/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/meta_content_provider/run.yml b/ci/playbooks/meta_content_provider/run.yml index ceaae33969..965a447c4b 100644 --- a/ci/playbooks/meta_content_provider/run.yml +++ b/ci/playbooks/meta_content_provider/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/meta_content_provider/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/tcib/run.yml b/ci/playbooks/tcib/run.yml index e384eaa68c..0d97f918a4 100644 --- a/ci/playbooks/tcib/run.yml +++ b/ci/playbooks/tcib/run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/tcib/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ 
cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/ci/playbooks/test-base-job/test-run.yml b/ci/playbooks/test-base-job/test-run.yml index e552d947ea..a314546458 100644 --- a/ci/playbooks/test-base-job/test-run.yml +++ b/ci/playbooks/test-base-job/test-run.yml @@ -1,6 +1,6 @@ --- - name: "Run ci/playbooks/test-base-job/run.yml" - hosts: "{{ cifmw_zuul_target_host | default('all') }}" + hosts: "{{ cifmw_zuul_target_host | default('all') }}" gather_facts: true tasks: - name: Filter out host if needed diff --git a/playbooks/99-logs.yml b/playbooks/99-logs.yml index 7c587c127a..d6ebe76532 100644 --- a/playbooks/99-logs.yml +++ b/playbooks/99-logs.yml @@ -23,7 +23,7 @@ - name: Load parameters files when: - - param_dir.stat.exists | bool + - param_dir.stat.exists | bool ansible.builtin.include_vars: dir: "{{ cifmw_basedir }}/artifacts/parameters" always: diff --git a/roles/build_openstack_packages/tasks/run_dlrn.yml b/roles/build_openstack_packages/tasks/run_dlrn.yml index 8174e098aa..82a7bcfdcf 100644 --- a/roles/build_openstack_packages/tasks/run_dlrn.yml +++ b/roles/build_openstack_packages/tasks/run_dlrn.yml @@ -173,6 +173,12 @@ ansible.builtin.debug: msg: "Building change for {{ project_name_mapped.stdout|default('unknown') }}" + - name: Ensure logs dir exists + ansible.builtin.file: + path: "{{ cifmw_bop_artifacts_basedir }}/logs" + state: directory + mode: "0755" + - name: Run DLRN register: repo_built ansible.builtin.shell: diff --git a/roles/libvirt_manager/defaults/main.yml b/roles/libvirt_manager/defaults/main.yml index 0bc6f17544..9dfabddbf3 100644 --- a/roles/libvirt_manager/defaults/main.yml +++ b/roles/libvirt_manager/defaults/main.yml @@ -62,7 +62,7 @@ cifmw_libvirt_manager_pub_net: public # Those parameters are usually set via the reproducer role. # We will therefore use them, and default to the same value set in the role. 
cifmw_libvirt_manager_dns_servers: "{{ cifmw_reproducer_dns_servers | default(['1.1.1.1', '8.8.8.8']) }}" -cifmw_libvirt_manager_crc_private_nic: "{{ cifmw_reproducer_crc_private_nic | default('enp2s0') }}" +cifmw_libvirt_manager_crc_private_nic: "{{ cifmw_reproducer_crc_private_nic | default('enp2s0') }}" # Allow to inject custom node family cifmw_libvirt_manager_vm_net_ip_set: {} diff --git a/roles/openshift_login/molecule/login_token_based/prepare.yml b/roles/openshift_login/molecule/login_token_based/prepare.yml index 9ef1af7f9c..235dda0dc6 100644 --- a/roles/openshift_login/molecule/login_token_based/prepare.yml +++ b/roles/openshift_login/molecule/login_token_based/prepare.yml @@ -26,15 +26,15 @@ - name: Login as kubeadmin environment: - PATH: "{{ ansible_user_dir  }}/.crc/bin/oc/:{{ ansible_env.PATH }}" - KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" + PATH: "{{ ansible_user_dir }}/.crc/bin/oc/:{{ ansible_env.PATH }}" + KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" ansible.builtin.command: cmd: oc login -u kubeadmin -p 123456789 - name: Get initial token environment: - PATH: "{{ ansible_user_dir  }}/.crc/bin/oc/:{{ ansible_env.PATH }}" - KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" + PATH: "{{ ansible_user_dir }}/.crc/bin/oc/:{{ ansible_env.PATH }}" + KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" register: whoami_out ansible.builtin.command: cmd: oc whoami -t diff --git a/roles/operator_deploy/defaults/main.yml b/roles/operator_deploy/defaults/main.yml index 0e25bd3d78..93eb9ca3dd 100644 --- a/roles/operator_deploy/defaults/main.yml +++ b/roles/operator_deploy/defaults/main.yml @@ -19,7 +19,7 @@ # All variables within this role should have a prefix of "cifmw_operator_deploy" # output base directory -cifmw_operator_deploy_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" +cifmw_operator_deploy_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ 
'/ci-framework-data') }}" # List of operators you want to deploy cifmw_operator_deploy_list: [] # install_yamls repository location diff --git a/roles/operator_deploy/molecule/default/converge.yml b/roles/operator_deploy/molecule/default/converge.yml index 3368789784..3d4aa84ff5 100644 --- a/roles/operator_deploy/molecule/default/converge.yml +++ b/roles/operator_deploy/molecule/default/converge.yml @@ -18,7 +18,7 @@ - name: Converge hosts: all environment: - KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" + KUBECONFIG: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" vars: cifmw_installyamls_repos: "/tmp/install_yamls" cifmw_operator_deploy_list: diff --git a/roles/operator_deploy/tasks/main.yml b/roles/operator_deploy/tasks/main.yml index e5bc84135f..6df250f8dc 100644 --- a/roles/operator_deploy/tasks/main.yml +++ b/roles/operator_deploy/tasks/main.yml @@ -16,8 +16,8 @@ - name: Deploy selected operators ci_script: - output_dir: "{{ cifmw_operator_deploy_basedir }}/artifacts" + output_dir: "{{ cifmw_operator_deploy_basedir }}/artifacts" chdir: "{{ cifmw_operator_deploy_installyamls }}" script: "make {{ item.name }}" - extra_args: "{{ item.params | default({}) }}" + extra_args: "{{ item.params | default({}) }}" loop: "{{ cifmw_operator_deploy_list }}" diff --git a/roles/pkg_build/tasks/build.yml b/roles/pkg_build/tasks/build.yml index 841595f223..a5f867ed44 100644 --- a/roles/pkg_build/tasks/build.yml +++ b/roles/pkg_build/tasks/build.yml @@ -5,7 +5,7 @@ {% for pkg in cifmw_pkg_build_list -%} - "{{ pkg.src|default(cifmw_pkg_build_pkg_basedir ~ '/' ~ pkg.name) }}:/root/src/{{ pkg.name }}:z" - "{{ cifmw_pkg_build_basedir }}/volumes/packages/{{ pkg.name }}:/root/{{ pkg.name }}:z" - - "{{ cifmw_pkg_build_basedir }}/logs/build_{{ pkg.name }}:/root/logs:z" + - "{{ cifmw_pkg_build_basedir }}/logs/build_{{ pkg.name }}:/root/logs:z" {% endfor -%} - "{{ cifmw_pkg_build_basedir }}/volumes/packages/gating_repo:/root/gating_repo:z" - "{{ 
cifmw_pkg_build_basedir }}/artifacts/repositories:/root/yum.repos.d:z,ro" diff --git a/roles/repo_setup/tasks/ci_mirror.yml b/roles/repo_setup/tasks/ci_mirror.yml index dae3377660..7eb47c26b6 100644 --- a/roles/repo_setup/tasks/ci_mirror.yml +++ b/roles/repo_setup/tasks/ci_mirror.yml @@ -5,7 +5,7 @@ register: mirror_path - name: Use proxy mirrors - become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" when: - mirror_path.stat.exists block: diff --git a/roles/repo_setup/tasks/component_repo.yml b/roles/repo_setup/tasks/component_repo.yml index 10dec3b317..a5213495b8 100644 --- a/roles/repo_setup/tasks/component_repo.yml +++ b/roles/repo_setup/tasks/component_repo.yml @@ -1,20 +1,20 @@ --- - name: Get component repo - become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" ansible.builtin.get_url: url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/component/{{ cifmw_repo_setup_component_name }}/{{ cifmw_repo_setup_component_promotion_tag }}/delorean.repo" dest: "{{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo" mode: "0644" - name: Rename component repo - become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" ansible.builtin.replace: path: "{{ cifmw_repo_setup_output }}/{{ cifmw_repo_setup_component_name }}_{{ cifmw_repo_setup_component_promotion_tag }}_delorean.repo" regexp: 'delorean-component-{{ cifmw_repo_setup_component_name }}' replace: '{{ cifmw_repo_setup_component_name }}-{{ cifmw_repo_setup_component_promotion_tag }}' - name: Disable component repo in current-podified dlrn repo - become: "{{ not 
cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" community.general.ini_file: path: "{{ cifmw_repo_setup_output }}/delorean.repo" section: 'delorean-component-{{ cifmw_repo_setup_component_name }}' diff --git a/roles/repo_setup/tasks/configure.yml b/roles/repo_setup/tasks/configure.yml index a88c66796e..46c96598cf 100644 --- a/roles/repo_setup/tasks/configure.yml +++ b/roles/repo_setup/tasks/configure.yml @@ -8,7 +8,7 @@ - (not cifmw_run_update|default(false)) or (update_playbook_run is defined and cifmw_run_update|default(false)) - name: Run repo-setup - become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" ansible.builtin.command: cmd: >- {{ cifmw_repo_setup_venv }}/bin/repo-setup diff --git a/roles/repo_setup/tasks/populate_gating_repo.yml b/roles/repo_setup/tasks/populate_gating_repo.yml index ae56086d18..b06160317a 100644 --- a/roles/repo_setup/tasks/populate_gating_repo.yml +++ b/roles/repo_setup/tasks/populate_gating_repo.yml @@ -7,7 +7,7 @@ - name: Construct gating repo when: _url_status.status == 200 - become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" + become: "{{ not cifmw_repo_setup_output.startswith(ansible_user_dir) }}" block: - name: Populate gating repo from content provider ip ansible.builtin.copy: diff --git a/roles/reproducer/tasks/push_code.yml b/roles/reproducer/tasks/push_code.yml index 98b7b4e28d..985b9e655e 100644 --- a/roles/reproducer/tasks/push_code.yml +++ b/roles/reproducer/tasks/push_code.yml @@ -161,5 +161,5 @@ - name: Install collections on controller-0 delegate_to: controller-0 ansible.builtin.command: - chdir: "{{ _cifmw_reproducer_framework_location }}" + chdir: "{{ _cifmw_reproducer_framework_location }}" cmd: ansible-galaxy collection install --upgrade --force . 
diff --git a/roles/run_hook/tasks/playbook.yml b/roles/run_hook/tasks/playbook.yml index 2463c7ae27..3f3155460a 100644 --- a/roles/run_hook/tasks/playbook.yml +++ b/roles/run_hook/tasks/playbook.yml @@ -25,7 +25,7 @@ default('openstack') }} ansible.builtin.set_fact: - cifmw_basedir: "{{ _bdir }}" + cifmw_basedir: "{{ _bdir }}" hook_name: "{{ _hook_name }}" playbook_path: "{{ _play | realpath }}" log_path: >- diff --git a/scenarios/reproducers/3-nodes.yml b/scenarios/reproducers/3-nodes.yml index 23a44eaa3e..3796e14d9e 100644 --- a/scenarios/reproducers/3-nodes.yml +++ b/scenarios/reproducers/3-nodes.yml @@ -30,7 +30,7 @@ cifmw_libvirt_manager_configuration: - osp_trunk compute: uefi: "{{ cifmw_use_uefi }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 1] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 1] | max }}" root_part_id: "{{ cifmw_root_partition_id }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" diff --git a/scenarios/reproducers/bgp-4-racks-3-ocps.yml b/scenarios/reproducers/bgp-4-racks-3-ocps.yml index 478ebc34fb..5ed675f751 100644 --- a/scenarios/reproducers/bgp-4-racks-3-ocps.yml +++ b/scenarios/reproducers/bgp-4-racks-3-ocps.yml @@ -236,7 +236,7 @@ cifmw_libvirt_manager_configuration: - ocpbm - osp_trunk compute: - amount: "{{ cifmw_libvirt_manager_compute_amount }}" + amount: "{{ cifmw_libvirt_manager_compute_amount }}" root_part_id: "{{ cifmw_root_partition_id }}" uefi: "{{ cifmw_use_uefi }}" image_url: "{{ cifmw_discovered_image_url }}" diff --git a/scenarios/reproducers/dt-dcn.yml b/scenarios/reproducers/dt-dcn.yml index fdfa6b8ea4..9ccaf2f317 100644 --- a/scenarios/reproducers/dt-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -116,7 +116,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ 
[cifmw_libvirt_manager_compute_amount|int, 3] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" diff --git a/scenarios/reproducers/dt-osasinfra.yml b/scenarios/reproducers/dt-osasinfra.yml index dae99d6827..680ec32865 100644 --- a/scenarios/reproducers/dt-osasinfra.yml +++ b/scenarios/reproducers/dt-osasinfra.yml @@ -97,7 +97,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" diff --git a/scenarios/reproducers/external-ceph.yml b/scenarios/reproducers/external-ceph.yml index 2313b745ea..ea0a9c5787 100644 --- a/scenarios/reproducers/external-ceph.yml +++ b/scenarios/reproducers/external-ceph.yml @@ -46,7 +46,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi | default(false) }}" root_part_id: 4 - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" diff --git a/scenarios/reproducers/va-hci.yml b/scenarios/reproducers/va-hci.yml index 422241a29a..5065c19fee 100644 --- a/scenarios/reproducers/va-hci.yml +++ b/scenarios/reproducers/va-hci.yml @@ -68,7 +68,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" image_url: "{{ cifmw_discovered_image_url }}" 
sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" diff --git a/scenarios/reproducers/va-multi.yml b/scenarios/reproducers/va-multi.yml index f4f68a12c3..1ebd366ac4 100644 --- a/scenarios/reproducers/va-multi.yml +++ b/scenarios/reproducers/va-multi.yml @@ -98,7 +98,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" @@ -112,7 +112,7 @@ cifmw_libvirt_manager_configuration: compute2: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" diff --git a/scenarios/reproducers/va-pidone.yml b/scenarios/reproducers/va-pidone.yml index 164e842949..ed3f2d510e 100644 --- a/scenarios/reproducers/va-pidone.yml +++ b/scenarios/reproducers/va-pidone.yml @@ -87,7 +87,7 @@ cifmw_libvirt_manager_configuration: compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" - amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" + amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}" image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" From 1a7bf6b83f0375160e15f4e65075be91c94beaf0 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 9 Jul 2025 17:10:41 +0200 Subject: [PATCH 224/480] Copy on remote host when pull secret is available there The commit helps 
to run reproducer playbook locally and not on remote host directly. Signed-off-by: Daniel Pawlik --- roles/manage_secrets/tasks/_push_secret.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/manage_secrets/tasks/_push_secret.yml b/roles/manage_secrets/tasks/_push_secret.yml index 7d84ea019d..453811358b 100644 --- a/roles/manage_secrets/tasks/_push_secret.yml +++ b/roles/manage_secrets/tasks/_push_secret.yml @@ -38,8 +38,14 @@ msg: | {{ _secret_file }} must be an absolute path + - name: Check if pull secret src file exists + ansible.builtin.stat: + path: "{{ _secret_file }}" + register: _ps_exists + - name: Copy file to location ansible.builtin.copy: + remote_src: "{{ _ps_exists.stat.exists }}" dest: "{{ _secret_dest }}" src: "{{ _secret_file }}" mode: "0600" From 74265b4c90061a203d022a6519c95f5451f5f81c Mon Sep 17 00:00:00 2001 From: Oliver Walsh Date: Mon, 14 Jul 2025 17:09:22 +0100 Subject: [PATCH 225/480] Fix rabbitmq-cluster-operator install_yamls make vars install_yamls uses RABBITMQ_REPO, not RABBITMQ_CLUSTER_REPO --- roles/install_yamls/tasks/zuul_set_operators_repo.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/roles/install_yamls/tasks/zuul_set_operators_repo.yml b/roles/install_yamls/tasks/zuul_set_operators_repo.yml index e5a3b37e23..28a89d5049 100644 --- a/roles/install_yamls/tasks/zuul_set_operators_repo.yml +++ b/roles/install_yamls/tasks/zuul_set_operators_repo.yml @@ -27,7 +27,10 @@ block: - name: Set fact with local repos based on Zuul items vars: - _repo_operator_name: "{{ zuul_item.project.short_name | regex_search('(?:openstack-)?(.*)-operator', '\\1') | first }}" + _repo_varname_overrides: + rabbitmq-cluster: rabbitmq + __repo_operator_name: "{{ zuul_item.project.short_name | regex_search('(?:openstack-)?(.*)-operator', '\\1') | first }}" + _repo_operator_name: "{{ _repo_varname_overrides.get(__repo_operator_name, __repo_operator_name) }}" _repo_operator_info: - key: "{{ _repo_operator_name | 
upper }}_REPO" value: "{{ ansible_user_dir }}/{{ zuul_item.project.src_dir }}" @@ -41,7 +44,10 @@ - name: Print helpful data for debugging vars: - _repo_operator_name: "{{ zuul_item.project.short_name | regex_search('(?:openstack-)?(.*)-operator', '\\1') | first }}" + _repo_varname_overrides: + rabbitmq-cluster: rabbitmq + __repo_operator_name: "{{ zuul_item.project.short_name | regex_search('(?:openstack-)?(.*)-operator', '\\1') | first }}" + _repo_operator_name: "{{ _repo_varname_overrides.get(__repo_operator_name, __repo_operator_name) }}" _repo_operator_info: - key: "{{ _repo_operator_name | upper }}_REPO" value: "{{ ansible_user_dir }}/{{ zuul_item.project.src_dir }}" From bd6d4f826e35af4bafbd4ec997e046f41657fe40 Mon Sep 17 00:00:00 2001 From: Oliver Walsh Date: Mon, 14 Jul 2025 18:36:10 +0100 Subject: [PATCH 226/480] Skip rabbitmq-cluster-operator build --- roles/operator_build/tasks/build.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/roles/operator_build/tasks/build.yml b/roles/operator_build/tasks/build.yml index 27eb610d04..bdc12df5cd 100644 --- a/roles/operator_build/tasks/build.yml +++ b/roles/operator_build/tasks/build.yml @@ -126,6 +126,8 @@ }}}, recursive=True)}} - name: "{{ operator.name }} - Call manifests" # noqa: name[template] + when: + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -159,6 +161,8 @@ extra_args: "{{ _binddata_vars }}" - name: "{{ operator.name }} - Call docker-build" # noqa: name[template] + when: + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -170,6 +174,7 @@ - name: "{{ operator.name }} - Call docker-push" # noqa: name[template] when: - cifmw_operator_build_push_ct|bool + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -184,6 +189,8 
@@ delay: 10 - name: "{{ operator.name }} - Call bundle" # noqa: name[template] + when: + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -197,6 +204,8 @@ LOCAL_REGISTRY: "{{ cifmw_operator_build_local_registry }}" - name: "{{ operator.name }} - Call bundle-build" # noqa: name[template] + when: + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -213,6 +222,7 @@ - name: "{{ operator.name }} - Push bundle image" # noqa: name[template] when: - cifmw_operator_build_push_ct|bool + - operator.name != "rabbitmq-cluster-operator" containers.podman.podman_image: name: "{{ operator_img_bundle }}" pull: false @@ -223,6 +233,8 @@ delay: 10 - name: "{{ operator.name }} - Call catalog-build" # noqa: name[template] + when: + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" @@ -240,6 +252,7 @@ - name: "{{ operator.name }} - Call catalog-push" # noqa: name[template] when: - cifmw_operator_build_push_ct|bool + - operator.name != "rabbitmq-cluster-operator" ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" From 08aabb7a73daac8fe82922e3551ea1c6f1c5266c Mon Sep 17 00:00:00 2001 From: Jon Uriarte Date: Wed, 9 Jul 2025 13:38:33 +0200 Subject: [PATCH 227/480] Update shiftstack reviewers Update the CODEOWNERS file according to current shiftstack QE members. 
--- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8b188e8fc8..d74c2cd320 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -41,8 +41,8 @@ roles/federation @openstack-k8s-operators/security roles/ipa @openstack-k8s-operators/security # Shiftstack -roles/shiftstack @rlobillo @eurijon -roles/ci_gen_kustomize_values/templates/shiftstack @rlobillo @eurijon +roles/shiftstack @imatza-rh @eurijon +roles/ci_gen_kustomize_values/templates/shiftstack @imatza-rh @eurijon # Storage roles/cifmw_block_device @openstack-k8s-operators/storage From a1b284ccd31d8b9c1cba4e91284289f352195e43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Tue, 15 Jul 2025 13:07:29 +0200 Subject: [PATCH 228/480] (update): Simplify registry login for lopenstackclient This commit refactors the logic for creating the local openstackclient container. The previous implementation relied on parsing OpenStackDataPlaneNodeSet resources to determine registry credentials, which was complex and error-prone. The new approach simplifies this by: - Fetching the openstackclient image directly from its pod definition. - Conditionally logging into `registry.redhat.io` only when the image originates from there. - Using the `cifmw_registry_token` dictionary for credentials to logging into `registry.redhat.io`. 
--- .../tasks/create_local_openstackclient.yml | 81 ++++--------------- 1 file changed, 16 insertions(+), 65 deletions(-) diff --git a/roles/update/tasks/create_local_openstackclient.yml b/roles/update/tasks/create_local_openstackclient.yml index 7a731745cf..2d8b20999f 100644 --- a/roles/update/tasks/create_local_openstackclient.yml +++ b/roles/update/tasks/create_local_openstackclient.yml @@ -1,68 +1,4 @@ --- -- name: Gather NodeSet resource information - kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - namespace: "openstack" - api_key: "{{ cifmw_openshift_token | default(omit) }}" - context: "{{ cifmw_openshift_context | default(omit) }}" - kind: "OpenStackDataPlaneNodeSet" - api_version: "dataplane.openstack.org/v1beta1" - register: _cifmw_update_osdpns_all_info - -- name: Fail if no OSDPNS resources are found - ansible.builtin.fail: - msg: "No OSDPNS resources found in the 'openstack' namespace!" - when: _cifmw_update_osdpns_all_info.resources | length == 0 - -- name: Choose the first OSDPNS resource which has edpm_container_registry_logins defined - ansible.builtin.set_fact: - _cifmw_update_osdpns_info: >- - {{ - _cifmw_update_osdpns_all_info.resources - | community.general.json_query('[?spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins] | [0]') - }} - -- name: Display which osdpns we're using - ansible.builtin.debug: - msg: "Found OSDPNS named: '{{ _cifmw_update_osdpns_info.metadata.name }}'" - -- name: Determine registry - ansible.builtin.set_fact: - cifmw_update_login_registry: >- - {{ - (cifmw_ci_gen_kustomize_values_ooi_image.split('/')[0]) - if cifmw_ci_gen_kustomize_values_ooi_image is defined - else 'quay.io' - | trim - }} - -- name: Check if credentials exist - ansible.builtin.set_fact: - cifmw_update_login_username: "{{ login_username }}" - cifmw_update_login_password: "{{ login_dict[login_username] }}" - vars: - login_dict: >- - {{ - _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars. 
- edpm_container_registry_logins[cifmw_update_login_registry] - }} - login_username: "{{ login_dict.keys()|list|first }}" - when: - - _cifmw_update_osdpns_info.spec.nodeTemplate.ansible.ansibleVars.edpm_container_registry_logins is defined - - login_dict is defined - - login_dict|length > 0 - - cifmw_update_login_registry != 'quay.io' - -- name: Log in to registry when needed - containers.podman.podman_login: - registry: "{{ cifmw_update_login_registry }}" - username: "{{ cifmw_update_login_username }}" - password: "{{ cifmw_update_login_password }}" - when: - - cifmw_update_login_username is defined - - cifmw_update_login_password is defined - - cifmw_update_login_registry != 'quay.io' - - name: Retrieve the openstackclient Pod kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" @@ -80,7 +16,22 @@ - name: Set the openstackclient image fact ansible.builtin.set_fact: - openstackclient_image: "{{ _cifmw_update_openstackclient_pod.resources[0].spec.containers[0].image }}" + openstackclient_image: "{{ _cifmw_update_openstackclient_pod | community.general.json_query('resources[0].spec.containers[0].image') | default('') }}" + +- name: Login to registry.redhat.io if needed + when: "'registry.redhat.io' in openstackclient_image" + block: + - name: Fail if cifmw_registry_token.credentials is not defined + ansible.builtin.fail: + msg: "cifmw_registry_token.credentials is not defined, cannot login to registry.redhat.io" + when: "'credentials' not in cifmw_registry_token | default({})" + + - name: Login to registry.redhat.io + containers.podman.podman_login: + username: "{{ cifmw_registry_token.credentials.username }}" + password: "{{ cifmw_registry_token.credentials.password }}" + registry: "registry.redhat.io" + no_log: true - name: Collect and save OpenStack config files ansible.builtin.include_tasks: collect_openstackclient_config.yml From d1cb4e198510c8ac0257a577e9432d2ed1d88b26 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 18 Jul 2025 
19:50:29 +0200 Subject: [PATCH 229/480] Fix terminology: use "architecture-based" instead of "VA" The architecture repository contains severals DTs, some of which are VAs. At the beginning the architecture-runner code was developed with VA1 (later HCI VA) in mind, but we are really talking about architecture-based deployments, not specifically to VAs. This change only affects the ansible output and it has no functional impact. --- deploy-edpm.yml | 2 +- docs/source/usage/01_usage.md | 2 +- playbooks/06-deploy-architecture.yml | 2 +- roles/ci_gen_kustomize_values/README.md | 2 +- roles/kustomize_deploy/README.md | 8 ++++---- roles/kustomize_deploy/tasks/check_requirements.yml | 2 +- roles/reproducer/tasks/main.yml | 4 ++-- roles/reproducer/tasks/reuse_main.yaml | 4 ++-- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/deploy-edpm.yml b/deploy-edpm.yml index e2ab177375..ed7dd92489 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -150,7 +150,7 @@ tags: - edpm -- name: Deploy VA and validate workflow +- name: Deploy architecture and validate workflow hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: diff --git a/docs/source/usage/01_usage.md b/docs/source/usage/01_usage.md index b48568d982..100ff70185 100644 --- a/docs/source/usage/01_usage.md +++ b/docs/source/usage/01_usage.md @@ -46,7 +46,7 @@ are shared among multiple roles: - `cifmw_ssh_keysize`: (Integer) Size of ssh keys that will be injected into the controller in order to connect to the rest of the nodes. Defaults to 521. - `cifmw_architecture_repo`: (String) Path of the architecture repository on the controller node. Defaults to `~/src/github.com/openstack-k8s-operators/architecture` -- `cifmw_architecture_scenario`: (String) The selected VA scenario to deploy. +- `cifmw_architecture_scenario`: (String) The selected architecture-based scenario to deploy. 
- `cifmw_architecture_wait_condition`: (Dict) Structure defining custom wait_conditions for the automation. - `cifmw_architecture_user_kustomize.*`: (Dict) Structures defining user provided kustomization for automation. All these variables are combined together. - `cifmw_architecture_user_kustomize_base_dir`: (String) Path where to lock for kustomization patches. diff --git a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index 41e997c97b..71d8edf0e8 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -3,7 +3,7 @@ # NOTE: Playbook migrated to: roles/cifmw_setup/tasks/deploy_architecture.yml # DO NOT EDIT THIS PLAYBOOK. IT WILL BE REMOVED IN NEAR FUTURE. # -- name: Deploy VA +- name: Deploy an architecture-based scenario hosts: "{{ cifmw_target_host | default('localhost') }}" tasks: - name: Run pre_deploy hooks diff --git a/roles/ci_gen_kustomize_values/README.md b/roles/ci_gen_kustomize_values/README.md index 72baafe29a..7692d67c56 100644 --- a/roles/ci_gen_kustomize_values/README.md +++ b/roles/ci_gen_kustomize_values/README.md @@ -8,7 +8,7 @@ None ```{warning} The top level parameter `cifmw_architecture_scenario` is required in order -to select the proper VA scenario to deploy. If not provided, the role will fail +to select the proper architecture-based scenario to deploy. If not provided, the role will fail with a message. ``` diff --git a/roles/kustomize_deploy/README.md b/roles/kustomize_deploy/README.md index b3c40451f7..0cd373b7a8 100644 --- a/roles/kustomize_deploy/README.md +++ b/roles/kustomize_deploy/README.md @@ -1,12 +1,12 @@ # kustomize_deploy -Ansible role designed to deploy VA scenarios using the kustomize tool. +Ansible role designed to deploy architecture-based scenarios using the kustomize tool. ## Parameters ```{warning} The top level parameter `cifmw_architecture_scenario` is required in order -to select the proper VA scenario to deploy. 
If not provided, the role will fail +to select the proper architecture-based scenario to deploy. If not provided, the role will fail with a message. ``` @@ -15,7 +15,7 @@ with a message. - `cifmw_kustomize_deploy_basedir`: _(string)_ Base directory for the ci-framework artifacts. Defaults to `~/ci-framework-data/` - `cifmw_kustomize_deploy_architecture_repo_url`: _(string)_ URL of The - "architecture" repository, where the VA scenarios are defined. + "architecture" repository, where the architecture-based scenarios are defined. Defaults to `https://github.com/openstack-k8s-operators/architecture` - `cifmw_kustomize_deploy_architecture_repo_dest_dir`: _(string)_ Directory where the architecture repo is cloned on the controller node. @@ -26,7 +26,7 @@ with a message. Relative path of the common CRs in the architecture repo. Defaults to `/examples/common` - `cifmw_kustomize_deploy_architecture_examples_path`: _(string)_ Relative - path of the VA scenario list in the operator repo. Defaults to `/examples/va` + path of the architecture-based scenario list in the operator repo. Defaults to `/examples/va` - `cifmw_kustomize_deploy_kustomizations_dest_dir`: _(string)_ Path for the generated CR files. Defaults to `cifmw_kustomize_deploy_destfiles_basedir + /artifacts/kustomize_deploy` diff --git a/roles/kustomize_deploy/tasks/check_requirements.yml b/roles/kustomize_deploy/tasks/check_requirements.yml index e281978fbe..e3376f9190 100644 --- a/roles/kustomize_deploy/tasks/check_requirements.yml +++ b/roles/kustomize_deploy/tasks/check_requirements.yml @@ -41,7 +41,7 @@ ansible.builtin.fail: msg: > You need to properly set the `cifmw_architecture_scenario` variable - in order to select the VA scenario to deploy. You can take a list of + in order to select the architecture-based scenario to deploy. You can take a list of scenario in the `examples/va` folder in the architecture repo. 
when: - cifmw_architecture_scenario is not defined diff --git a/roles/reproducer/tasks/main.yml b/roles/reproducer/tasks/main.yml index 14671a1750..01a681be48 100644 --- a/roles/reproducer/tasks/main.yml +++ b/roles/reproducer/tasks/main.yml @@ -360,7 +360,7 @@ - cifmw_job_uri is defined ansible.builtin.include_tasks: ci_job.yml - - name: Prepare VA deployment + - name: Prepare architecture-based deployment when: - cifmw_architecture_scenario is defined - cifmw_job_uri is undefined @@ -372,7 +372,7 @@ tags: - deploy_architecture - - name: Prepare VA post deployment + - name: Prepare architecture-based post deployment when: - cifmw_architecture_scenario is defined - cifmw_job_uri is undefined diff --git a/roles/reproducer/tasks/reuse_main.yaml b/roles/reproducer/tasks/reuse_main.yaml index fe05c8a708..d299115285 100644 --- a/roles/reproducer/tasks/reuse_main.yaml +++ b/roles/reproducer/tasks/reuse_main.yaml @@ -180,7 +180,7 @@ - cifmw_job_uri is defined ansible.builtin.include_tasks: ci_job.yml - - name: Prepare VA deployment + - name: Prepare architecture-based deployment when: - cifmw_architecture_scenario is defined - cifmw_job_uri is undefined @@ -192,7 +192,7 @@ tags: - deploy_architecture - - name: Prepare VA post deployment + - name: Prepare architecture-based post deployment when: - cifmw_architecture_scenario is defined - cifmw_job_uri is undefined From edec60a97c15c0aefad3ec9989437c7f7dd58f1e Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Fri, 18 Jul 2025 10:57:32 +0200 Subject: [PATCH 230/480] Run custom command after rhos-release --- roles/reproducer/tasks/rhos_release.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/reproducer/tasks/rhos_release.yml b/roles/reproducer/tasks/rhos_release.yml index 0283d608fc..752d1c8caf 100644 --- a/roles/reproducer/tasks/rhos_release.yml +++ b/roles/reproducer/tasks/rhos_release.yml @@ -8,3 +8,9 @@ - name: Install repos ansible.builtin.command: cmd: "rhos-release {{ cifmw_repo_setup_rhos_release_args 
| default('rhel') }}" + +- name: Run custom commands after rhos-release setup + ansible.builtin.command: + cmd: "{{ cifmw_repo_setup_rhos_release_post }}" + when: + - cifmw_repo_setup_rhos_release_post is defined From 24cf7db286e368f60df0a249bc50794528a9415c Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 22 Jul 2025 16:38:31 +0530 Subject: [PATCH 231/480] Pin ansible-core version for doc-requirements The new release of Ansible Core 2.19 is breaking our doc jobs. Until the reason is investigated, doc jobs should use older Ansible version. --- docs/doc-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/doc-requirements.txt b/docs/doc-requirements.txt index 59430bbda7..0bae27209c 100644 --- a/docs/doc-requirements.txt +++ b/docs/doc-requirements.txt @@ -5,5 +5,5 @@ Pygments>=2.2.0 reno>=2.5.0 sphinxemoji myst-parser[linkify] -ansible-core +ansible-core==2.15.13 ansible-doc-extractor From 46c1e1227dbc8d64dbabea6a715f6bbe24f74ec2 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 17 Jul 2025 14:24:12 +0200 Subject: [PATCH 232/480] [debug] Print the rhos-release URL We are investigating a case and it appeared that despite setting the variable to the desired value, in the end in the job it ends up requesting something else... This debug task would allow us inspecting what we really get. 
--- roles/repo_setup/tasks/rhos_release.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/repo_setup/tasks/rhos_release.yml b/roles/repo_setup/tasks/rhos_release.yml index 998af1223f..af86233e7c 100644 --- a/roles/repo_setup/tasks/rhos_release.yml +++ b/roles/repo_setup/tasks/rhos_release.yml @@ -8,6 +8,10 @@ state: directory mode: "0755" + - name: Print the URL to request + ansible.builtin.debug: + msg: "{{ cifmw_repo_setup_rhos_release_rpm }}" + - name: Download the RPM vars: cifmw_krb_request_url: "{{ cifmw_repo_setup_rhos_release_rpm }}" From 30c1b1c507c73040c954faa9c2580529b01f1618 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 16 Jul 2025 15:32:56 +0530 Subject: [PATCH 233/480] Use FQCN during the use of a module Usage of FQCN while calling a module avoids ambiguity [1] [1] https://ansible.readthedocs.io/projects/lint/rules/fqcn/\#problematic-code --- plugins/README.md | 2 +- plugins/action/ci_kustomize.py | 4 ++-- plugins/action/ci_script.py | 2 +- plugins/action/discover_latest_image.py | 2 +- plugins/modules/generate_make_tasks.py | 2 +- roles/artifacts/tasks/crc.yml | 4 ++-- roles/artifacts/tasks/main.yml | 2 +- roles/discover_latest_image/tasks/main.yml | 2 +- .../edpm_prepare/tasks/kustomize_and_deploy.yml | 2 +- roles/install_yamls/README.md | 4 ++-- roles/operator_build/tasks/build.yml | 16 ++++++++-------- roles/operator_deploy/tasks/main.yml | 2 +- .../tasks/build_openstack-must-gather_image.yml | 4 ++-- .../targets/kustomize/tasks/run_test_case.yml | 2 +- tests/integration/targets/make/tasks/ci_make.yml | 16 ++++++++-------- tests/integration/targets/script/tasks/main.yml | 10 +++++----- 16 files changed, 38 insertions(+), 38 deletions(-) diff --git a/plugins/README.md b/plugins/README.md index dc6678926b..12161f14c4 100644 --- a/plugins/README.md +++ b/plugins/README.md @@ -57,7 +57,7 @@ Any of the `ansible.builtin.uri` module is supported. 
```YAML - name: Get latest CentOS 9 Stream image register: discovered_image - discover_latest_image: + cifmw.general.discover_latest_image: url: "https://cloud.centos.org/centos/9-stream/x86_64/images/" image_prefix: "CentOS-Stream-GenericCloud" diff --git a/plugins/action/ci_kustomize.py b/plugins/action/ci_kustomize.py index 0dd9ec4bf0..0e3bd44040 100644 --- a/plugins/action/ci_kustomize.py +++ b/plugins/action/ci_kustomize.py @@ -105,7 +105,7 @@ # Apply the kustomizations in `/home/user/source/k8s-manifets-dir` to the # `target_path` manifest and output the result in `output_pat` - name: Apply the file and variables kustomizations to multiple CRs - ci_kustomize: + cifmw.general.ci_kustomize: target_path: /home/user/source/k8s-manifets-dir/manifest.yaml output_path: /home/user/source/k8s-manifets-dir/out.yaml @@ -113,7 +113,7 @@ # `/home/user/source/k8s-manifets-dir` and `extra_dir` dirs to the # manifests available in the `target_path` dir - name: Apply the file and variables kustomizations to multiple CRs - ci_kustomize: + cifmw.general.ci_kustomize: target_path: /home/user/source/k8s-manifets-dir kustomizations: - apiVersion: kustomize.config.k8s.io/v1beta1 diff --git a/plugins/action/ci_script.py b/plugins/action/ci_script.py index 30b0dcd80e..e24151ade9 100644 --- a/plugins/action/ci_script.py +++ b/plugins/action/ci_script.py @@ -56,7 +56,7 @@ EXAMPLES = r""" - name: Run custom script register: script_output - ci_script: + cifmw.general.ci_script: output_dir: "/home/zuul/ci-framework-data/artifacts" script: | mkdir /home/zuul/test-dir diff --git a/plugins/action/discover_latest_image.py b/plugins/action/discover_latest_image.py index 3bc5101288..4bdc72dc9a 100644 --- a/plugins/action/discover_latest_image.py +++ b/plugins/action/discover_latest_image.py @@ -31,7 +31,7 @@ EXAMPLES = r""" - name: Get latest CentOS 9 Stream image register: discovered_images - discover_latest_image: + cifmw.general.discover_latest_image: base_url: 
"https://cloud.centos.org/centos/{{ ansible_distribution_major_version }}-stream/x86_64/images" image_prefix: "CentOS-Stream-GenericCloud-" images_file: "CHECKSUM" diff --git a/plugins/modules/generate_make_tasks.py b/plugins/modules/generate_make_tasks.py index 4cbcf6aef6..ec6d0da03d 100644 --- a/plugins/modules/generate_make_tasks.py +++ b/plugins/modules/generate_make_tasks.py @@ -73,7 +73,7 @@ delay: "{{ make_%(target)s_delay | default(omit) }}" until: "{{ make_%(target)s_until | default(true) }}" register: "make_%(target)s_status" - ci_script: + cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "%(chdir)s" script: "make %(target)s" diff --git a/roles/artifacts/tasks/crc.yml b/roles/artifacts/tasks/crc.yml index 7d6a302d38..4e30b9d9b7 100644 --- a/roles/artifacts/tasks/crc.yml +++ b/roles/artifacts/tasks/crc.yml @@ -30,7 +30,7 @@ - name: Prepare root ssh accesses ignore_errors: true # noqa: ignore-errors - ci_script: + cifmw.general.ci_script: output_dir: "{{ cifmw_artifacts_basedir }}/artifacts" script: |- ssh -i {{ new_keypair_path | default(cifmw_artifacts_crc_sshkey) }} {{ cifmw_artifacts_crc_user }}@{{ cifmw_artifacts_crc_host }} <- scp -v -r -i {{ new_keypair_path | default(cifmw_artifacts_crc_sshkey) }} diff --git a/roles/artifacts/tasks/main.yml b/roles/artifacts/tasks/main.yml index e7210bb9ca..36e10f79a6 100644 --- a/roles/artifacts/tasks/main.yml +++ b/roles/artifacts/tasks/main.yml @@ -94,7 +94,7 @@ when: cifmw_artifacts_mask_logs |bool ignore_errors: true # noqa: ignore-errors timeout: 3600 - crawl_n_mask: + cifmw.general.crawl_n_mask: path: "{{ item }}" isdir: true loop: diff --git a/roles/discover_latest_image/tasks/main.yml b/roles/discover_latest_image/tasks/main.yml index 9426acae3c..3bc5d6dc40 100644 --- a/roles/discover_latest_image/tasks/main.yml +++ b/roles/discover_latest_image/tasks/main.yml @@ -16,7 +16,7 @@ - name: Get latest image register: discovered_image - 
discover_latest_image: + cifmw.general.discover_latest_image: url: "{{ cifmw_discover_latest_image_base_url }}" image_prefix: "{{ cifmw_discover_latest_image_qcow_prefix }}" images_file: "{{ cifmw_discover_latest_image_images_file }}" diff --git a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml index b3054f9cb1..fc4d24fb71 100644 --- a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml +++ b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml @@ -63,7 +63,7 @@ 'cr' ] | ansible.builtin.path_join }} - ci_kustomize: + cifmw.general.ci_kustomize: target_path: "{{ cifmw_edpm_prepare_openstack_crs_path }}" sort_ascending: false kustomizations: "{{ cifmw_edpm_prepare_kustomizations + _ctlplane_name_kustomizations }}" diff --git a/roles/install_yamls/README.md b/roles/install_yamls/README.md index ee38655e7e..dc3dbae312 100644 --- a/roles/install_yamls/README.md +++ b/roles/install_yamls/README.md @@ -41,7 +41,7 @@ The created role directory contains multiple task files, similar to delay: "{{ make_crc_storage_delay | default(omit) }}" until: "{{ make_crc_storage_until | default(true) }}" register: "make_crc_storage_status" - ci_script: + cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: make crc_storage @@ -119,7 +119,7 @@ Let's look at below example:- delay: "{{ make_ansibleee_cleanup_delay | default(omit) }}" until: "{{ make_ansibleee_cleanup_until | default(true) }}" register: "make_ansibleee_cleanup_status" - ci_script: + cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" script: "make ansibleee_cleanup" diff --git a/roles/operator_build/tasks/build.yml b/roles/operator_build/tasks/build.yml index bdc12df5cd..6aefbe2c1e 100644 --- 
a/roles/operator_build/tasks/build.yml +++ b/roles/operator_build/tasks/build.yml @@ -128,7 +128,7 @@ - name: "{{ operator.name }} - Call manifests" # noqa: name[template] when: - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -153,7 +153,7 @@ } if 'image_base' in operator else {} ) }} - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -163,7 +163,7 @@ - name: "{{ operator.name }} - Call docker-build" # noqa: name[template] when: - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -175,7 +175,7 @@ when: - cifmw_operator_build_push_ct|bool - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -191,7 +191,7 @@ - name: "{{ operator.name }} - Call bundle" # noqa: name[template] when: - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -206,7 +206,7 @@ - name: "{{ operator.name }} - Call bundle-build" # noqa: name[template] when: - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -235,7 +235,7 @@ - name: "{{ operator.name }} - Call catalog-build" # noqa: name[template] when: - 
operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" @@ -253,7 +253,7 @@ when: - cifmw_operator_build_push_ct|bool - operator.name != "rabbitmq-cluster-operator" - ci_script: + cifmw.general.ci_script: dry_run: "{{ cifmw_operator_build_dryrun|bool }}" chdir: "{{ operator.src }}" output_dir: "{{ cifmw_operator_build_basedir }}/artifacts" diff --git a/roles/operator_deploy/tasks/main.yml b/roles/operator_deploy/tasks/main.yml index 6df250f8dc..978a48c095 100644 --- a/roles/operator_deploy/tasks/main.yml +++ b/roles/operator_deploy/tasks/main.yml @@ -15,7 +15,7 @@ # under the License. - name: Deploy selected operators - ci_script: + cifmw.general.ci_script: output_dir: "{{ cifmw_operator_deploy_basedir }}/artifacts" chdir: "{{ cifmw_operator_deploy_installyamls }}" script: "make {{ item.name }}" diff --git a/roles/os_must_gather/tasks/build_openstack-must-gather_image.yml b/roles/os_must_gather/tasks/build_openstack-must-gather_image.yml index b8b01c84fc..fd0e90be99 100644 --- a/roles/os_must_gather/tasks/build_openstack-must-gather_image.yml +++ b/roles/os_must_gather/tasks/build_openstack-must-gather_image.yml @@ -25,7 +25,7 @@ msg: "{{ openstack_must_gather_tag }}" - name: Build openstack-must-gather container - ci_script: + cifmw.general.ci_script: chdir: "{{ cifmw_os_must_gather_repo_path }}" output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" script: make podman-build @@ -35,7 +35,7 @@ MUST_GATHER_IMAGE: "openstack-must-gather" - name: Push openstack-must-gather container - ci_script: + cifmw.general.ci_script: chdir: "{{ cifmw_os_must_gather_repo_path }}" output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" script: make podman-push diff --git a/tests/integration/targets/kustomize/tasks/run_test_case.yml b/tests/integration/targets/kustomize/tasks/run_test_case.yml index 
452dfbd907..7adfda15e9 100644 --- a/tests/integration/targets/kustomize/tasks/run_test_case.yml +++ b/tests/integration/targets/kustomize/tasks/run_test_case.yml @@ -127,7 +127,7 @@ {% endif -%} {% endfor -%} {{ paths }} - ci_kustomize: + cifmw.general.ci_kustomize: target_path: >- {{ ( diff --git a/tests/integration/targets/make/tasks/ci_make.yml b/tests/integration/targets/make/tasks/ci_make.yml index 1fd544b3a3..1b1a15fdd8 100644 --- a/tests/integration/targets/make/tasks/ci_make.yml +++ b/tests/integration/targets/make/tasks/ci_make.yml @@ -24,7 +24,7 @@ - name: Run ci_script make without any extra_args register: no_extra_args - ci_script: + cifmw.general.ci_script: script: make help chdir: /tmp/project_makefile output_dir: /tmp/artifacts @@ -38,7 +38,7 @@ - name: Run ci_script make with extra_args register: with_extra_args - ci_script: + cifmw.general.ci_script: script: make help chdir: /tmp/project_makefile output_dir: /tmp/artifacts @@ -52,7 +52,7 @@ - "'This is the help thing showing starwars' in with_extra_args.stdout" - name: Try dry_run parameter - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts dry_run: true @@ -62,7 +62,7 @@ - name: Test with extra_args - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts script: make help @@ -82,20 +82,20 @@ ONE: 1 FOO_BAR: Baz - name: Run ci_script make with custom env variable - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts script: make help extra_args: "{{ dict((my_env_vars|default({})), **(other_env_vars|default({}))) }}" - name: Run ci_script make custom env var and default - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts script: make help extra_args: "{{ my_env_vars | default({}) }}" - name: Run ci_script make with extra_args and default - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts 
script: make help @@ -107,7 +107,7 @@ register: failing_make failed_when: - "'Error 255' not in failing_make.stdout" - ci_script: + cifmw.general.ci_script: chdir: /tmp/project_makefile output_dir: /tmp/artifacts script: make failing diff --git a/tests/integration/targets/script/tasks/main.yml b/tests/integration/targets/script/tasks/main.yml index 375717f459..e555bca973 100644 --- a/tests/integration/targets/script/tasks/main.yml +++ b/tests/integration/targets/script/tasks/main.yml @@ -12,7 +12,7 @@ register: out_ok environment: TEST_VAR: "test-value" - ci_script: + cifmw.general.ci_script: output_dir: /tmp/artifacts script: | mkdir -p /tmp/test/target @@ -23,7 +23,7 @@ register: out_fail environment: TEST_VAR: "test-value" - ci_script: + cifmw.general.ci_script: output_dir: /tmp/artifacts script: | printf "I am about to fail" >&2 @@ -43,7 +43,7 @@ - name: Run with global debug enabled vars: cifmw_debug: true - ci_script: + cifmw.general.ci_script: output_dir: /tmp/artifacts script: | printf "Debug 1" @@ -51,7 +51,7 @@ - name: Run with action debug enabled vars: cifmw_ci_script_debug: true - ci_script: + cifmw.general.ci_script: output_dir: /tmp/artifacts script: | printf "Debug 2" @@ -66,7 +66,7 @@ - name: Run using chdir option vars: cifmw_ci_script_debug: true - ci_script: + cifmw.general.ci_script: output_dir: /tmp/artifacts chdir: /tmp/dummy/test script: | From 1576c38d5d3f3248036f54ff8b378b4600ff11c6 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Tue, 1 Jul 2025 11:12:40 +0200 Subject: [PATCH 234/480] Allow Ceph to deploy rbd-mirror daemon This patch introduces the variables and the tasks required to deploy rbd-mirror, useful to test cinder replication. We enable rbd_mirror daemon in a DCN scenario to see if we can properly enable and configure replication. 
Signed-off-by: Francesco Pantano --- playbooks/ceph.yml | 6 +++ roles/cifmw_cephadm/defaults/main.yml | 1 + roles/cifmw_cephadm/tasks/rbd_mirror.yml | 37 +++++++++++++++++++ .../templates/ceph_rbd_mirror.yml.j2 | 8 ++++ scenarios/reproducers/dt-dcn.yml | 1 + 5 files changed, 53 insertions(+) create mode 100644 roles/cifmw_cephadm/tasks/rbd_mirror.yml create mode 100644 roles/cifmw_cephadm/templates/ceph_rbd_mirror.yml.j2 diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 8bdc4ba86e..e06bd772c3 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -436,6 +436,12 @@ # we reuse the same VIP reserved for rgw cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" + - name: Deploy rbd-mirror + when: cifmw_ceph_daemons_layout.ceph_rbd_mirror_enabled | default(false) | bool + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: rbd_mirror + - name: Create Cephx Keys for OpenStack ansible.builtin.import_role: name: cifmw_cephadm diff --git a/roles/cifmw_cephadm/defaults/main.yml b/roles/cifmw_cephadm/defaults/main.yml index 5cace63145..bc245d45be 100644 --- a/roles/cifmw_cephadm/defaults/main.yml +++ b/roles/cifmw_cephadm/defaults/main.yml @@ -66,6 +66,7 @@ cifmw_cephadm_pacific_filter: "16.*" # The path of the rendered rgw spec file cifmw_ceph_rgw_spec_path: /tmp/ceph_rgw.yml cifmw_ceph_mds_spec_path: /tmp/ceph_mds.yml +cifmw_ceph_rbd_mirror_spec_path: /tmp/ceph_rbd_mirror.yml cifmw_ceph_rgw_keystone_ep: "https://keystone-internal.openstack.svc:5000" cifmw_ceph_rgw_keystone_psw: 12345678 cifmw_ceph_rgw_keystone_user: "swift" diff --git a/roles/cifmw_cephadm/tasks/rbd_mirror.yml b/roles/cifmw_cephadm/tasks/rbd_mirror.yml new file mode 100644 index 0000000000..e9729e3aab --- /dev/null +++ b/roles/cifmw_cephadm/tasks/rbd_mirror.yml @@ -0,0 +1,37 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Collect the host and build the resulting host list + ansible.builtin.set_fact: + _hosts: "{{ _hosts|default([]) + [ item ] }}" + loop: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" + +- name: Create RBD Mirror spec + ansible.builtin.template: + src: templates/ceph_rbd_mirror.yml.j2 + dest: "{{ cifmw_ceph_rbd_mirror_spec_path }}" + mode: '0644' + force: true + +- name: Get ceph_cli + ansible.builtin.include_tasks: ceph_cli.yml + vars: + mount_spec: true + cifmw_cephadm_spec: "{{ cifmw_ceph_rbd_mirror_spec_path }}" + +- name: Apply spec + ansible.builtin.command: "{{ cifmw_cephadm_ceph_cli }} orch apply --in-file {{ cifmw_cephadm_container_spec }}" + become: true diff --git a/roles/cifmw_cephadm/templates/ceph_rbd_mirror.yml.j2 b/roles/cifmw_cephadm/templates/ceph_rbd_mirror.yml.j2 new file mode 100644 index 0000000000..e444dc8edf --- /dev/null +++ b/roles/cifmw_cephadm/templates/ceph_rbd_mirror.yml.j2 @@ -0,0 +1,8 @@ +--- +service_type: rbd-mirror +service_name: rbd-mirror +placement: + hosts: +{% for host in _hosts | unique %} + - {{ host }} +{% endfor %} diff --git a/scenarios/reproducers/dt-dcn.yml b/scenarios/reproducers/dt-dcn.yml index 9ccaf2f317..97cab60742 100644 --- a/scenarios/reproducers/dt-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -17,6 +17,7 @@ cifmw_ceph_daemons_layout: dashboard_enabled: false cephfs_enabled: true ceph_nfs_enabled: false + ceph_rbd_mirror_enabled: true cifmw_run_tests: false cifmw_cephadm_log_path: /home/zuul/ci-framework-data/logs cifmw_arch_automation_file: dcn.yaml From 
2736848b77d43136017c96eaebbdc1a14325522a Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Mon, 21 Jul 2025 20:09:24 +0530 Subject: [PATCH 235/480] [bop] Donot parse changes for cifmw_bop_skipped_projects cifmw_bop_skipped_projects contains lists of projects on which build_openstack_package should not run. We should not parse zuul change url on cifmw_bop_skipped_projects to avoid unwanted error. This pr adds the conditional for the same to avoid parsing. Note: - This pr also adds infrawatch/feature-verification-tests to cifmw_bop_skipped_projects list also. As there is no packaging support for this project. - Donot run DLRN if cifmw_bop_change_list is empty Signed-off-by: Chandan Kumar (raukadah) Resolves: OSPCIX-983 --- roles/build_openstack_packages/defaults/main.yml | 1 + roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/roles/build_openstack_packages/defaults/main.yml b/roles/build_openstack_packages/defaults/main.yml index debd11f844..549c7c4967 100644 --- a/roles/build_openstack_packages/defaults/main.yml +++ b/roles/build_openstack_packages/defaults/main.yml @@ -103,6 +103,7 @@ cifmw_bop_skipped_projects: - openstack-k8s-operators/repo-setup - openstack-k8s-operators/swift-operator - openstack-k8s-operators/telemetry-operator + - infrawatch/feature-verification-tests cifmw_bop_gating_port: 8766 diff --git a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml index 6560a3826a..2126c27f19 100644 --- a/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml +++ b/roles/build_openstack_packages/tasks/parse_and_build_pkgs.yml @@ -5,6 +5,7 @@ - zuul is defined - "'change_url' in item" - '"-distgit" not in item.project' + - item.project.name not in cifmw_bop_skipped_projects - item.project.name not in cifmw_bop_change_list|default([]) | map(attribute='project') |list - >- 
cifmw_bop_release_mapping[cifmw_bop_openstack_release] in item.branch or @@ -32,6 +33,7 @@ - name: Build DLRN packages from zuul changes when: + - cifmw_bop_change_list | length > 0 - '"-distgit" not in _change.project' - _change.project not in cifmw_bop_skipped_projects - >- From cd0fae5346011c1c8aa30db3b7827a98c99df9ca Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 23 Jul 2025 12:46:58 +0530 Subject: [PATCH 236/480] Add zuul jobs for ipa role which were missing in https://github.com/openstack-k8s-operators/ci-framework/pull/2972 --- zuul.d/molecule.yaml | 9 +++++++++ zuul.d/projects.yaml | 1 + 2 files changed, 10 insertions(+) diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 79db6dedb8..e95075a946 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -911,6 +911,15 @@ - ^.config/molecule/.* name: cifmw-molecule-federation parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/ipa/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-ipa + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index c7f8d7bdae..7866c54463 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -60,6 +60,7 @@ - cifmw-molecule-install_ca - cifmw-molecule-install_openstack_ca - cifmw-molecule-install_yamls + - cifmw-molecule-ipa - cifmw-molecule-krb_request - cifmw-molecule-kustomize_deploy - cifmw-molecule-libvirt_manager From 16f4873108e62559a4b5b933bd9a0ec6076256cc Mon Sep 17 00:00:00 2001 From: Maor Blaustein Date: Tue, 22 Jul 2025 13:10:18 +0300 Subject: [PATCH 237/480] [WNTP job] Skip QoS gateway related tests New tests including QoS gateway bandwidth checks [1] should be skipped until whitebox-neutron-tempest-plugin job uses newer version than antelope (when LP#2110018 [2] supported in 2025.1). 
[1] 954607: Test QoS max bandwidth limit for router gateways, along with other types | https://review.opendev.org/c/x/whitebox-neutron-tempest-plugin/+/954607 [2] https://bugs.launchpad.net/neutron/+bug/2110018 --- zuul.d/whitebox_neutron_tempest_jobs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zuul.d/whitebox_neutron_tempest_jobs.yaml b/zuul.d/whitebox_neutron_tempest_jobs.yaml index f321bbcef1..c47bca126d 100644 --- a/zuul.d/whitebox_neutron_tempest_jobs.yaml +++ b/zuul.d/whitebox_neutron_tempest_jobs.yaml @@ -141,6 +141,8 @@ excludeList: | # remove when this job use openstackclient version bigger than in antelope branch (no more releases) ^whitebox_neutron_tempest_plugin.tests.scenario.test_ports.PortListLongOptSGsCmd + # remove when this job use neutron version bigger than antelope (LP#2110018 supported in 2025.1) + ^whitebox_neutron_tempest_plugin.tests.scenario.test_qos.QosTestCommon.test_bw_limit_south_north # remove when bug OSPRH-9569 resolved ^whitebox_neutron_tempest_plugin.tests.scenario.test_metadata_rate_limiting # remove traffic logging tests when OSPRH-9203 resolved From a07479fce687f09d301e8e98f052a14e07b1c91a Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 22 Jul 2025 11:39:56 +0530 Subject: [PATCH 238/480] Symlink cifmw collection adjacent to ci playbooks Since we can neither install cifmw collection on Zuul executor nor make Zuul to use cifmw's ansible.cfg to find our custom plugins, we are going with the installation of cifmw collection adjacent to all ci playbooks [1]. To avoid duplicate code, symlink is used. 
[1] https://docs.ansible.com/ansible/latest/collections_guide/collections_installing.html#installing-collections-adjacent-to-playbooks --- .../collections/ansible_collections/cifmw/general/galaxy.yml | 1 + .../ansible_collections/cifmw/general/plugins/README.md | 1 + .../cifmw/general/plugins/action/ci_kustomize.py | 1 + .../ansible_collections/cifmw/general/plugins/action/ci_make.py | 1 + .../cifmw/general/plugins/action/ci_net_map.py | 1 + .../cifmw/general/plugins/action/ci_script.py | 1 + .../cifmw/general/plugins/action/discover_latest_image.py | 1 + .../cifmw/general/plugins/filter/reproducer_gerrit_infix.py | 1 + .../cifmw/general/plugins/filter/reproducer_refspec.py | 1 + .../cifmw/general/plugins/filter/to_nice_yaml_all.py | 1 + .../cifmw/general/plugins/module_utils/__init__.py | 1 + .../cifmw/general/plugins/module_utils/encoding | 1 + .../cifmw/general/plugins/module_utils/net_map | 1 + .../cifmw/general/plugins/modules/approve_csr.py | 1 + .../cifmw/general/plugins/modules/bridge_vlan.py | 1 + .../cifmw/general/plugins/modules/cephx_key.py | 1 + .../cifmw/general/plugins/modules/crawl_n_mask.py | 1 + .../cifmw/general/plugins/modules/generate_make_tasks.py | 1 + .../cifmw/general/plugins/modules/get_makefiles_env.py | 1 + .../cifmw/general/plugins/modules/krb_request.py | 1 + .../cifmw/general/plugins/modules/pem_read.py | 1 + .../cifmw/general/plugins/modules/tempest_list_allowed.py | 1 + .../cifmw/general/plugins/modules/tempest_list_skipped.py | 1 + 23 files changed, 23 insertions(+) create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/galaxy.yml create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/README.md create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_kustomize.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_make.py create mode 120000 
ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_net_map.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_script.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/discover_latest_image.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_gerrit_infix.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_refspec.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/to_nice_yaml_all.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/__init__.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/encoding create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/net_map create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/approve_csr.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/bridge_vlan.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/cephx_key.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/crawl_n_mask.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/generate_make_tasks.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/get_makefiles_env.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/krb_request.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/pem_read.py create mode 120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_allowed.py create mode 
120000 ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_skipped.py diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/galaxy.yml b/ci/playbooks/collections/ansible_collections/cifmw/general/galaxy.yml new file mode 120000 index 0000000000..23d66b633c --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/galaxy.yml @@ -0,0 +1 @@ +../../../../../../galaxy.yml \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/README.md b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/README.md new file mode 120000 index 0000000000..47a991346c --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/README.md @@ -0,0 +1 @@ +../../../../../../../plugins/README.md \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_kustomize.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_kustomize.py new file mode 120000 index 0000000000..bfd37d63ca --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_kustomize.py @@ -0,0 +1 @@ +../../../../../../../../plugins/action/ci_kustomize.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_make.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_make.py new file mode 120000 index 0000000000..058987b480 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_make.py @@ -0,0 +1 @@ +../../../../../../../../plugins/action/ci_make.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_net_map.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_net_map.py new file mode 120000 index 0000000000..d771958157 
--- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_net_map.py @@ -0,0 +1 @@ +../../../../../../../../plugins/action/ci_net_map.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_script.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_script.py new file mode 120000 index 0000000000..3592488e31 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/ci_script.py @@ -0,0 +1 @@ +../../../../../../../../plugins/action/ci_script.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/discover_latest_image.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/discover_latest_image.py new file mode 120000 index 0000000000..ea5ddab731 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/action/discover_latest_image.py @@ -0,0 +1 @@ +../../../../../../../../plugins/action/discover_latest_image.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_gerrit_infix.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_gerrit_infix.py new file mode 120000 index 0000000000..a605e9cc54 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_gerrit_infix.py @@ -0,0 +1 @@ +../../../../../../../../plugins/filter/reproducer_gerrit_infix.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_refspec.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_refspec.py new file mode 120000 index 0000000000..8d16a02f6c --- /dev/null +++ 
b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/reproducer_refspec.py @@ -0,0 +1 @@ +../../../../../../../../plugins/filter/reproducer_refspec.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/to_nice_yaml_all.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/to_nice_yaml_all.py new file mode 120000 index 0000000000..5a43b9562a --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/filter/to_nice_yaml_all.py @@ -0,0 +1 @@ +../../../../../../../../plugins/filter/to_nice_yaml_all.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/__init__.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/__init__.py new file mode 120000 index 0000000000..a3a6e677a5 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/__init__.py @@ -0,0 +1 @@ +../../../../../../../../plugins/module_utils/__init__.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/encoding b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/encoding new file mode 120000 index 0000000000..8004cd5d16 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/encoding @@ -0,0 +1 @@ +../../../../../../../../plugins/module_utils/encoding \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/net_map b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/net_map new file mode 120000 index 0000000000..26c415523f --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/module_utils/net_map @@ -0,0 +1 @@ +../../../../../../../../plugins/module_utils/net_map \ 
No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/approve_csr.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/approve_csr.py new file mode 120000 index 0000000000..79bbc6b3ff --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/approve_csr.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/approve_csr.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/bridge_vlan.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/bridge_vlan.py new file mode 120000 index 0000000000..0e2a39ba6b --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/bridge_vlan.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/bridge_vlan.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/cephx_key.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/cephx_key.py new file mode 120000 index 0000000000..0c919b3f8f --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/cephx_key.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/cephx_key.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/crawl_n_mask.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/crawl_n_mask.py new file mode 120000 index 0000000000..b50b410d3a --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/crawl_n_mask.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/crawl_n_mask.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/generate_make_tasks.py 
b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/generate_make_tasks.py new file mode 120000 index 0000000000..ee73be578d --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/generate_make_tasks.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/generate_make_tasks.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/get_makefiles_env.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/get_makefiles_env.py new file mode 120000 index 0000000000..964794a8e2 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/get_makefiles_env.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/get_makefiles_env.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/krb_request.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/krb_request.py new file mode 120000 index 0000000000..c73062b6e0 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/krb_request.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/krb_request.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/pem_read.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/pem_read.py new file mode 120000 index 0000000000..f76ed78062 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/pem_read.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/pem_read.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_allowed.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_allowed.py new file mode 120000 index 
0000000000..e26f4784b5 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_allowed.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/tempest_list_allowed.py \ No newline at end of file diff --git a/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_skipped.py b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_skipped.py new file mode 120000 index 0000000000..a8bfccff22 --- /dev/null +++ b/ci/playbooks/collections/ansible_collections/cifmw/general/plugins/modules/tempest_list_skipped.py @@ -0,0 +1 @@ +../../../../../../../../plugins/modules/tempest_list_skipped.py \ No newline at end of file From 5307f1095ac302fd69eeee628faf16ef565f6e31 Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Thu, 10 Jul 2025 22:55:30 +0200 Subject: [PATCH 239/480] Update: Don't fail if the monitoring process is not up anymore. We don't want the job to fail because monitoring had some issue. --- playbooks/update.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/playbooks/update.yml b/playbooks/update.yml index 4ebc5dc49f..46ae9f8c4e 100644 --- a/playbooks/update.yml +++ b/playbooks/update.yml @@ -141,6 +141,8 @@ cmd: >- kill $(cat {{ cifmw_basedir }}/tests/update/monitor_resources_changes.pid) + register: _kill_result + failed_when: _kill_result.rc not in [0, 1] when: cifmw_update_monitoring_pid.stat.exists | bool - name: Run post_update hooks From 93254ddc724e9b9a794054033e82946c94e8b9e2 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 23 Jul 2025 10:23:39 +0200 Subject: [PATCH 240/480] Trigger cifmw-pod-zuul-files on each change It is not a first time, that some molecule job is missing, because some role was added and cifmw-pod-zuul-files job was not triggered. Add the CI job and run it for each change that was done. 
Signed-off-by: Daniel Pawlik --- zuul.d/project-templates.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/zuul.d/project-templates.yaml b/zuul.d/project-templates.yaml index 95e7af7932..f2462560f4 100644 --- a/zuul.d/project-templates.yaml +++ b/zuul.d/project-templates.yaml @@ -12,6 +12,7 @@ dependencies: - openstack-k8s-operators-content-provider - cifmw-crc-podified-edpm-baremetal: *content_provider + - cifmw-pod-zuul-files - project-template: name: podified-multinode-edpm-pipeline @@ -22,6 +23,7 @@ - openstack-k8s-operators-content-provider - podified-multinode-edpm-deployment-crc: *content_provider - podified-multinode-hci-deployment-crc: *content_provider + - cifmw-pod-zuul-files - project-template: name: podified-ironic-operator-pipeline @@ -29,11 +31,11 @@ Project template to run content provider with ironic podified job. github-check: jobs: - - noop - openstack-k8s-operators-content-provider - podified-multinode-ironic-deployment: dependencies: - openstack-k8s-operators-content-provider + - cifmw-pod-zuul-files - project-template: name: podified-multinode-edpm-ci-framework-pipeline @@ -50,6 +52,7 @@ - cifmw-crc-podified-edpm-baremetal: *content_provider - podified-multinode-hci-deployment-crc: *content_provider - cifmw-multinode-tempest: *content_provider + - cifmw-pod-zuul-files - project-template: name: data-plane-adoption-ci-framework-pipeline @@ -65,6 +68,7 @@ - adoption-standalone-to-crc-ceph-provider: dependencies: - openstack-k8s-operators-content-provider + - cifmw-pod-zuul-files - project-template: name: data-plane-adoption-pipeline @@ -76,3 +80,4 @@ - adoption-standalone-to-crc-ceph-provider: dependencies: - openstack-k8s-operators-content-provider + - cifmw-pod-zuul-files From a02aab5403c88e26bb8461877f2f481678dd2260 Mon Sep 17 00:00:00 2001 From: Antonio Romito Date: Thu, 24 Jul 2025 11:29:47 +0200 Subject: [PATCH 241/480] Add cifmw_snr_nhc role for SNR and NHC deployment This role automates the deployment and 
verification of Self Node Remediation (SNR) and Node Health Check (NHC) components on OpenShift clusters. Includes namespace creation, operator installation, CR verification, and comprehensive molecule testing framework. --- docs/dictionary/en-custom.txt | 8 +- playbooks/snr-nhc.yml | 9 + roles/cifmw_snr_nhc/README.md | 192 ++++++ roles/cifmw_snr_nhc/TESTING.md | 267 ++++++++ roles/cifmw_snr_nhc/defaults/main.yml | 6 + roles/cifmw_snr_nhc/meta/main.yml | 12 + .../molecule/default/converge.yml | 420 ++++++++++++ .../molecule/default/molecule.yml | 45 ++ .../molecule/default/prepare.yml | 51 ++ .../cifmw_snr_nhc/molecule/default/verify.yml | 47 ++ roles/cifmw_snr_nhc/molecule/requirements.txt | 11 + roles/cifmw_snr_nhc/tasks/main.yml | 596 ++++++++++++++++++ zuul.d/molecule.yaml | 11 + zuul.d/projects.yaml | 1 + 14 files changed, 1675 insertions(+), 1 deletion(-) create mode 100644 playbooks/snr-nhc.yml create mode 100644 roles/cifmw_snr_nhc/README.md create mode 100644 roles/cifmw_snr_nhc/TESTING.md create mode 100644 roles/cifmw_snr_nhc/defaults/main.yml create mode 100644 roles/cifmw_snr_nhc/meta/main.yml create mode 100644 roles/cifmw_snr_nhc/molecule/default/converge.yml create mode 100644 roles/cifmw_snr_nhc/molecule/default/molecule.yml create mode 100644 roles/cifmw_snr_nhc/molecule/default/prepare.yml create mode 100644 roles/cifmw_snr_nhc/molecule/default/verify.yml create mode 100644 roles/cifmw_snr_nhc/molecule/requirements.txt create mode 100644 roles/cifmw_snr_nhc/tasks/main.yml diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 65819be42f..d308eed2c0 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -11,6 +11,7 @@ ansibletest ansibletests ansibleuser ansiblevars +APIs apiversion apivips appcreds @@ -85,6 +86,7 @@ cli client clusterimageset clusterpool +ClusterServiceVersion cmd cn cni @@ -229,6 +231,7 @@ icjbuue icokicagy IDM IdP +Idempotency idrac iface igfsbg @@ -291,9 +294,9 @@ LDAP ldp 
libguestfs libvirt -libvirt's libvirtd libvirterror +libvirt's ljaumtawojy ljaumtaxojy ljaumtayojy @@ -356,6 +359,7 @@ networkmanager networktype nfs nftables +nhc nic nigzpbgugpsavdmfyl nlcggvjgnsdxn @@ -364,6 +368,7 @@ nmstate nncp nobuild nodeexporter +NodeHealthCheck nodenetworkconfigurationpolicy nodepool nodeps @@ -511,6 +516,7 @@ Sinha sizepercent skbg skiplist +snr specificities spnego spxzvbhvtzxmsihbyb diff --git a/playbooks/snr-nhc.yml b/playbooks/snr-nhc.yml new file mode 100644 index 0000000000..86ae757238 --- /dev/null +++ b/playbooks/snr-nhc.yml @@ -0,0 +1,9 @@ +--- +- name: Execute Self Node Remediation role + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + vars: + cifmw_snr_nhc_cleanup_before_install: "{{ cleanup_before_install | default(false) }}" + cifmw_snr_nhc_cleanup_namespace: "{{ cleanup_namespace | default(false) }}" + roles: + - cifmw_snr_nhc diff --git a/roles/cifmw_snr_nhc/README.md b/roles/cifmw_snr_nhc/README.md new file mode 100644 index 0000000000..956076f33d --- /dev/null +++ b/roles/cifmw_snr_nhc/README.md @@ -0,0 +1,192 @@ +# cifmw_snr_nhc + +Apply Self Node Remediation and Node Health Check Custom Resources on OpenShift. + +## Overview + +This Ansible role automates the deployment and configuration of: +- **Self Node Remediation (SNR)** - Automatically remediates unhealthy nodes +- **Node Health Check (NHC)** - Monitors node health and triggers remediation + +The role creates the necessary operators, subscriptions, and custom resources to enable automatic node remediation in OpenShift clusters. + +## Privilege escalation + +None - all actions use the provided kubeconfig and require no additional host privileges. + +## Parameters + +* `cifmw_snr_nhc_kubeconfig`: (String) Path to the kubeconfig file. +* `cifmw_snr_nhc_kubeadmin_password_file`: (String) Path to the kubeadmin password file. +* `cifmw_snr_nhc_namespace`: (String) Namespace used for SNR and NHC resources. 
Default: `openshift-workload-availability` +* `cifmw_snr_nhc_cleanup_before_install`: (Boolean) If true, removes existing SNR and NHC resources before installation. Default: `false` +* `cifmw_snr_nhc_cleanup_namespace`: (Boolean) If true, deletes the entire namespace before installation. Default: `false` + +## Role Tasks + +The role performs the following tasks in sequence: + +1. **Cleanup (Optional)** - Removes existing resources if cleanup is enabled +2. **Create Namespace** - Creates the target namespace if it doesn't exist +3. **Create OperatorGroup** - Sets up the OperatorGroup for operator deployment +4. **Create SNR Subscription** - Deploys the Self Node Remediation operator +5. **Wait for SNR Deployment** - Waits for the SNR operator to be ready +6. **Create NHC Subscription** - Deploys the Node Health Check operator +7. **Wait for CSV** - Waits for the ClusterServiceVersion to be ready +8. **Create NHC CR** - Creates the NodeHealthCheck custom resource + +## Examples + +### Basic Usage + +```yaml +- name: Configure SNR and NHC + hosts: masters + roles: + - role: cifmw_snr_nhc + cifmw_snr_nhc_kubeconfig: "/home/zuul/.kube/config" + cifmw_snr_nhc_kubeadmin_password_file: "/home/zuul/.kube/kubeadmin-password" + cifmw_snr_nhc_namespace: openshift-workload-availability +``` + +### Custom Namespace + +```yaml +- name: Configure SNR and NHC in custom namespace + hosts: masters + roles: + - role: cifmw_snr_nhc + cifmw_snr_nhc_kubeconfig: "/path/to/kubeconfig" + cifmw_snr_nhc_kubeadmin_password_file: "/path/to/password" + cifmw_snr_nhc_namespace: custom-workload-namespace +``` + +### With Cleanup + +```yaml +- name: Configure SNR and NHC with cleanup + hosts: masters + roles: + - role: cifmw_snr_nhc + cifmw_snr_nhc_kubeconfig: "/home/zuul/.kube/config" + cifmw_snr_nhc_cleanup_before_install: true + cifmw_snr_nhc_cleanup_namespace: false +``` + +### Complete Cleanup and Reinstall + +```yaml +- name: Complete cleanup and reinstall SNR and NHC + hosts: masters + 
roles: + - role: cifmw_snr_nhc + cifmw_snr_nhc_kubeconfig: "/home/zuul/.kube/config" + cifmw_snr_nhc_cleanup_before_install: true + cifmw_snr_nhc_cleanup_namespace: true +``` + +## Testing + +This role includes comprehensive testing using Molecule and pytest. Tests validate: +- Role syntax and structure +- Individual task execution +- Idempotency +- Error handling +- Integration with Kubernetes APIs + +### Quick Test Run + +```bash +# Install test dependencies +pip install --user -r molecule/requirements.txt +ansible-galaxy collection install -r molecule/default/requirements.yml --force + +# Run all tests +molecule test + +# Run specific test phases +molecule converge # Execute role +molecule verify # Run verification tests +``` + +### Development Testing + +```bash +# Quick development cycle +molecule converge # Apply changes +molecule verify # Check results +molecule destroy # Clean up +``` + +For detailed testing information, see [TESTING.md](TESTING.md). + +## Requirements + +### System Requirements + +- Python 3.9+ +- Ansible 2.14+ +- Access to OpenShift/Kubernetes cluster + +### Ansible Collections + +- `kubernetes.core` (>=6.0.0) +- `ansible.posix` +- `community.general` + +### Python Dependencies + +- `kubernetes` (>=24.0.0) +- `pyyaml` (>=6.0.0) +- `jsonpatch` (>=1.32) + +## Development + +### Contributing + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Run tests: `molecule test` +5. Submit a pull request + +### Code Style + +- Follow Ansible best practices +- Use descriptive task names +- Include proper error handling +- Test all changes with molecule + +### Linting + +```bash +# Run linting checks +ansible-lint tasks/main.yml +yamllint . +``` + +## Troubleshooting + +### Common Issues + +1. **Permission denied**: Ensure kubeconfig has proper permissions +2. **Namespace already exists**: Role handles existing namespaces gracefully +3. 
**Operator not ready**: Check cluster resources and connectivity + +### Debug Mode + +```bash +# Run with debug output +ansible-playbook -vvv your-playbook.yml +``` + +## License + +This role is distributed under the terms of the Apache License 2.0. + +## Support + +For issues and questions: +- Check the [TESTING.md](TESTING.md) for testing guidance +- Review the troubleshooting section above +- Submit issues to the project repository diff --git a/roles/cifmw_snr_nhc/TESTING.md b/roles/cifmw_snr_nhc/TESTING.md new file mode 100644 index 0000000000..327576857a --- /dev/null +++ b/roles/cifmw_snr_nhc/TESTING.md @@ -0,0 +1,267 @@ +# Testing Guide for cifmw_snr_nhc Role + +This document describes how to test the `cifmw_snr_nhc` role using Molecule and pytest. + +## Prerequisites + +Before running tests, you need to install the required dependencies: + +```bash +# Install Python dependencies +pip install --user -r molecule/requirements.txt + +# Install Ansible collections +ansible-galaxy collection install -r molecule/default/requirements.yml --force +``` + +## Test Framework + +This role uses two testing frameworks: + +1. **Molecule** - For integration testing and role behavior validation +2. **Pytest** - For unit testing and structural validation + +## Running Tests + +### Quick Start + +```bash +# Run all molecule tests (recommended) +~/.local/bin/molecule test + +# Run only syntax and lint checks +ansible-lint tasks/main.yml +yamllint . 
+ +# Run pytest unit tests +pytest tests/ -v +``` + +### Molecule Tests + +Molecule provides end-to-end testing of the role: + +```bash +# Run full test suite (recommended) +~/.local/bin/molecule test + +# Run individual steps for development +~/.local/bin/molecule create # Create test environment +~/.local/bin/molecule converge # Run the role +~/.local/bin/molecule verify # Run verification tests +~/.local/bin/molecule destroy # Clean up + +# Quick development cycle +~/.local/bin/molecule converge # Apply changes +~/.local/bin/molecule verify # Check results +``` + +### Unit Tests + +Pytest runs structural and unit tests: + +```bash +# Run all pytest tests +pytest tests/ -v + +# Run specific test categories +pytest tests/ -v -m "not integration" # Unit tests only +pytest tests/ -v -m "integration" # Integration tests only + +# Run specific test file +pytest tests/test_cifmw_snr_nhc.py -v +``` + +## Test Structure + +``` +├── molecule/ +│ ├── default/ +│ │ ├── molecule.yml # Molecule configuration +│ │ ├── converge.yml # Playbook to test the role +│ │ ├── verify.yml # Verification tests +│ │ ├── prepare.yml # Environment preparation +│ │ └── requirements.yml # Ansible Galaxy dependencies +│ └── requirements.txt # Python dependencies +├── tests/ +│ ├── __init__.py +│ └── test_cifmw_snr_nhc.py # Unit tests +├── pytest.ini # Pytest configuration +├── .yamllint # YAML linting rules +└── .ansible-lint # Ansible linting rules +``` + +## Test Scenarios + +### Molecule Test Scenarios + +The molecule tests validate all 7 tasks of the role: + +1. **Create Namespace** - Tests namespace creation and idempotency +2. **Create OperatorGroup** - Tests OperatorGroup creation and idempotency +3. **Create SNR Subscription** - Tests SNR subscription creation and idempotency +4. **Wait for SNR Deployment** - Tests deployment waiting logic and timeout handling +5. **Create NHC Subscription** - Tests NHC subscription creation and idempotency +6. 
**Wait for CSV** - Tests ClusterServiceVersion waiting logic and timeout handling +7. **Create NHC CR** - Tests NodeHealthCheck custom resource creation and idempotency + +Each test includes: +- **Syntax validation** - Ensures Ansible syntax is correct +- **Role execution** - Tests role with mock Kubernetes environment +- **Idempotency checks** - Ensures role can run multiple times safely +- **Error handling** - Validates appropriate error handling +- **Verification** - Validates expected outcomes + +### Unit Test Scenarios + +1. **File Structure** - Validates role directory structure +2. **YAML Validation** - Ensures all YAML files are valid +3. **Variable Consistency** - Checks variable definitions +4. **Metadata Validation** - Validates role metadata + +## Test Configuration + +### Mock Environment + +The tests use mock Kubernetes configurations: + +- Mock kubeconfig with test cluster settings (`/tmp/kubeconfig`) +- Mock credentials for authentication (`/tmp/kubeadmin-password`) +- Test namespace: `workload-availability` +- Mock server: `api.test.example.com:6443` + +### Test Variables + +```yaml +# molecule/default/converge.yml +vars: + cifmw_snr_nhc_kubeconfig: /tmp/kubeconfig + cifmw_snr_nhc_namespace: workload-availability +``` + +### Expected Behavior + +In the test environment: +- **Connection failures are expected** - Tests use mock endpoints +- **All tasks should complete without fatal errors** - Error handling is validated +- **Idempotency is verified** - Each task runs twice to ensure consistency +- **Proper error messages are displayed** - Mock environment limitations are handled gracefully + +## Continuous Integration + +The tests are designed to run in CI/CD environments: + +- **Container-based**: Uses Podman containers for isolation +- **No external dependencies**: Mocks Kubernetes/OpenShift APIs +- **Fast execution**: Optimized for quick feedback (~2-3 minutes) +- **Comprehensive coverage**: Tests all role tasks individually + +## Troubleshooting 
+ +### Common Issues + +1. **Collection not found**: + ```bash + ansible-galaxy collection install -r molecule/default/requirements.yml --force + ``` + +2. **Molecule not found**: + ```bash + pip install --user -r molecule/requirements.txt + ``` + +3. **Podman not available**: Install podman or configure docker driver in `molecule.yml` + +4. **Permission denied**: Ensure user has container runtime permissions + +### Debug Mode + +```bash +# Run with verbose output +~/.local/bin/molecule test --debug + +# Keep environment after failure +~/.local/bin/molecule test --destroy=never + +# Check detailed logs +~/.local/bin/molecule converge -- --vvv +``` + +### Linting + +```bash +# Run all linting checks +ansible-lint tasks/main.yml +yamllint . +yamllint molecule/default/*.yml + +# Check specific files +ansible-lint tasks/main.yml --parseable +yamllint molecule/default/converge.yml -d relaxed +``` + +## Development Workflow + +1. **Make changes** to the role +2. **Run syntax check**: `ansible-lint tasks/main.yml` +3. **Run linting**: `yamllint .` +4. **Test changes**: `~/.local/bin/molecule converge` +5. **Verify results**: `~/.local/bin/molecule verify` +6. **Run full test suite**: `~/.local/bin/molecule test` +7. **Clean up**: `~/.local/bin/molecule destroy` + +## Test Customization + +### Adding New Tests + +1. **Molecule tests**: Add tasks to `molecule/default/verify.yml` +2. **Unit tests**: Add functions to `tests/test_cifmw_snr_nhc.py` +3. **Integration tests**: Mark with `@pytest.mark.integration` + +### Modifying Test Environment + +1. **Test variables**: Update `molecule/default/converge.yml` +2. **Mock data**: Update `molecule/default/prepare.yml` +3. 
**Test configuration**: Update `molecule/default/molecule.yml` + +## Test Results Interpretation + +### Successful Test Run + +A successful test run should show: +``` +PLAY RECAP ********************************************************************* +instance : ok=38 changed=0 unreachable=0 failed=0 +``` + +### Expected Warnings + +The following warnings/errors are normal in the test environment: +- `Name or service not known` - Mock server is not real +- `MODULE FAILURE` in debug output - Expected with mock Kubernetes API +- `Max retries exceeded` - Connection timeouts are expected + +### Test Coverage + +Current test coverage includes: +- All 7 role tasks individually tested +- Idempotency verification for each task +- Error handling validation +- Mock environment setup and teardown +- Syntax and linting validation +- Variable consistency checks + +## Performance + +- **Total test time**: ~2-3 minutes +- **Individual task tests**: ~10-15 seconds each +- **Full molecule cycle**: ~1-2 minutes +- **Container startup**: ~30 seconds + +## Best Practices + +1. **Always run full test suite** before committing changes +2. **Use development cycle** (`converge` → `verify`) for quick iterations +3. **Check linting** before running molecule tests +4. **Review test output** for any unexpected changes +5. 
**Keep tests updated** when modifying role functionality diff --git a/roles/cifmw_snr_nhc/defaults/main.yml b/roles/cifmw_snr_nhc/defaults/main.yml new file mode 100644 index 0000000000..73ac086f25 --- /dev/null +++ b/roles/cifmw_snr_nhc/defaults/main.yml @@ -0,0 +1,6 @@ +--- +cifmw_snr_nhc_kubeconfig: "/home/{{ ansible_user | default('zuul') }}/.kube/config" +cifmw_snr_nhc_kubeadmin_password_file: "/home/{{ ansible_user | default('zuul') }}/.kube/kubeadmin-password" +cifmw_snr_nhc_namespace: openshift-workload-availability +cifmw_snr_nhc_cleanup_before_install: false +cifmw_snr_nhc_cleanup_namespace: false diff --git a/roles/cifmw_snr_nhc/meta/main.yml b/roles/cifmw_snr_nhc/meta/main.yml new file mode 100644 index 0000000000..517a5c4875 --- /dev/null +++ b/roles/cifmw_snr_nhc/meta/main.yml @@ -0,0 +1,12 @@ +--- +galaxy_info: + author: CI Framework + description: CI Framework Role -- cifmw_snr_nhc + company: Red Hat + license: Apache-2.0 + min_ansible_version: "2.14" + namespace: cifmw + galaxy_tags: + - cifmw + +dependencies: [] diff --git a/roles/cifmw_snr_nhc/molecule/default/converge.yml b/roles/cifmw_snr_nhc/molecule/default/converge.yml new file mode 100644 index 0000000000..472091d89b --- /dev/null +++ b/roles/cifmw_snr_nhc/molecule/default/converge.yml @@ -0,0 +1,420 @@ +--- +- name: Converge + hosts: all + gather_facts: false + vars: + cifmw_snr_nhc_kubeconfig: /tmp/kubeconfig + cifmw_snr_nhc_namespace: workload-availability + tasks: + - name: Test that required variables are defined + ansible.builtin.assert: + that: + - cifmw_snr_nhc_kubeconfig is defined + - cifmw_snr_nhc_namespace is defined + fail_msg: "Required variables are not defined" + success_msg: "Required variables are defined" + + - name: Display test information + ansible.builtin.debug: + msg: "Testing role cifmw_snr_nhc with kubeconfig: {{ cifmw_snr_nhc_kubeconfig }} and namespace: {{ cifmw_snr_nhc_namespace }}" + + - name: Test that Python kubernetes library is available + 
ansible.builtin.command: python3 -c "import kubernetes; print('Library available')" + register: k8s_test + changed_when: false + + - name: Display kubernetes library test result + ansible.builtin.debug: + msg: "{{ k8s_test.stdout }}" + + - name: Test that mock kubeconfig exists + ansible.builtin.stat: + path: "{{ cifmw_snr_nhc_kubeconfig }}" + register: kubeconfig_stat + + - name: Assert kubeconfig exists + ansible.builtin.assert: + that: + - kubeconfig_stat.stat.exists + fail_msg: "Kubeconfig file does not exist" + success_msg: "Kubeconfig file exists" + + - name: Test kubernetes.core.k8s module availability + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: v1 + kind: Namespace + name: test-connection + state: present + validate_certs: false + register: k8s_test_result + failed_when: false + changed_when: false + + - name: Display k8s connection test result + ansible.builtin.debug: + msg: "K8s connection test result: {{ k8s_test_result.failed }}" + + # Execute the complete role first for integration testing + - name: Execute the complete cifmw_snr_nhc role + block: + - name: Include the cifmw_snr_nhc role + ansible.builtin.include_role: + name: cifmw_snr_nhc + vars: + # Force validate_certs: false for testing + cifmw_snr_nhc_validate_certs: false + rescue: + - name: Capture role execution error + ansible.builtin.set_fact: + role_execution_error: "{{ ansible_failed_task }}" + + - name: Display role execution error details + ansible.builtin.debug: + msg: | + Role execution failed with error: + {{ role_execution_error }} + + - name: Analyze specific error patterns + ansible.builtin.debug: + msg: | + Error analysis: + - Connection error: {{ 'connection' in role_execution_error.msg | default('') | lower }} + - Authentication error: {{ 'auth' in role_execution_error.msg | default('') | lower }} + - API error: {{ 'api' in role_execution_error.msg | default('') | lower }} + - Timeout error: {{ 'timeout' in role_execution_error.msg | 
default('') | lower }} + + - name: Continue with test evaluation + ansible.builtin.debug: + msg: "Role failed as expected in test environment - this is normal" + + # VERIFICATION TASK 1: Verify namespace creation + - name: "VERIFICATION 1: Verify namespace creation" + block: + - name: Verify namespace exists + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ cifmw_snr_nhc_namespace }}" + validate_certs: false + register: namespace_verification_result + failed_when: false + + - name: Display namespace verification result + ansible.builtin.debug: + msg: "Namespace verification result: {{ namespace_verification_result }}" + + - name: Test namespace creation idempotency + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ cifmw_snr_nhc_namespace }}" + validate_certs: false + register: namespace_idempotency_result + failed_when: false + + - name: Assert expected behavior for namespace creation + ansible.builtin.assert: + that: + - namespace_verification_result.failed == namespace_idempotency_result.failed + fail_msg: "Namespace creation behavior is not consistent" + success_msg: "Namespace creation task behaves consistently" + + # VERIFICATION TASK 2: Verify OperatorGroup creation + - name: "VERIFICATION 2: Verify OperatorGroup creation" + block: + - name: Verify OperatorGroup exists + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: workload-availability-operator-group + namespace: "{{ cifmw_snr_nhc_namespace }}" + validate_certs: false + register: operatorgroup_verification_result + failed_when: false + + - name: Display OperatorGroup verification result + ansible.builtin.debug: + msg: "OperatorGroup 
verification result: {{ operatorgroup_verification_result }}" + + - name: Test OperatorGroup creation idempotency + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: workload-availability-operator-group + namespace: "{{ cifmw_snr_nhc_namespace }}" + validate_certs: false + register: operatorgroup_idempotency_result + failed_when: false + + - name: Assert expected behavior for OperatorGroup creation + ansible.builtin.assert: + that: + - operatorgroup_verification_result.failed == operatorgroup_idempotency_result.failed + fail_msg: "OperatorGroup creation behavior is not consistent" + success_msg: "OperatorGroup creation task behaves consistently" + + # VERIFICATION TASK 3: Verify SNR Subscription creation + - name: "VERIFICATION 3: Verify SNR Subscription creation" + block: + - name: Verify SNR Subscription exists + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: self-node-remediation-operator + namespace: "{{ cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: self-node-remediation + package: self-node-remediation + source: redhat-operators + sourceNamespace: openshift-marketplace + validate_certs: false + register: snr_subscription_verification_result + failed_when: false + + - name: Display SNR Subscription verification result + ansible.builtin.debug: + msg: "SNR Subscription verification result: {{ snr_subscription_verification_result }}" + + - name: Test SNR Subscription creation idempotency + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: self-node-remediation-operator + namespace: "{{ 
cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: self-node-remediation + package: self-node-remediation + source: redhat-operators + sourceNamespace: openshift-marketplace + validate_certs: false + register: snr_subscription_idempotency_result + failed_when: false + + - name: Assert expected behavior for SNR Subscription creation + ansible.builtin.assert: + that: + - snr_subscription_verification_result.failed == snr_subscription_idempotency_result.failed + fail_msg: "SNR Subscription creation behavior is not consistent" + success_msg: "SNR Subscription creation task behaves consistently" + + # VERIFICATION TASK 4: Verify SNR deployment readiness + - name: "VERIFICATION 4: Verify SNR deployment readiness" + block: + - name: Verify SNR deployment status + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: apps/v1 + kind: Deployment + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-controller-manager + validate_certs: false + register: snr_deployment_verification_result + failed_when: false + + - name: Display SNR deployment verification result + ansible.builtin.debug: + msg: "SNR deployment verification result: {{ snr_deployment_verification_result }}" + + - name: Test deployment verification behavior + ansible.builtin.debug: + msg: "Testing deployment verification logic - expected to fail in mock environment" + + - name: Assert SNR deployment verification behaves as expected + ansible.builtin.assert: + that: + - snr_deployment_verification_result.failed != None + fail_msg: "SNR deployment verification should produce consistent results" + success_msg: "SNR deployment verification logic behaves as expected" + + # VERIFICATION TASK 5: Verify NHC Subscription creation + - name: "VERIFICATION 5: Verify NHC Subscription creation" + block: + - name: Verify NHC Subscription exists + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present 
+ resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: node-health-check-operator + namespace: "{{ cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: node-healthcheck-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + package: node-healthcheck-operator + validate_certs: false + register: nhc_subscription_verification_result + failed_when: false + + - name: Display NHC Subscription verification result + ansible.builtin.debug: + msg: "NHC Subscription verification result: {{ nhc_subscription_verification_result }}" + + - name: Test NHC Subscription creation idempotency + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: node-health-check-operator + namespace: "{{ cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: node-healthcheck-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + package: node-healthcheck-operator + validate_certs: false + register: nhc_subscription_idempotency_result + failed_when: false + + - name: Assert expected behavior for NHC Subscription creation + ansible.builtin.assert: + that: + - nhc_subscription_verification_result.failed == nhc_subscription_idempotency_result.failed + fail_msg: "NHC Subscription creation behavior is not consistent" + success_msg: "NHC Subscription creation task behaves consistently" + + # VERIFICATION TASK 6: Verify CSV status + - name: "VERIFICATION 6: Verify CSV status" + block: + - name: Verify CSV status + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + namespace: "{{ cifmw_snr_nhc_namespace }}" + validate_certs: false + register: csv_verification_result + failed_when: false + + - 
name: Display CSV verification result + ansible.builtin.debug: + msg: "CSV verification result: {{ csv_verification_result }}" + + - name: Test CSV verification behavior + ansible.builtin.debug: + msg: "Testing CSV verification logic - expected to fail in mock environment" + + - name: Assert CSV verification behaves as expected + ansible.builtin.assert: + that: + - csv_verification_result.failed != None + fail_msg: "CSV verification should produce consistent results" + success_msg: "CSV verification logic behaves as expected" + + # VERIFICATION TASK 7: Verify NHC CR creation + - name: "VERIFICATION 7: Verify NHC CR creation" + block: + - name: Verify NHC CR exists + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + metadata: + name: nodehealthcheck-sample + spec: + minHealthy: 51% + remediationTemplate: + apiVersion: self-node-remediation.medik8s.io/v1alpha1 + name: self-node-remediation-automatic-strategy-template + namespace: "{{ cifmw_snr_nhc_namespace }}" + kind: SelfNodeRemediationTemplate + selector: + matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + unhealthyConditions: + - type: Ready + status: "False" + duration: 30s + - type: Ready + status: Unknown + duration: 30s + validate_certs: false + register: nhc_cr_verification_result + failed_when: false + + - name: Display NHC CR verification result + ansible.builtin.debug: + msg: "NHC CR verification result: {{ nhc_cr_verification_result }}" + + - name: Test NHC CR creation idempotency + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + metadata: + name: nodehealthcheck-sample + spec: + minHealthy: 51% + remediationTemplate: + apiVersion: self-node-remediation.medik8s.io/v1alpha1 + name: 
self-node-remediation-automatic-strategy-template + namespace: "{{ cifmw_snr_nhc_namespace }}" + kind: SelfNodeRemediationTemplate + selector: + matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + unhealthyConditions: + - type: Ready + status: "False" + duration: 30s + - type: Ready + status: Unknown + duration: 30s + validate_certs: false + register: nhc_cr_idempotency_result + failed_when: false + + - name: Assert expected behavior for NHC CR creation + ansible.builtin.assert: + that: + - nhc_cr_verification_result.failed == nhc_cr_idempotency_result.failed + fail_msg: "NHC CR creation behavior is not consistent" + success_msg: "NHC CR creation task behaves consistently" + + - name: Verify role structure and logic + ansible.builtin.debug: + msg: "Role execution and verification completed - errors are expected in test environment without real K8s cluster" diff --git a/roles/cifmw_snr_nhc/molecule/default/molecule.yml b/roles/cifmw_snr_nhc/molecule/default/molecule.yml new file mode 100644 index 0000000000..be4602e4b7 --- /dev/null +++ b/roles/cifmw_snr_nhc/molecule/default/molecule.yml @@ -0,0 +1,45 @@ +--- +dependency: + name: galaxy + options: + requirements-file: requirements.yml + force: true + +driver: + name: podman + +platforms: + - name: instance + image: registry.access.redhat.com/ubi9/ubi:latest + pre_build_image: true + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + tmpfs: + - /run + - /tmp + privileged: true + command: "sleep infinity" + capabilities: + - SYS_ADMIN + +provisioner: + name: ansible + inventory: + group_vars: + all: + cifmw_snr_nhc_kubeconfig: "/tmp/kubeconfig" + cifmw_snr_nhc_kubeadmin_password_file: "/tmp/kubeadmin-password" + cifmw_snr_nhc_namespace: "test-workload-availability" + ansible_python_interpreter: /usr/bin/python3 + +verifier: + name: ansible + +scenario: + test_sequence: + - dependency + - create + - prepare + - converge + - verify + - destroy diff --git 
a/roles/cifmw_snr_nhc/molecule/default/prepare.yml b/roles/cifmw_snr_nhc/molecule/default/prepare.yml new file mode 100644 index 0000000000..9f34b4d0e3 --- /dev/null +++ b/roles/cifmw_snr_nhc/molecule/default/prepare.yml @@ -0,0 +1,51 @@ +--- +- name: Prepare + hosts: all + gather_facts: true + tasks: + - name: Install Python pip and dependencies using dnf + ansible.builtin.dnf: + name: + - python3-pip + - python3-devel + - gcc + state: present + + - name: Install Python dependencies + ansible.builtin.pip: + name: + - kubernetes>=12.0.0 + - pyyaml>=5.4.0 + - jsonpatch + state: present + executable: /usr/bin/pip3 + + - name: Create mock kubeconfig file + ansible.builtin.copy: + content: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: >- + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJBQ0NRRHVOSFpkOUhxL0RMQS0tLS0tCk9QRFJBUUVGSUFBVGVHMUVNQkdBMVVFQ0F3S1JteGlUMXBWZERsb1cweENLQWNEVEExUXpFOTQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + server: https://api.test.example.com:6443 + name: test-cluster + contexts: + - context: + cluster: test-cluster + user: test-user + name: test-context + current-context: test-context + kind: Config + users: + - name: test-user + user: + token: test-token + dest: /tmp/kubeconfig + mode: '0600' + + - name: Create mock kubeadmin password file + ansible.builtin.copy: + content: "test-password123" + dest: /tmp/kubeadmin-password + mode: '0600' diff --git a/roles/cifmw_snr_nhc/molecule/default/verify.yml b/roles/cifmw_snr_nhc/molecule/default/verify.yml new file mode 100644 index 0000000000..abac81a8a5 --- /dev/null +++ b/roles/cifmw_snr_nhc/molecule/default/verify.yml @@ -0,0 +1,47 @@ +--- +- name: Verify + hosts: all + gather_facts: false + tasks: + - name: Check if Python kubernetes library is installed + ansible.builtin.command: >- + python3 -c "import kubernetes; print('kubernetes library version:', kubernetes.__version__)" + register: k8s_lib_check + changed_when: false + + - name: Display kubernetes library 
version + ansible.builtin.debug: + msg: "{{ k8s_lib_check.stdout }}" + + - name: Verify kubeconfig mock file exists + ansible.builtin.stat: + path: /tmp/kubeconfig + register: kubeconfig_verify + + - name: Assert kubeconfig mock file exists + ansible.builtin.assert: + that: + - kubeconfig_verify.stat.exists + fail_msg: "Mock kubeconfig file was not created" + success_msg: "Mock kubeconfig file exists" + + - name: Verify kubeadmin password mock file exists + ansible.builtin.stat: + path: /tmp/kubeadmin-password + register: kubeadmin_verify + + - name: Assert kubeadmin password mock file exists + ansible.builtin.assert: + that: + - kubeadmin_verify.stat.exists + fail_msg: "Mock kubeadmin password file was not created" + success_msg: "Mock kubeadmin password file exists" + + - name: Test Python yaml library + ansible.builtin.command: python3 -c "import yaml; print('yaml library works')" + register: yaml_check + changed_when: false + + - name: Display yaml test result + ansible.builtin.debug: + msg: "{{ yaml_check.stdout }}" diff --git a/roles/cifmw_snr_nhc/molecule/requirements.txt b/roles/cifmw_snr_nhc/molecule/requirements.txt new file mode 100644 index 0000000000..0a3cdc63bb --- /dev/null +++ b/roles/cifmw_snr_nhc/molecule/requirements.txt @@ -0,0 +1,11 @@ +# Python dependencies for molecule testing +molecule>=6.0.0 +molecule-plugins[podman]>=23.0.0 +ansible-core>=2.14.0 +ansible-lint>=6.0.0 +yamllint>=1.26.0 +pytest>=7.0.0 +pytest-ansible>=4.0.0 +kubernetes>=24.0.0 +pyyaml>=6.0.0 +jsonpatch>=1.32 diff --git a/roles/cifmw_snr_nhc/tasks/main.yml b/roles/cifmw_snr_nhc/tasks/main.yml new file mode 100644 index 0000000000..2a9c7322ab --- /dev/null +++ b/roles/cifmw_snr_nhc/tasks/main.yml @@ -0,0 +1,596 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Cleanup existing resources before installation + when: cifmw_snr_nhc_cleanup_before_install | bool + block: + - name: Check if NodeHealthCheck exists and is active + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + register: nhc_check + ignore_errors: true + + - name: Check for active SelfNodeRemediations + when: nhc_check.resources | length > 0 + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediation + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: active_remediations + ignore_errors: true + + - name: Display active remediations info + when: + - nhc_check.resources | length > 0 + - active_remediations.resources | length > 0 + ansible.builtin.debug: + msg: | + Found {{ active_remediations.resources | length }} active SelfNodeRemediation(s): + {% for remediation in active_remediations.resources %} + - Name: {{ remediation.metadata.name }} + Node: {{ remediation.spec.nodeName | default('Unknown') }} + Status: {{ remediation.status.phase | default('Unknown') }} + {% endfor %} + + - name: Disable NodeHealthCheck to stop active remediations + when: nhc_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + state: present + resource_definition: + apiVersion: 
remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + metadata: + name: nodehealthcheck-sample + spec: + minHealthy: 100% + remediationTemplate: + apiVersion: self-node-remediation.medik8s.io/v1alpha1 + name: self-node-remediation-automatic-strategy-template + namespace: "{{ cifmw_snr_nhc_namespace }}" + kind: SelfNodeRemediationTemplate + selector: + matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + unhealthyConditions: + - type: Ready + status: "False" + duration: 999999s + - type: Ready + status: Unknown + duration: 999999s + failed_when: false + + - name: Wait for active remediations to stop + when: nhc_check.resources | length > 0 + ansible.builtin.pause: + seconds: 30 + failed_when: false + + - name: Delete existing NodeHealthCheck resources + when: nhc_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + state: absent + failed_when: false + register: nhc_deletion + + - name: Check for blocking remediations when deletion fails + when: + - nhc_check.resources | length > 0 + - nhc_deletion is failed + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediation + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: blocking_remediations + failed_when: false + + - name: Display blocking remediation details + when: + - nhc_check.resources | length > 0 + - nhc_deletion is failed + - blocking_remediations.resources | length > 0 + ansible.builtin.debug: + msg: | + BLOCKING REMEDIATIONS DETAILS: + The following {{ blocking_remediations.resources | length }} remediation(s) are preventing NodeHealthCheck deletion: + {% for remediation in blocking_remediations.resources %} + - Name: {{ remediation.metadata.name }} + Node: {{ remediation.spec.nodeName | default('Unknown') }} + Status: {{ 
remediation.status.phase | default('Unknown') }} + Created: {{ remediation.metadata.creationTimestamp | default('Unknown') }} + {% if remediation.status.conditions is defined %} + Conditions: + {% for condition in remediation.status.conditions %} + - Type: {{ condition.type }} + Status: {{ condition.status }} + Reason: {{ condition.reason | default('N/A') }} + Message: {{ condition.message | default('N/A') }} + Last Transition: {{ condition.lastTransitionTime | default('N/A') }} + {% endfor %} + {% endif %} + {% endfor %} + + - name: Display warning if NodeHealthCheck deletion failed due to active remediation + when: + - nhc_check.resources | length > 0 + - nhc_deletion is failed + ansible.builtin.debug: + msg: | + WARNING: NodeHealthCheck 'nodehealthcheck-sample' could not be deleted due to active remediation. + The webhook 'vnodehealthcheck.kb.io' is preventing deletion. + {% if blocking_remediations.resources | length > 0 %} + Found {{ blocking_remediations.resources | length }} active remediation(s) blocking deletion. + {% else %} + No active remediations found, but webhook is still blocking deletion. + {% endif %} + The NodeHealthCheck will remain active and the installation will continue. + You may need to manually delete it later when no remediations are running. 
+ + - name: Skip NodeHealthCheck deletion retry if webhook blocks it + when: + - nhc_check.resources | length > 0 + - nhc_deletion is failed + ansible.builtin.debug: + msg: "Skipping NodeHealthCheck deletion retry - webhook protection is active" + + - name: Check if SelfNodeRemediationConfig exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationConfig + name: self-node-remediation-config + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: snr_config_check + failed_when: false + + - name: Delete existing SelfNodeRemediationConfig resources + when: snr_config_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationConfig + name: self-node-remediation-config + namespace: "{{ cifmw_snr_nhc_namespace }}" + state: absent + failed_when: false + + - name: Check if SelfNodeRemediationTemplate exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationTemplate + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: snr_template_check + failed_when: false + + - name: Delete existing SelfNodeRemediationTemplate resources + when: snr_template_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationTemplate + namespace: "{{ cifmw_snr_nhc_namespace }}" + state: absent + failed_when: false + + - name: Check if Subscriptions exist + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: subscription_check + failed_when: false + + - name: Delete existing Subscriptions + 
when: subscription_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + name: "{{ item }}" + namespace: "{{ cifmw_snr_nhc_namespace }}" + state: absent + loop: + - self-node-remediation-operator + - node-health-check-operator + failed_when: false + + - name: Check if OperatorGroup exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1 + kind: OperatorGroup + name: workload-availability-operator-group + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: operator_group_check + failed_when: false + + - name: Delete existing OperatorGroup + when: operator_group_check.resources | length > 0 + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1 + kind: OperatorGroup + name: workload-availability-operator-group + namespace: "{{ cifmw_snr_nhc_namespace }}" + state: absent + failed_when: false + +- name: Cleanup entire namespace + when: cifmw_snr_nhc_cleanup_namespace | bool + block: + - name: Delete the entire workload-availability namespace + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: v1 + kind: Namespace + name: "{{ cifmw_snr_nhc_namespace }}" + state: absent + failed_when: false + + - name: Wait for namespace deletion to complete + when: cifmw_snr_nhc_cleanup_namespace | bool + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: v1 + kind: Namespace + name: "{{ cifmw_snr_nhc_namespace }}" + register: namespace_deletion_check + until: namespace_deletion_check.resources | length == 0 + retries: 10 + delay: 5 + failed_when: false + +- name: Create the workload-availability namespace + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ 
cifmw_snr_nhc_namespace }}" + register: namespace_result + +- name: Switch to namespace {{ cifmw_snr_nhc_namespace }} + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + kind: ConfigMap + namespace: kube-system + resource_definition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: kube-public + data: + namespace: "{{ cifmw_snr_nhc_namespace }}" + +- name: Create the workload-availability-operator-group resource + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: workload-availability-operator-group + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: operator_group_result + +- name: Check if the OperatorGroup exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1 + kind: OperatorGroup + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: operator_group_check + +- name: Create the self-node-remediation Subscription + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: self-node-remediation-operator + namespace: "{{ cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: self-node-remediation + package: self-node-remediation + source: redhat-operators + sourceNamespace: openshift-marketplace + register: subscription_result + +- name: Check if the Subscription exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-operator + register: subscription_check + +- name: Check Subscription status + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: 
Subscription + name: self-node-remediation-operator + namespace: openshift-operators + register: snr_subscription + +- name: Verify SelfNodeRemediationTemplate CR exists + kubernetes.core.k8s_info: + api_version: remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationTemplate + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: snr_template + +- name: Check ClusterServiceVersion (CSV) status for remediation + kubernetes.core.k8s_info: + api_version: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: csv_status + +- name: Verify Self Node Remediation Operator deployment is running + kubernetes.core.k8s_info: + api_version: apps/v1 + kind: Deployment + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-controller-manager + register: snr_deployment + +- name: Wait for Self Node Remediation deployment to be ready + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: apps/v1 + kind: Deployment + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-controller-manager + register: snr_deployment_check + until: >- + snr_deployment_check.resources[0].status.availableReplicas is defined and + snr_deployment_check.resources[0].status.availableReplicas > 0 + retries: 20 + delay: 15 + +- name: Check SelfNodeRemediationConfig CR + kubernetes.core.k8s_info: + api_version: remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationConfig + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: snr_config + +- name: Verify Self Node Remediation DaemonSet status + kubernetes.core.k8s_info: + api_version: apps/v1 + kind: DaemonSet + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-ds + register: snr_daemonset + +- name: Verify SelfNodeRemediationConfig CR exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: 
SelfNodeRemediationConfig + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: self-node-remediation-config + register: snr_config_detail + +- name: Verify SelfNodeRemediationTemplate exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: self-node-remediation.medik8s.io/v1alpha1 + kind: SelfNodeRemediationTemplate + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: snr_template_detail + +- name: Debug SNR deployment status + when: ansible_verbosity > 0 + ansible.builtin.debug: + msg: | + SNR Deployment Status: + - Namespace: {{ cifmw_snr_nhc_namespace }} + - OperatorGroup: {{ operator_group_check.resources | length > 0 }} + - Subscription: {{ subscription_check.resources | length > 0 }} + - Template: {{ snr_template_detail.resources | length > 0 }} + - Deployment Ready: {{ snr_deployment_check.resources[0].status.availableReplicas | default(0) > 0 if snr_deployment_check.resources | length > 0 else false }} + +- name: Create the Node Health Check Subscription + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: node-health-check-operator + namespace: "{{ cifmw_snr_nhc_namespace }}" + spec: + channel: stable + installPlanApproval: Automatic + name: node-healthcheck-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + package: node-healthcheck-operator + register: nhc_subscription_result + +- name: Check if the Node Health Check Subscription exists + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: Subscription + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: node-health-check-operator + register: nhc_subscription_check + +- name: Verify Node Health Check Subscription + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: 
operators.coreos.com/v1alpha1 + kind: Subscription + namespace: "{{ cifmw_snr_nhc_namespace }}" + name: node-health-check-operator + register: nhc_subscription_status + +- name: Check ClusterServiceVersion (CSV) for Node Health Check Operator + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: nhc_csv_status + +- name: Wait for CSV to reach Succeeded phase + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: csv_check + until: csv_check.resources | selectattr('status.phase', 'equalto', 'Succeeded') | list | length > 0 + retries: 20 + delay: 15 + +- name: Verify Node Health Check Operator Deployment + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: apps/v1 + kind: Deployment + namespace: "{{ cifmw_snr_nhc_namespace }}" + register: nhc_deployments + +- name: Debug NHC deployment status + when: ansible_verbosity > 0 + ansible.builtin.debug: + msg: | + NHC Deployment Status: + - Subscription: {{ nhc_subscription_check.resources | length > 0 }} + - CSV Phase: {{ csv_check.resources | selectattr('status.phase', 'equalto', 'Succeeded') | list | length > 0 }} + - Deployments: {{ nhc_deployments.resources | selectattr('metadata.name', 'search', 'node-healthcheck') | list | length }} + +- name: Check if NodeHealthCheck CR already exists before creating + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + register: existing_nhc_cr_check + ignore_errors: true + +- name: Create Node Health Check CR to use SNR + when: existing_nhc_cr_check.resources | length == 0 + kubernetes.core.k8s: + kubeconfig: "{{ 
cifmw_snr_nhc_kubeconfig }}" + state: present + resource_definition: + apiVersion: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + metadata: + name: nodehealthcheck-sample + spec: + minHealthy: 51% + remediationTemplate: + apiVersion: self-node-remediation.medik8s.io/v1alpha1 + name: self-node-remediation-automatic-strategy-template + namespace: "{{ cifmw_snr_nhc_namespace }}" + kind: SelfNodeRemediationTemplate + selector: + matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + unhealthyConditions: + - type: Ready + status: "False" + duration: 30s + - type: Ready + status: Unknown + duration: 30s + register: nhc_cr_creation + +- name: Display info if NodeHealthCheck CR already exists + when: existing_nhc_cr_check.resources | length > 0 + ansible.builtin.debug: + msg: | + NodeHealthCheck CR 'nodehealthcheck-sample' already exists and will not be recreated. + This is expected if cleanup was skipped due to active remediations. + +- name: Wait for Node Health Check CR to be created + when: existing_nhc_cr_check.resources | length == 0 + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + register: nhc_cr_ready + until: nhc_cr_ready.resources | length > 0 + retries: 10 + delay: 10 + +- name: Verify Node Health Check CR existence + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + register: nhc_cr_check + +- name: Check if existing NodeHealthCheck still exists after installation + when: + - cifmw_snr_nhc_cleanup_before_install | bool + - nhc_check.resources | length > 0 + - nhc_deletion is failed + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" + api_version: remediation.medik8s.io/v1alpha1 + kind: NodeHealthCheck + name: nodehealthcheck-sample + register: 
existing_nhc_final_check + ignore_errors: true + +- name: Summary of deployment status + when: ansible_verbosity > 0 + ansible.builtin.debug: + msg: | + Deployment Summary: + - Namespace: {{ cifmw_snr_nhc_namespace }} + - SNR Operator: {{ 'Ready' if (snr_deployment_check.resources | length > 0 and snr_deployment_check.resources[0].status.availableReplicas | default(0) > 0) else 'Not Ready' }} + - NHC Operator: {{ 'Ready' if (csv_check.resources | selectattr('status.phase', 'equalto', 'Succeeded') | list | length > 0) else 'Not Ready' }} + - NHC CR: {{ 'Created' if (nhc_cr_check.resources | length > 0) else 'Not Created' }} + - Remediation Template: {{ 'Available' if (snr_template_detail.resources | length > 0) else 'Not Available' }} + {% if cifmw_snr_nhc_cleanup_before_install | bool and nhc_check.resources | length > 0 and nhc_deletion is failed %} + - Existing NHC Status: {{ 'Still Exists' if (existing_nhc_final_check.resources | length > 0) else 'Removed' }} + {% endif %} diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index e95075a946..195b5ad616 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -191,6 +191,17 @@ parent: cifmw-molecule-base vars: TEST_RUN: cifmw_ntp +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_snr_nhc/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_snr_nhc + parent: cifmw-molecule-base + vars: + TEST_RUN: cifmw_snr_nhc - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 7866c54463..84625642a1 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -36,6 +36,7 @@ - cifmw-molecule-cifmw_nfs - cifmw-molecule-cifmw_ntp - cifmw-molecule-cifmw_setup + - cifmw-molecule-cifmw_snr_nhc - cifmw-molecule-cifmw_test_role - cifmw-molecule-cleanup_openstack - cifmw-molecule-compliance From 3dc98be7f3f660ad48cd8b492aa828ca6642c87a Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Thu, 24 Jul 
2025 17:37:46 +0200 Subject: [PATCH 242/480] Change default lvms ns Follow-up the similar install_yamls change https://github.com/openstack-k8s-operators/install_yamls/pull/1077 Signed-off-by: Bohdan Dobrelia --- roles/ci_lvms_storage/README.md | 2 +- roles/ci_lvms_storage/defaults/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/ci_lvms_storage/README.md b/roles/ci_lvms_storage/README.md index ca81babfa9..3a7efbb371 100644 --- a/roles/ci_lvms_storage/README.md +++ b/roles/ci_lvms_storage/README.md @@ -63,7 +63,7 @@ the `test_operator` role, will also set its `storageClass` value to ### Kubernetes parameters -* `cifmw_lvms_namespace`: (String) The Kubernetes namespace where the LVMS cluster and operator pods will run (default `openshift-storage`) +* `cifmw_lvms_namespace`: (String) The Kubernetes namespace where the LVMS cluster and operator pods will run (default `openshift-lvm-storage`) ### kubernetes.core.k8s_info parameters diff --git a/roles/ci_lvms_storage/defaults/main.yml b/roles/ci_lvms_storage/defaults/main.yml index bede34cd31..bf7cd3e71f 100644 --- a/roles/ci_lvms_storage/defaults/main.yml +++ b/roles/ci_lvms_storage/defaults/main.yml @@ -19,7 +19,7 @@ cifmw_lvms_disk_list: [] cifmw_lvms_cluster_name: lvmcluster -cifmw_lvms_namespace: openshift-storage +cifmw_lvms_namespace: openshift-lvm-storage cifmw_lvms_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" cifmw_lvms_manifests_dir: "{{ cifmw_manifests | default(cifmw_lvms_basedir ~ '/artifacts/manifests') }}/lvms" # The "lvms-" prefix is prepended to the cifmw_lvms_storage_class by the lvm-operator From ba911c813220f78ce59c17d26b8731666e6edb98 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Fri, 25 Jul 2025 08:22:49 +0200 Subject: [PATCH 243/480] Fix lvms csv deploy We should be consistent and pass the namespace to the csv deployment command. 
Signed-off-by: Francesco Pantano --- roles/ci_lvms_storage/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_lvms_storage/tasks/main.yml b/roles/ci_lvms_storage/tasks/main.yml index e3699aba4a..03bee78c3e 100644 --- a/roles/ci_lvms_storage/tasks/main.yml +++ b/roles/ci_lvms_storage/tasks/main.yml @@ -113,7 +113,7 @@ cmd: >- oc get ClusterServiceVersion -n "{{ cifmw_lvms_namespace }}" - -l operators.coreos.com/lvms-operator.openshift-storage + -l operators.coreos.com/lvms-operator."{{ cifmw_lvms_namespace }}" -o jsonpath='{.items[*].status.phase}' changed_when: false register: _cifmw_lvms_storage_cluster_csv_phase_out From 401c7fb11852d024dc90e82b5f314a10d93d808e Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Tue, 29 Jul 2025 22:57:40 +0200 Subject: [PATCH 244/480] Avoid switching OpenShift project in hook I encountered a case where the hook related to cinder multi-attach volume type fails, as due to a race condition between command executions the default OpenShift project was changed and the assumed pod was not found. To make the shell call more predictable, it is better to run the `oc` command with the `-n` parameter, rather than relying on switching the default project in an earlier command. 
--- hooks/playbooks/cinder_multiattach_volume_type.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hooks/playbooks/cinder_multiattach_volume_type.yml b/hooks/playbooks/cinder_multiattach_volume_type.yml index 69c5832a50..d58befb5cf 100644 --- a/hooks/playbooks/cinder_multiattach_volume_type.yml +++ b/hooks/playbooks/cinder_multiattach_volume_type.yml @@ -16,12 +16,11 @@ PATH: "{{ cifmw_path }}" ansible.builtin.shell: | set -xe -o pipefail - oc project {{ namespace }} - oc rsh openstackclient \ + oc -n {{ namespace }} rsh openstackclient \ openstack volume type show {{ cifmw_volume_multiattach_type }} &>/dev/null || \ oc rsh openstackclient \ openstack volume type create {{ cifmw_volume_multiattach_type }} - oc rsh openstackclient \ + oc -n {{ namespace }} rsh openstackclient \ openstack volume type set --property multiattach=" True" \ {{ cifmw_volume_multiattach_type }} From 86587cc6ad3e2a123b7a757477d51ae233627894 Mon Sep 17 00:00:00 2001 From: Oleg Belo0lipetskii Date: Tue, 8 Jul 2025 07:04:46 -0400 Subject: [PATCH 245/480] Replace deprecated 'cifmw_test_operator_concurrency' Move parameter to section 2: tempest parameters --- roles/test_operator/README.md | 1 - roles/test_operator/defaults/main.yml | 5 ++--- zuul.d/whitebox_neutron_tempest_jobs.yaml | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 3aa5e8783e..81c2ddbb7f 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -10,7 +10,6 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_version`: (String) The commit hash corresponding to the version of test-operator the user wants to use. This parameter is only used when `cifmw_test_operator_bundle` is also set. * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. 
Default value: `3600` * `cifmw_test_operator_logs_image`: (String) Image that should be used to collect logs from the pods spawned by the test-operator. Default value: `quay.io/quay/busybox` -* `cifmw_test_operator_concurrency`: (Integer) Tempest concurrency value. NOTE: This parameter is deprecated, please use `cifmw_test_operator_tempest_concurrency` instead. Default value: `8` * `cifmw_test_operator_clean_last_run`: (Bool) Delete all resources created by the previous run at the beginning of the role. Default value: `false` * `cifmw_test_operator_cleanup`: (Bool) Delete all resources created by the role at the end of the testing. Default value: `false` * `cifmw_test_operator_tempest_cleanup`: (Bool) Run tempest cleanup after test execution (tempest run) to delete any resources created by tempest that may have been left out. diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 703dc2f1c0..0792d1ea00 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -29,7 +29,6 @@ cifmw_test_operator_controller_namespace: openstack-operators cifmw_test_operator_bundle: "" cifmw_test_operator_timeout: 3600 cifmw_test_operator_logs_image: quay.io/quay/busybox -cifmw_test_operator_concurrency: 8 cifmw_test_operator_cleanup: false cifmw_test_operator_clean_last_run: false cifmw_test_operator_dry_run: false @@ -66,6 +65,7 @@ cifmw_test_operator_default_image_tag: current-podified # Section 2: tempest parameters - used when run_test_fw is 'tempest' cifmw_test_operator_tempest_name: "tempest-tests" +cifmw_test_operator_tempest_concurrency: 8 cifmw_test_operator_tempest_registry: "{{ cifmw_test_operator_default_registry }}" cifmw_test_operator_tempest_namespace: "{{ cifmw_test_operator_default_namespace }}" cifmw_test_operator_tempest_container: openstack-tempest-all @@ -157,8 +157,7 @@ cifmw_test_operator_tempest_config: {{ stage_vars_dict.cifmw_test_operator_tempest_exclude_list | default('') }} 
expectedFailuresList: | {{ stage_vars_dict.cifmw_test_operator_tempest_expected_failures_list | default('') }} - # NOTE: cifmw_test_operator_concurrency is deprecated, use cifmw_test_operator_tempest_concurrency instead - concurrency: "{{ stage_vars_dict.cifmw_test_operator_tempest_concurrency | default(cifmw_test_operator_concurrency) }}" + concurrency: "{{ stage_vars_dict.cifmw_test_operator_tempest_concurrency }}" externalPlugin: "{{ stage_vars_dict.cifmw_test_operator_tempest_external_plugin | default([]) }}" extraRPMs: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_rpms | default([]) }}" extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" diff --git a/zuul.d/whitebox_neutron_tempest_jobs.yaml b/zuul.d/whitebox_neutron_tempest_jobs.yaml index c47bca126d..4e5b442e0c 100644 --- a/zuul.d/whitebox_neutron_tempest_jobs.yaml +++ b/zuul.d/whitebox_neutron_tempest_jobs.yaml @@ -18,7 +18,7 @@ cifmw_os_must_gather_timeout: 28800 cifmw_test_operator_timeout: 14400 cifmw_block_device_size: 40G - cifmw_test_operator_concurrency: 6 + cifmw_test_operator_tempest_concurrency: 6 cifmw_test_operator_tempest_network_attachments: - ctlplane cifmw_test_operator_tempest_container: openstack-tempest-all From e8007f7f182c8f2b9a18602a5fe5f40ac94c97ad Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Tue, 29 Jul 2025 18:15:53 +0530 Subject: [PATCH 246/480] [podman] Add tasks to configure namespace for CS10 podman In Current CS10 zuul node, namespaces are not configured for podman leading to failure while pulling the image. This pr adds task for configuring namespace for podman. This pr also modifies build_containers and registry_deploy role to use podman role to install podman. 
Resolves: OSPCIX-995 Signed-off-by: Chandan Kumar (raukadah) --- roles/build_containers/tasks/install.yml | 5 +++- roles/build_containers/tasks/tag.yml | 4 +++ roles/podman/tasks/main.yml | 33 ++++++++++++++++++++++++ roles/registry_deploy/tasks/main.yml | 4 +-- 4 files changed, 42 insertions(+), 4 deletions(-) diff --git a/roles/build_containers/tasks/install.yml b/roles/build_containers/tasks/install.yml index 8fd79a9bd7..f3b5f4de86 100644 --- a/roles/build_containers/tasks/install.yml +++ b/roles/build_containers/tasks/install.yml @@ -1,4 +1,8 @@ --- +- name: Install podman + ansible.builtin.include_role: + name: podman + - name: Install required packages tags: - packages @@ -7,7 +11,6 @@ name: - python3-devel - python3-pip - - podman - buildah state: latest # noqa: package-latest diff --git a/roles/build_containers/tasks/tag.yml b/roles/build_containers/tasks/tag.yml index e46dcfc15c..eacc8bab5b 100644 --- a/roles/build_containers/tasks/tag.yml +++ b/roles/build_containers/tasks/tag.yml @@ -70,6 +70,10 @@ awk '{ print $2 }' | head -n 1 register: images_tag_from_file +- name: Make sure podman is installed + ansible.builtin.include_role: + name: podman + - name: Set variables for looping ansible.builtin.set_fact: built_images: "{{ built_images_from_file.stdout_lines }}" diff --git a/roles/podman/tasks/main.yml b/roles/podman/tasks/main.yml index fc46ce9ba8..defff3a0b0 100644 --- a/roles/podman/tasks/main.yml +++ b/roles/podman/tasks/main.yml @@ -25,3 +25,36 @@ - cifmw_podman_enable_linger | bool ansible.builtin.command: cmd: "loginctl enable-linger {{ cifmw_podman_user_linger }}" + +- name: Configure User Namespace for EL 10 + when: ansible_distribution_major_version is version('10', '==') + vars: + target_user: "{{ ansible_user }}" + sub_id_start: 100000 + sub_id_count: 65536 + block: + - name: "Ensure subordinate UID entry exists for {{ target_user }}" + become: true + ansible.builtin.lineinfile: + path: /etc/subuid + line: "{{ target_user }}:{{ sub_id_start 
}}:{{ sub_id_count }}" + state: present + create: true + mode: '0644' + register: subuid_status + + - name: "Ensure subordinate GID entry exists for {{ target_user }}" + become: true + ansible.builtin.lineinfile: + path: /etc/subgid + line: "{{ target_user }}:{{ sub_id_start }}:{{ sub_id_count }}" + state: present + create: true + mode: '0644' + register: subgid_status + + - name: "Run podman system migrate if subuid/subgid files were changed" + ansible.builtin.command: + cmd: podman system migrate + when: subuid_status.changed or subgid_status.changed + changed_when: true diff --git a/roles/registry_deploy/tasks/main.yml b/roles/registry_deploy/tasks/main.yml index 929b99dba1..2fd0319b82 100644 --- a/roles/registry_deploy/tasks/main.yml +++ b/roles/registry_deploy/tasks/main.yml @@ -15,13 +15,11 @@ # under the License. - name: Install Podman package - become: true tags: - bootstrap - packages - ansible.builtin.package: + ansible.builtin.include_role: name: podman - state: present - name: Deploy the local registry block: From 184bb00e2d18469e38b1aefc21a809ec1782256d Mon Sep 17 00:00:00 2001 From: Adrian Fusco Arnejo Date: Wed, 23 Jul 2025 16:41:29 +0200 Subject: [PATCH 247/480] Add KRB5_TRACE variable for trace logging output Adding KRB5_TRACE to have more information about the authentication. 
OSPCIX-936 --- roles/dlrn_report/tasks/dlrn_report_results.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/dlrn_report/tasks/dlrn_report_results.yml b/roles/dlrn_report/tasks/dlrn_report_results.yml index 7e474c1c29..0d17970d01 100644 --- a/roles/dlrn_report/tasks/dlrn_report_results.yml +++ b/roles/dlrn_report/tasks/dlrn_report_results.yml @@ -20,6 +20,8 @@ kinit {{ cifmw_dlrn_report_krb_user_realm }} -k -t {{ cifmw_dlrn_report_keytab }} + environment: + KRB5_TRACE: /dev/stdout retries: 5 delay: 60 register: _kinit_status From 225d9d2f4b38a8d8e7e56bd431bb056462aab8c6 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Thu, 31 Jul 2025 11:18:04 +0530 Subject: [PATCH 248/480] [podman] Set the default value for ansible_user Sometimes the meta content provider fails with ansible_user undefined. Let's set the default value to avoid the issue. Signed-off-by: Chandan Kumar (raukadah) --- roles/podman/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/podman/tasks/main.yml b/roles/podman/tasks/main.yml index defff3a0b0..13cbcda805 100644 --- a/roles/podman/tasks/main.yml +++ b/roles/podman/tasks/main.yml @@ -29,7 +29,7 @@ - name: Configure User Namespace for EL 10 when: ansible_distribution_major_version is version('10', '==') vars: - target_user: "{{ ansible_user }}" + target_user: "{{ ansible_user | default(lookup('env', 'USER')) }}" sub_id_start: 100000 sub_id_count: 65536 block: From 1de971e7af36662b3ad06761fa622af1aae413e7 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 31 Jul 2025 09:02:07 +0200 Subject: [PATCH 249/480] Add required-projects to cifmw-pod-base job In the run playbook `ci/playbooks/pod-jobs.yml` there is a hard-coded dependency on CI-Framework in Zuul projects. However, if that job is called in other repositories, such as `edpm-ansible`, the ci-framework will not exist in the Zuul environment. This change ensures the `ci-framework` repository is always present in the job environment. 
--- zuul.d/pods.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zuul.d/pods.yaml b/zuul.d/pods.yaml index 86be32e9d6..c65aa6de7a 100644 --- a/zuul.d/pods.yaml +++ b/zuul.d/pods.yaml @@ -7,6 +7,8 @@ label: pod-centos-9-stream description: | Run lightweight jobs in pods + required-projects: + - openstack-k8s-operators/ci-framework run: ci/playbooks/pod-jobs.yml - job: From 188baa6c4ea1699c19936e81d73364a4ff56991a Mon Sep 17 00:00:00 2001 From: Maxim Sava Date: Thu, 31 Jul 2025 11:56:27 +0300 Subject: [PATCH 250/480] Fix multi attach volume hook Missing namespace var in shell script --- hooks/playbooks/cinder_multiattach_volume_type.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/playbooks/cinder_multiattach_volume_type.yml b/hooks/playbooks/cinder_multiattach_volume_type.yml index d58befb5cf..49245f450d 100644 --- a/hooks/playbooks/cinder_multiattach_volume_type.yml +++ b/hooks/playbooks/cinder_multiattach_volume_type.yml @@ -18,7 +18,7 @@ set -xe -o pipefail oc -n {{ namespace }} rsh openstackclient \ openstack volume type show {{ cifmw_volume_multiattach_type }} &>/dev/null || \ - oc rsh openstackclient \ + oc -n {{ namespace }} rsh openstackclient \ openstack volume type create {{ cifmw_volume_multiattach_type }} oc -n {{ namespace }} rsh openstackclient \ openstack volume type set --property multiattach=" True" \ From 412a7b648027f8fda98cd8b332af57bbbf489708 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Thu, 24 Jul 2025 10:06:40 +0300 Subject: [PATCH 251/480] libvirt multiple interfaces to the same network - Allow multiple interfaces to be attached from the same network. 
- Use bm mapping for mac mapping in edpm values Jira: OSPRH-18516 --- .../common/edpm-nodeset-values/values.yaml.j2 | 6 ++++++ .../libvirt_manager/tasks/attach_interface.yml | 18 +++++++++--------- roles/rhol_crc/tasks/main.yml | 2 ++ 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 index cf676889a4..369d94988b 100644 --- a/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/edpm-nodeset-values/values.yaml.j2 @@ -26,10 +26,16 @@ data: edpm_network_config_os_net_config_mappings: {% for instance in instances_names %} edpm-{{ instance }}: +{% if cifmw_baremetal_hosts is defined %} +{% for interface in cifmw_baremetal_hosts[instance].nics %} + nic{{ loop.index }}: "{{ interface.mac }}" +{% endfor %} +{% else %} {% if hostvars[instance] is defined %} nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" {% endif %} nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endif %} {% endfor %} {% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} edpm_sshd_allowed_ranges: diff --git a/roles/libvirt_manager/tasks/attach_interface.yml b/roles/libvirt_manager/tasks/attach_interface.yml index 346c184b40..322e573707 100644 --- a/roles/libvirt_manager/tasks/attach_interface.yml +++ b/roles/libvirt_manager/tasks/attach_interface.yml @@ -60,6 +60,13 @@ - name: "Attach interface {{ network.name }} on {{ vm_name }}" # noqa: name[template] vars: + _net_index: >- + {{ + _extracted_xml.matches | default([]) | + selectattr('source.' + _type, 'defined') | + selectattr('source.' 
+ _type, 'equalto', _local_bridge_name) | + length | int + }} _net_name: >- {{ (cifmw_libvirt_manager_net_prefix_add | bool) | @@ -72,12 +79,6 @@ ternary(_net_name, _net_bridge_map[_net_name]) }} _type: "{{ cifmw_libvirt_manager_network_interface_types[network.name] | default('bridge') }}" - _attached_bridges: >- - {{ - _extracted_xml.matches | default([]) | - selectattr('source.' + _type, 'defined') | - selectattr('source.' + _type, 'equalto', _local_bridge_name) - }} _clean_vm: "{{ vm_name | replace('cifmw-', '') }}" _mac_seed: "{{ '52:54:%02i' % vm_item|default(0)|int }}" _lm_mac_address: >- @@ -88,12 +89,11 @@ -%} {% endif -%} {% if known_mac is defined and known_mac | length > 0 -%} - {{ known_mac | first }} + {{ known_mac[_net_index | int] }} {% else -%} {{ _mac_seed | community.general.random_mac }} {% endif -%} - when: - - _attached_bridges | length == 0 + when: networks | default([]) | select('regex', network.name) | length > _net_index | int ansible.builtin.command: cmd: >- virsh -c qemu:///system diff --git a/roles/rhol_crc/tasks/main.yml b/roles/rhol_crc/tasks/main.yml index d7ec04c54d..eed9b0f447 100644 --- a/roles/rhol_crc/tasks/main.yml +++ b/roles/rhol_crc/tasks/main.yml @@ -136,6 +136,8 @@ var: _net_list - name: Attach default network + vars: + networks: "{{ _net_list.list_nets }}" ansible.builtin.import_role: name: libvirt_manager tasks_from: attach_interface.yml From 5dc1a92f3d5b277de4b18047c791bbd82d1c0ae5 Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Thu, 26 Jun 2025 19:13:58 +0200 Subject: [PATCH 252/480] Add controllers as networker into vars.yaml --- .../templates/adoption_vars.yaml.j2 | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 index 61cd49d2e5..699c262efd 100644 --- a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 +++ 
b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 @@ -46,8 +46,8 @@ edpm_nodes: {% endfor %} {% endif %} -{% if _vm_groups['osp-networkers'] | default([]) | length > 0 %} edpm_nodes_networker: +{% if _vm_groups['osp-networkers'] | default([]) | length > 0 %} {% for networker in _vm_groups['osp-networkers'] | default([]) %} {% set node_nets = cifmw_networking_env_definition.instances[networker] %} {{ networker }}: @@ -65,6 +65,23 @@ edpm_nodes_networker: {% endfor %} {% endfor %} {% endif %} + {% for controller in _vm_groups['osp-controllers'] %} + {% set node_nets = cifmw_networking_env_definition.instances[controller] %} + {{ controller }}: + hostName: {{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} + ansible: + ansibleHost: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} + networks: + {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %} + - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }} + name: {{ net }} + subnetName: subnet1 +{% if net == 'ctlplane' %} + defaultRoute: true +{% endif %} + {% endfor %} + {% endfor %} + upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane[dns_version|default('dns_v4')] | first }} os_cloud_name: {{ cifmw_adoption_osp_deploy_scenario.stacks[0].stackname }} From 5b26cd6b64880c23380c4b6168dbe8aea28cc587 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Thu, 20 Feb 2025 10:31:37 +0100 Subject: [PATCH 253/480] Fix network config for DCN - tenant networks should not be in the public zone because nat is enabled and breaks geneve tunnels between AZs - fixed the name of the tenant vlan network --- scenarios/reproducers/dt-dcn.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scenarios/reproducers/dt-dcn.yml b/scenarios/reproducers/dt-dcn.yml index 97cab60742..bdec27c3f2 100644 --- a/scenarios/reproducers/dt-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -586,9 +586,10 @@ 
cifmw_libvirt_manager_extra_network_configuration: address: - ip: "{{ cifmw_networking_definition.networks.storagedcn2.gateway }}" prefix-length: "{{ cifmw_networking_definition.networks.storagedcn2.network | ansible.utils.ipaddr('prefix') }}" - - name: "vlan{{ cifmw_networking_definition.networks.storagemgmt.vlan }}" + - name: "vlan{{ cifmw_networking_definition.networks.tenant.vlan }}" type: vlan state: up + cifmw_firewall_zone: libvirt vlan: base-iface: cifmw-osp_trunk id: "{{ cifmw_networking_definition.networks.tenant.vlan }}" @@ -602,6 +603,7 @@ cifmw_libvirt_manager_extra_network_configuration: - name: "vlan{{ cifmw_networking_definition.networks.tenantdcn1.vlan }}" type: vlan state: up + cifmw_firewall_zone: libvirt vlan: base-iface: cifmw-dcn1_tr id: "{{ cifmw_networking_definition.networks.tenantdcn1.vlan }}" @@ -615,6 +617,7 @@ cifmw_libvirt_manager_extra_network_configuration: - name: "vlan{{ cifmw_networking_definition.networks.tenantdcn2.vlan }}" type: vlan state: up + cifmw_firewall_zone: libvirt vlan: base-iface: cifmw-dcn2_tr id: "{{ cifmw_networking_definition.networks.tenantdcn2.vlan }}" From 6a4fdd38a0f7218fadc5c38439eae7a08df60445 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Thu, 20 Feb 2025 15:38:06 +0100 Subject: [PATCH 254/480] Enable Octavia in DCN - Enable Octavia in ACTIVE_STANDBY mode - Configure Management network on all the AZs except az0 (the control plane) JIRA: OSPRH-14215 --- .../templates/service-values.yaml.j2 | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/roles/ci_dcn_site/templates/service-values.yaml.j2 b/roles/ci_dcn_site/templates/service-values.yaml.j2 index 75a430776e..7cc750d2c8 100644 --- a/roles/ci_dcn_site/templates/service-values.yaml.j2 +++ b/roles/ci_dcn_site/templates/service-values.yaml.j2 @@ -145,6 +145,50 @@ data: network_vlan_ranges = datacentre:1:1000,leaf1:1:1000,leaf2:1:1000 [neutron] physnets = datacentre,leaf1,leaf2 + + octavia: + enabled: true + template: + 
amphoraImageContainerImage: quay.io/gthiemonge/octavia-amphora-image +{% if cifmw_ci_dcn_site_enable_network_az is true %} + lbMgmtNetwork: + # az0 not used for Octavia LBs + createDefaultLbMgmtNetwork: false + lbMgmtRouterGateway: 172.23.0.150 + availabilityZoneCIDRs: +{% for az in _all_azs | sort %} +{% if az != _az_to_scaledown and az != "az0" %} + {{ az }}: 172.{{ loop.index + 1 }}4.0.0/16 +{% endif %} +{% endfor %} + availabilityZones: + - az0 +{% endif %} + octaviaAPI: + networkAttachments: + - internalapi + customServiceConfig: | + [controller_worker] + loadbalancer_topology=ACTIVE_STANDBY + octaviaHousekeeping: + networkAttachments: + - octavia + customServiceConfig: | + [controller_worker] + loadbalancer_topology=ACTIVE_STANDBY + octaviaHealthManager: + networkAttachments: + - octavia + customServiceConfig: | + [controller_worker] + loadbalancer_topology=ACTIVE_STANDBY + octaviaWorker: + networkAttachments: + - octavia + customServiceConfig: | + [controller_worker] + loadbalancer_topology=ACTIVE_STANDBY + ovn: template: ovnController: @@ -159,6 +203,9 @@ data: {% else %} availability-zones: [] {% endif %} + nicMappings: + datacentre: ocpbr + octavia: octbr nova: customServiceConfig: | [DEFAULT] From 8b605234393ace25131bd566d44135b0ea785b76 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Tue, 29 Jul 2025 12:33:13 +0000 Subject: [PATCH 255/480] [OSPRH-18411] Remove default bridged control plane network for HCI VA --- .../hci/network-values/values.yaml.j2 | 126 ++++++++++++++++++ scenarios/reproducers/va-hci.yml | 1 + 2 files changed, 127 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/hci/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/hci/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/hci/network-values/values.yaml.j2 new file mode 100644 index 0000000000..559243d8d2 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/hci/network-values/values.yaml.j2 @@ -0,0 
+1,126 @@ +--- +# source: hci/network-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} + - allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network[_ipv.network_vX] }} +{% if network[_ipv.gw_vX] is defined %} + gateway: {{ network[_ipv.gw_vX] }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ 
network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + 
metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} diff --git a/scenarios/reproducers/va-hci.yml b/scenarios/reproducers/va-hci.yml index 5065c19fee..ae40dc1f91 100644 --- a/scenarios/reproducers/va-hci.yml +++ b/scenarios/reproducers/va-hci.yml @@ -65,6 +65,7 @@ cifmw_libvirt_manager_configuration: - ocppr - ocpbm - osp_trunk + - osp_trunk compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" From 8f4273b14fa4c6584c889719f60f55e92751258c Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 24 Jul 2025 10:04:44 +0200 Subject: [PATCH 256/480] Call tasks from roles if play host is same After we migrate some playbooks into roles, we can drop the convention how it was done earlier - few plays where each was done on same host and each was calling one (or few) task in role. It is not needed, we can call other tasks in role in one play, because all is done on same host. 
Signed-off-by: Daniel Pawlik --- deploy-edpm.yml | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/deploy-edpm.yml b/deploy-edpm.yml index ed7dd92489..51c47cf761 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -43,7 +43,7 @@ tags: - infra -- name: Run cifmw_setup infra.yml +- name: Run cifmw_setup infra, build package, container and operators, deploy EDPM hosts: "{{ cifmw_target_host | default('localhost') }}" tasks: - name: Prepare the platform @@ -55,10 +55,6 @@ tags: - infra -- name: Build package playbook - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - name: Build package playbook ansible.builtin.import_role: name: cifmw_setup @@ -66,10 +62,6 @@ tags: - build-packages -- name: Build container playbook - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - name: Build container playbook ansible.builtin.import_role: name: cifmw_setup @@ -77,23 +69,15 @@ tags: - build-containers -- name: Build operators playbook - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - environment: - PATH: "{{ cifmw_path }}" - tasks: - name: Build operators playbook ansible.builtin.import_role: name: cifmw_setup tasks_from: build_operators.yml tags: - build-operators + environment: + PATH: "{{ cifmw_path }}" -- name: Deploy EDPM - hosts: "{{ cifmw_target_host | default('localhost') }}" - gather_facts: false - tasks: - name: Deploy EDPM ansible.builtin.import_role: name: cifmw_setup @@ -139,7 +123,7 @@ storage_mgmt_network_range: 172.20.0.0/24 ansible.builtin.import_playbook: playbooks/ceph.yml -- name: Continue HCI deploy +- name: Continue HCI deploy, deploy architecture and validate workflow hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: false tasks: @@ -150,10 +134,6 @@ tags: - edpm -- name: Deploy architecture and validate workflow - hosts: "{{ cifmw_target_host | default('localhost') }}" - 
gather_facts: false - tasks: - name: Run pre_deploy hooks when: cifmw_architecture_scenario is defined vars: From 4d8cdf9c0d89a2bbd43d2f6e7a1f38a145b9acf9 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 25 Jul 2025 08:57:32 +0200 Subject: [PATCH 257/480] Set bool value instead of string bool value There is no need to write bool values as string. They should be written directly as bool: true, false. Example command how it was done: find ci docs hooks playbooks roles scenarios -type f -exec sed -i "s@('true')@(true)@g" {} \; find ci docs hooks playbooks roles scenarios -type f -exec sed -i "s@('false')@(false)@g" {} \; Signed-off-by: Daniel Pawlik --- ci/playbooks/e2e-prepare.yml | 2 +- ci/playbooks/kuttl/e2e-kuttl.yml | 2 +- .../meta_content_provider.yml | 2 +- deploy-edpm.yml | 6 +++--- .../cinder_multiattach_volume_type.yml | 2 +- .../control_plane_hci_pre_deploy.yml | 2 +- hooks/playbooks/control_plane_ironic.yml | 2 +- .../templates/config_ceph_backends.yaml.j2 | 4 ++-- .../validate_podified_deployment.yml | 2 +- playbooks/06-deploy-architecture.yml | 2 +- playbooks/06-deploy-edpm.yml | 20 +++++++++---------- playbooks/07-admin-setup.yml | 2 +- playbooks/08-run-tests.yml | 2 +- playbooks/09-compliance.yml | 4 ++-- playbooks/99-logs.yml | 2 +- .../multi-namespace/ns2_osp_networks.yaml | 2 +- playbooks/nfs.yml | 2 +- post-deployment.yml | 4 ++-- reproducer.yml | 2 +- roles/build_containers/tasks/main.yml | 2 +- roles/cifmw_nfs/README.md | 2 +- roles/cifmw_setup/tasks/admin_setup.yml | 2 +- roles/cifmw_setup/tasks/deploy_edpm.yml | 10 +++++----- roles/cifmw_setup/tasks/hci_deploy.yml | 2 +- roles/cifmw_setup/tasks/run_tests.yml | 4 ++-- .../templates/content-provider.yml.j2 | 4 ++-- scenarios/centos-9/edpm_ci.yml | 2 +- update-edpm.yml | 4 ++-- 28 files changed, 49 insertions(+), 49 deletions(-) diff --git a/ci/playbooks/e2e-prepare.yml b/ci/playbooks/e2e-prepare.yml index 62fedc8a08..da9309a99d 100644 --- a/ci/playbooks/e2e-prepare.yml +++ 
b/ci/playbooks/e2e-prepare.yml @@ -55,4 +55,4 @@ when: - zuul_change_list is defined - "'edpm-ansible' in zuul_change_list" - - registry_login_enabled | default('false') | bool + - registry_login_enabled | default(false) | bool diff --git a/ci/playbooks/kuttl/e2e-kuttl.yml b/ci/playbooks/kuttl/e2e-kuttl.yml index 20addb7061..7af2d758b6 100644 --- a/ci/playbooks/kuttl/e2e-kuttl.yml +++ b/ci/playbooks/kuttl/e2e-kuttl.yml @@ -26,7 +26,7 @@ - name: Attach default network to CRC when: - - kuttl_make_crc_attach_default_interface | default ('true') | bool + - kuttl_make_crc_attach_default_interface | default (true) | bool ansible.builtin.include_role: name: "install_yamls_makes" tasks_from: "make_crc_attach_default_interface" diff --git a/ci/playbooks/meta_content_provider/meta_content_provider.yml b/ci/playbooks/meta_content_provider/meta_content_provider.yml index 533e663eb1..b2696f0d00 100644 --- a/ci/playbooks/meta_content_provider/meta_content_provider.yml +++ b/ci/playbooks/meta_content_provider/meta_content_provider.yml @@ -43,7 +43,7 @@ - "'os-net-config' not in zuul_change_list" # Note: cifmw_build_containers_force var is used to run build_containers # role in the meta content provider irrespective of gating repo. 
- - _gating_repo.stat.exists or cifmw_build_containers_force | default('false') + - _gating_repo.stat.exists or cifmw_build_containers_force | default(false) block: # It is needed to install built python-tcib package on the controller - name: Populate gating repo in /etc/yum.repos.d diff --git a/deploy-edpm.yml b/deploy-edpm.yml index 51c47cf761..db604b65ba 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -94,7 +94,7 @@ nftables_path: /etc/nftables nftables_conf: /etc/sysconfig/nftables.conf when: - - cifmw_edpm_deploy_nfs | default('false') | bool + - cifmw_edpm_deploy_nfs | default(false) | bool ansible.builtin.import_role: name: cifmw_nfs @@ -108,7 +108,7 @@ ansible.builtin.meta: end_play - name: Clear ceph target hosts facts - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool ansible.builtin.meta: clear_facts # TODO: replace this import_playbook with cifmw_ceph role @@ -116,7 +116,7 @@ vars: _deploy_ceph: >- {{ - (cifmw_edpm_deploy_hci | default('false') | bool) and + (cifmw_edpm_deploy_hci | default(false) | bool) and cifmw_architecture_scenario is undefined }} storage_network_range: 172.18.0.0/24 diff --git a/hooks/playbooks/cinder_multiattach_volume_type.yml b/hooks/playbooks/cinder_multiattach_volume_type.yml index 49245f450d..4ccd6b4729 100644 --- a/hooks/playbooks/cinder_multiattach_volume_type.yml +++ b/hooks/playbooks/cinder_multiattach_volume_type.yml @@ -26,7 +26,7 @@ # This block is needed for octavia because the Amphora image needs to be created on a multiattach volume - name: Block to configure cinder_volume_type when needed - when: configure_cinder_volume_type | default('false') | bool + when: configure_cinder_volume_type | default(false) | bool block: - name: Create tempfile ansible.builtin.tempfile: diff --git a/hooks/playbooks/control_plane_hci_pre_deploy.yml b/hooks/playbooks/control_plane_hci_pre_deploy.yml index 7bf373686e..c24493bb2c 100644 --- 
a/hooks/playbooks/control_plane_hci_pre_deploy.yml +++ b/hooks/playbooks/control_plane_hci_pre_deploy.yml @@ -31,5 +31,5 @@ - op: add path: /spec/swift/enabled - value: {{ cifmw_services_swift_enabled | default('false') }} + value: {{ cifmw_services_swift_enabled | default(false) }} mode: "0644" diff --git a/hooks/playbooks/control_plane_ironic.yml b/hooks/playbooks/control_plane_ironic.yml index b0faee9fb1..74153ed924 100644 --- a/hooks/playbooks/control_plane_ironic.yml +++ b/hooks/playbooks/control_plane_ironic.yml @@ -23,5 +23,5 @@ patch: |- - op: add path: /spec/ironic/enabled - value: {{ cifmw_services_ironic_enabled | default('false') }} + value: {{ cifmw_services_ironic_enabled | default(false) }} mode: "0644" diff --git a/hooks/playbooks/templates/config_ceph_backends.yaml.j2 b/hooks/playbooks/templates/config_ceph_backends.yaml.j2 index 87175505cb..a1a2fcd229 100644 --- a/hooks/playbooks/templates/config_ceph_backends.yaml.j2 +++ b/hooks/playbooks/templates/config_ceph_backends.yaml.j2 @@ -84,9 +84,9 @@ patches: - op: add path: /spec/manila/enabled - value: {{ cifmw_services_manila_enabled | default('false') }} + value: {{ cifmw_services_manila_enabled | default(false) }} -{% if cifmw_services_manila_enabled | default('false') | bool -%} +{% if cifmw_services_manila_enabled | default(false) | bool -%} {% set _manila_backends = [] -%} {% set _manila_protocols = [] -%} {% if cifmw_ceph_daemons_layout.cephfs_enabled | default(true) | bool -%} diff --git a/hooks/playbooks/validate_podified_deployment.yml b/hooks/playbooks/validate_podified_deployment.yml index 7d0f3105ba..c38f08d69d 100644 --- a/hooks/playbooks/validate_podified_deployment.yml +++ b/hooks/playbooks/validate_podified_deployment.yml @@ -17,7 +17,7 @@ - name: List compute and network resources when: - - podified_validation | default('false') | bool + - podified_validation | default(false) | bool environment: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" PATH: "{{ cifmw_path }}" diff --git 
a/playbooks/06-deploy-architecture.yml b/playbooks/06-deploy-architecture.yml index 71d8edf0e8..c407144e0c 100644 --- a/playbooks/06-deploy-architecture.yml +++ b/playbooks/06-deploy-architecture.yml @@ -300,4 +300,4 @@ - name: Run validations ansible.builtin.include_role: name: validations - when: cifmw_execute_validations | default('false') | bool + when: cifmw_execute_validations | default(false) | bool diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 75eb8c9f9b..4a36f3635b 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -63,8 +63,8 @@ - name: Create virtual baremetal and deploy EDPM when: - - cifmw_edpm_deploy_baremetal | default('false') | bool - - cifmw_deploy_edpm | default('false') | bool + - cifmw_edpm_deploy_baremetal | default(false) | bool + - cifmw_deploy_edpm | default(false) | bool ansible.builtin.import_role: name: edpm_deploy_baremetal @@ -84,8 +84,8 @@ - name: Create VMs and Deploy EDPM when: - - not cifmw_edpm_deploy_baremetal | default('false') | bool - - cifmw_deploy_edpm | default('false') | bool + - not cifmw_edpm_deploy_baremetal | default(false) | bool + - cifmw_deploy_edpm | default(false) | bool block: - name: Create and provision external computes when: @@ -96,7 +96,7 @@ tasks_from: deploy_edpm_compute.yml - name: Prepare for HCI deploy phase 1 - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool ansible.builtin.include_role: name: hci_prepare tasks_from: phase1.yml @@ -114,7 +114,7 @@ nftables_path: /etc/nftables nftables_conf: /etc/sysconfig/nftables.conf when: - - cifmw_edpm_deploy_nfs | default('false') | bool + - cifmw_edpm_deploy_nfs | default(false) | bool ansible.builtin.import_role: name: cifmw_nfs @@ -128,14 +128,14 @@ ansible.builtin.meta: end_play - name: Clear ceph target hosts facts - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool 
ansible.builtin.meta: clear_facts - name: Deploy Ceph on target nodes vars: _deploy_ceph: >- {{ - (cifmw_edpm_deploy_hci | default('false') | bool) and + (cifmw_edpm_deploy_hci | default(false) | bool) and cifmw_architecture_scenario is undefined }} storage_network_range: 172.18.0.0/24 @@ -153,7 +153,7 @@ ansible.builtin.meta: end_play - name: Create Ceph secrets and retrieve FSID info - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool block: - name: Prepare for HCI deploy phase 2 ansible.builtin.include_role: @@ -185,4 +185,4 @@ name: validations when: - cifmw_architecture_scenario is not defined - - cifmw_execute_validations | default('false') | bool + - cifmw_execute_validations | default(false) | bool diff --git a/playbooks/07-admin-setup.yml b/playbooks/07-admin-setup.yml index 03ed112a32..82e98097f9 100644 --- a/playbooks/07-admin-setup.yml +++ b/playbooks/07-admin-setup.yml @@ -20,7 +20,7 @@ - name: Create openstack network elements ansible.builtin.import_role: name: os_net_setup - when: not cifmw_skip_os_net_setup | default('false') | bool + when: not cifmw_skip_os_net_setup | default(false) | bool - name: Run post_admin_setup hooks vars: diff --git a/playbooks/08-run-tests.yml b/playbooks/08-run-tests.yml index 4004d66299..656d72bc67 100644 --- a/playbooks/08-run-tests.yml +++ b/playbooks/08-run-tests.yml @@ -16,7 +16,7 @@ # end_play will end only current play, not the main edpm-deploy.yml - name: Early exit if no tests when: - - not cifmw_run_tests | default('false') | bool + - not cifmw_run_tests | default(false) | bool ansible.builtin.meta: end_play - name: "Run tests" diff --git a/playbooks/09-compliance.yml b/playbooks/09-compliance.yml index 2cb51525e4..6e9f9dc013 100644 --- a/playbooks/09-compliance.yml +++ b/playbooks/09-compliance.yml @@ -13,7 +13,7 @@ vars: cifmw_compliance_podman_username: "{{ cifmw_registry_token.credentials.username }}" cifmw_compliance_podman_password: "{{ 
cifmw_registry_token.credentials.password }}" - when: cifmw_run_operators_compliance_scans | default('false') | bool + when: cifmw_run_operators_compliance_scans | default(false) | bool - name: Run compliance scan for computes hosts: "{{ groups['computes'] | default ([]) }}" @@ -24,4 +24,4 @@ name: compliance tasks_from: run_compute_node_scans.yml run_once: true - when: cifmw_run_compute_compliance_scans | default('false') | bool + when: cifmw_run_compute_compliance_scans | default(false) | bool diff --git a/playbooks/99-logs.yml b/playbooks/99-logs.yml index d6ebe76532..8f38935622 100644 --- a/playbooks/99-logs.yml +++ b/playbooks/99-logs.yml @@ -5,7 +5,7 @@ # end_play will end only current play, not the main edpm-deploy.yml - name: Early exit if no tests when: - - zuul_log_collection | default('false') | bool + - zuul_log_collection | default(false) | bool ansible.builtin.meta: end_play - name: Ensure cifmw_basedir param is set diff --git a/playbooks/multi-namespace/ns2_osp_networks.yaml b/playbooks/multi-namespace/ns2_osp_networks.yaml index e006a0d8db..cb8f9408e3 100644 --- a/playbooks/multi-namespace/ns2_osp_networks.yaml +++ b/playbooks/multi-namespace/ns2_osp_networks.yaml @@ -37,4 +37,4 @@ is_shared: true ansible.builtin.import_role: name: os_net_setup - when: not cifmw_skip_os_net_setup | default('false') | bool + when: not cifmw_skip_os_net_setup | default(false) | bool diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml index 3789b0a763..34dbc62524 100644 --- a/playbooks/nfs.yml +++ b/playbooks/nfs.yml @@ -25,7 +25,7 @@ pre_tasks: - name: End play early if no NFS is needed when: - - not cifmw_edpm_deploy_nfs | default('false') | bool + - not cifmw_edpm_deploy_nfs | default(false) | bool ansible.builtin.meta: end_play vars: nftables_path: /etc/nftables diff --git a/post-deployment.yml b/post-deployment.yml index cf82ad98fe..c15cd52b8b 100644 --- a/post-deployment.yml +++ b/post-deployment.yml @@ -25,7 +25,7 @@ vars: cifmw_compliance_podman_username: "{{ 
cifmw_registry_token.credentials.username }}" cifmw_compliance_podman_password: "{{ cifmw_registry_token.credentials.password }}" - when: cifmw_run_operators_compliance_scans | default('false') | bool + when: cifmw_run_operators_compliance_scans | default(false) | bool tags: - compliance @@ -38,7 +38,7 @@ name: compliance tasks_from: run_compute_node_scans.yml run_once: true - when: cifmw_run_compute_compliance_scans | default('false') | bool + when: cifmw_run_compute_compliance_scans | default(false) | bool tags: - compliance diff --git a/reproducer.yml b/reproducer.yml index e308ba7261..54b9c0f476 100644 --- a/reproducer.yml +++ b/reproducer.yml @@ -77,7 +77,7 @@ post_tasks: - name: Allow traffic from OSP VMs to OSP API (needed for shiftstack) become: true - when: cifmw_allow_vms_to_reach_osp_api | default ('false') | bool + when: cifmw_allow_vms_to_reach_osp_api | default (false) | bool block: - name: Allow traffic from OSP VMs to OSP API for ipv4 ansible.builtin.command: # noqa: command-instead-of-module diff --git a/roles/build_containers/tasks/main.yml b/roles/build_containers/tasks/main.yml index a361aa5a30..f308f68e46 100644 --- a/roles/build_containers/tasks/main.yml +++ b/roles/build_containers/tasks/main.yml @@ -87,7 +87,7 @@ buildah push {{ item }}:{{ cifmw_build_containers_image_tag }} loop: "{{ built_images.stdout_lines }}" when: - - cifmw_build_containers_buildah_push | default ('false') | bool + - cifmw_build_containers_buildah_push | default (false) | bool - not cifmw_build_containers_push_containers | bool - name: "Retag each image and push to registry: {{ item }}" diff --git a/roles/cifmw_nfs/README.md b/roles/cifmw_nfs/README.md index 5e23f0d6bb..f92fa7f63f 100644 --- a/roles/cifmw_nfs/README.md +++ b/roles/cifmw_nfs/README.md @@ -17,7 +17,7 @@ sudo privilege is required for this role. 
nftables_path: /etc/nftables nftables_conf: /etc/sysconfig/nftables.conf when: - - cifmw_edpm_deploy_nfs | default('false') | bool + - cifmw_edpm_deploy_nfs | default(false) | bool ansible.builtin.import_role: name: cifmw_nfs ``` diff --git a/roles/cifmw_setup/tasks/admin_setup.yml b/roles/cifmw_setup/tasks/admin_setup.yml index 53222c311a..76c43da5d7 100644 --- a/roles/cifmw_setup/tasks/admin_setup.yml +++ b/roles/cifmw_setup/tasks/admin_setup.yml @@ -12,7 +12,7 @@ - name: Create openstack network elements ansible.builtin.import_role: name: os_net_setup - when: not cifmw_skip_os_net_setup | default('false') | bool + when: not cifmw_skip_os_net_setup | default(false) | bool - name: Run post_admin_setup hooks vars: diff --git a/roles/cifmw_setup/tasks/deploy_edpm.yml b/roles/cifmw_setup/tasks/deploy_edpm.yml index 34de902f43..7e87fef091 100644 --- a/roles/cifmw_setup/tasks/deploy_edpm.yml +++ b/roles/cifmw_setup/tasks/deploy_edpm.yml @@ -40,8 +40,8 @@ - name: Create virtual baremetal and deploy EDPM when: - - cifmw_edpm_deploy_baremetal | default('false') | bool - - cifmw_deploy_edpm | default('false') | bool + - cifmw_edpm_deploy_baremetal | default(false) | bool + - cifmw_deploy_edpm | default(false) | bool ansible.builtin.import_role: name: edpm_deploy_baremetal @@ -51,8 +51,8 @@ - name: Create VMs and Deploy EDPM when: - - not cifmw_edpm_deploy_baremetal | default('false') | bool - - cifmw_deploy_edpm | default('false') | bool + - not cifmw_edpm_deploy_baremetal | default(false) | bool + - cifmw_deploy_edpm | default(false) | bool block: - name: Create and provision external computes when: @@ -63,7 +63,7 @@ tasks_from: deploy_edpm_compute.yml - name: Prepare for HCI deploy phase 1 - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool ansible.builtin.include_role: name: hci_prepare tasks_from: phase1.yml diff --git a/roles/cifmw_setup/tasks/hci_deploy.yml b/roles/cifmw_setup/tasks/hci_deploy.yml index 
0279f5587a..4fb752d43e 100644 --- a/roles/cifmw_setup/tasks/hci_deploy.yml +++ b/roles/cifmw_setup/tasks/hci_deploy.yml @@ -3,7 +3,7 @@ when: cifmw_architecture_scenario is not defined block: - name: Create Ceph secrets and retrieve FSID info - when: cifmw_edpm_deploy_hci | default('false') | bool + when: cifmw_edpm_deploy_hci | default(false) | bool block: - name: Prepare for HCI deploy phase 2 ansible.builtin.include_role: diff --git a/roles/cifmw_setup/tasks/run_tests.yml b/roles/cifmw_setup/tasks/run_tests.yml index 2a19bf3fc0..64d002673c 100644 --- a/roles/cifmw_setup/tasks/run_tests.yml +++ b/roles/cifmw_setup/tasks/run_tests.yml @@ -10,11 +10,11 @@ - tests ansible.builtin.import_role: name: "{{ cifmw_run_test_role | default('tempest') }}" - when: cifmw_run_tests | default('false') | bool + when: cifmw_run_tests | default(false) | bool - name: Run post_tests hooks vars: step: post_tests ansible.builtin.import_role: name: run_hook - when: cifmw_run_tests | default('false') | bool + when: cifmw_run_tests | default(false) | bool diff --git a/roles/reproducer/templates/content-provider.yml.j2 b/roles/reproducer/templates/content-provider.yml.j2 index 8c94957e15..713754959b 100644 --- a/roles/reproducer/templates/content-provider.yml.j2 +++ b/roles/reproducer/templates/content-provider.yml.j2 @@ -47,7 +47,7 @@ job_id: "{{ job_id }}" _cifmw_reproducer_framework_location: "{{ _cifmw_reproducer_framework_location }}" tasks: -{% if operator_content_provider | default('false') | bool %} +{% if operator_content_provider | default(false) | bool %} {% raw %} - name: Load env variables ansible.builtin.include_vars: @@ -108,7 +108,7 @@ }} {% endraw %} {% endif %} -{% if openstack_content_provider | default('false') | bool %} +{% if openstack_content_provider | default(false) | bool %} {% raw %} - name: Run tcib playbook environment: diff --git a/scenarios/centos-9/edpm_ci.yml b/scenarios/centos-9/edpm_ci.yml index d200e65d35..f803557f6a 100644 --- 
a/scenarios/centos-9/edpm_ci.yml +++ b/scenarios/centos-9/edpm_ci.yml @@ -40,7 +40,7 @@ post_ctlplane_deploy: type: playbook source: validate_podified_deployment.yml extra_vars: - podified_validation: "{{ podified_validation | default ('false') | bool }}" + podified_validation: "{{ podified_validation | default (false) | bool }}" cifmw_openshift_kubeconfig: "{{ cifmw_openshift_kubeconfig }}" cifmw_path: "{{ cifmw_path }}" openstack_namespace: "{{ cifmw_install_yamls_defaults['NAMESPACE'] }}" diff --git a/update-edpm.yml b/update-edpm.yml index cad55172d2..573e27f446 100644 --- a/update-edpm.yml +++ b/update-edpm.yml @@ -16,7 +16,7 @@ - name: Import update related playbook ansible.builtin.import_playbook: playbooks/update.yml - when: cifmw_run_update | default('false') | bool + when: cifmw_run_update | default(false) | bool tags: - update @@ -44,6 +44,6 @@ - name: Run log related tasks ansible.builtin.import_playbook: playbooks/99-logs.yml - when: not zuul_log_collection | default('false') | bool + when: not zuul_log_collection | default(false) | bool tags: - logs From 45bbd8df1ba5c4b99a89ab8e36d1a95ee5898083 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 11 Aug 2025 10:30:57 +0200 Subject: [PATCH 258/480] Support podified-ci-testing-tcib repo in ebi role This DRLN tag would be used just for building the containers, so the `podified-ci-testing` then will be only updated after we ensure we can properly build the containers. The reason for this is that we have a lot of jobs that expect to find container tag for `podified-ci-testing` already in repository, while the containers may never be pushed due to tcib issues. The intermediate tag would allow us to bypass this without redesigning all existing jobs. 
--- roles/edpm_build_images/tasks/post.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/edpm_build_images/tasks/post.yaml b/roles/edpm_build_images/tasks/post.yaml index 8c958531d8..c4066986e3 100644 --- a/roles/edpm_build_images/tasks/post.yaml +++ b/roles/edpm_build_images/tasks/post.yaml @@ -12,7 +12,8 @@ - ironic-python-agent - name: Retag and push the images with podified-ci-testing tag - when: cifmw_repo_setup_promotion == "podified-ci-testing" + when: cifmw_repo_setup_promotion + in ("podified-ci-testing", "podified-ci-testing-tcib") block: - name: Retag the images with podified-ci-testing tag containers.podman.podman_tag: From 12a1914d0ac46227a06c8fcf1c9135e85114677c Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Thu, 7 Aug 2025 13:54:56 +0200 Subject: [PATCH 259/480] Add option to update Watcher containers in OpenStackVersion Watcher is now integrated into the openstack-controlplane [1]. This patch is adding a new option to the update_containers role to enable updating the watcher containers in the OpenStackVersion based on the same parameters as the rest of OpenStack operators. I am adding them with a different parameter cifmw_update_containers_watcher for two reason: - In case ci-framework is also used in environments where watcher is still not integrated. 
- In watcher-operator pipelines there are some jobs where it is desired to update only the watcher containers while keeping the rest by default Signed-off-by: Alfredo Moralejo --- docs/dictionary/en-custom.txt | 1 + roles/edpm_prepare/tasks/kustomize_and_deploy.yml | 4 +++- roles/update_containers/README.md | 3 ++- roles/update_containers/defaults/main.yml | 1 + roles/update_containers/templates/update_containers.j2 | 5 +++++ 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index d308eed2c0..e52ac936e8 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -409,6 +409,7 @@ openstackdataplanenodeset openstackdataplanenodesets openstackprovisioner openstacksdk +openstackversion operatorgroup opn orchestrator diff --git a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml index fc4d24fb71..335647dbf6 100644 --- a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml +++ b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml @@ -33,7 +33,9 @@ (cifmw_update_containers_edpm_image_url is defined) or (cifmw_update_containers_ansibleee_image_url is defined) or ((cifmw_update_containers_openstack is defined and - cifmw_update_containers_openstack | bool)) + cifmw_update_containers_openstack | bool)) or + ((cifmw_update_containers_watcher is defined and + cifmw_update_containers_watcher | bool)) vars: cifmw_update_containers_metadata: "{{ _ctlplane_name }}" ansible.builtin.include_role: diff --git a/roles/update_containers/README.md b/roles/update_containers/README.md index 0df26d7fe2..99634e8124 100644 --- a/roles/update_containers/README.md +++ b/roles/update_containers/README.md @@ -26,7 +26,8 @@ If apply, please explain the privilege escalation done in this role. * `cifmw_update_containers_edpm_image_url`: Full EDPM Image url for updating EDPM OS image. 
* `cifmw_update_containers_ipa_image_url`: Full Ironic Python Agent url needed in Ironic specific podified deployment * `cifmw_update_containers_rollback`: Rollback the container update changes. Default to `false`. It will be used with cleanup. -* `cifmw_update_containers_barbican_custom_tag: Custom tag for barbican API and worker images. Used for HSM deployments. +* `cifmw_update_containers_barbican_custom_tag`: Custom tag for barbican API and worker images. Used for HSM deployments. +* `cifmw_update_containers_watcher`: Whether to update the Watcher service containers in the openstackversion. Default to `false`. ## Examples ### 1 - Update OpenStack container diff --git a/roles/update_containers/defaults/main.yml b/roles/update_containers/defaults/main.yml index e7eeccefde..90aa83d19b 100644 --- a/roles/update_containers/defaults/main.yml +++ b/roles/update_containers/defaults/main.yml @@ -44,6 +44,7 @@ cifmw_update_containers_cindervolumes: - default cifmw_update_containers_manilashares: - default +cifmw_update_containers_watcher: false # cifmw_update_containers_ansibleee_image_url: # cifmw_update_containers_edpm_image_url: # cifmw_update_containers_ipa_image_url: diff --git a/roles/update_containers/templates/update_containers.j2 b/roles/update_containers/templates/update_containers.j2 index 1b73aa774c..6d0fb6a099 100644 --- a/roles/update_containers/templates/update_containers.j2 +++ b/roles/update_containers/templates/update_containers.j2 @@ -108,3 +108,8 @@ spec: {% if cifmw_update_containers_agentimage is defined %} agentImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-baremetal-operator-agent:{{ cifmw_update_containers_tag }} {% endif %} +{% if cifmw_update_containers_watcher | bool %} + watcherAPIImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-watcher-api:{{ cifmw_update_containers_tag }} + watcherApplierImage: 
{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-watcher-applier:{{ cifmw_update_containers_tag }} + watcherDecisionEngineImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-watcher-decision-engine:{{ cifmw_update_containers_tag }} +{% endif %} From 48a8dddbd9fff09bb8b64e5e1bb596c0294dd819 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Mon, 21 Jul 2025 15:45:45 +0200 Subject: [PATCH 260/480] Allow start infra VMs early Override no start for VMs layout except those explicitly configured to start early. Signed-off-by: Bohdan Dobrelia --- create-infra.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/create-infra.yml b/create-infra.yml index 6aac66b364..1ab3524866 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -31,17 +31,20 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true tasks: - # Inject "start: false" in the layout to not start any VM yet. + # Inject "start: false" in the layout to not start any VM yet, + # except vms explicitly configured to start early. # Starting the VM will be done later, either by the tool deploying # OSP, or the one deploy RHOSO. # VM initial configuration, when managed, is done using cloud-init. 
- name: Ensure no VM is started when we create them during this run vars: _no_start: >- - {% set _vms = {} -%} - {% for _type in _cifmw_libvirt_manager_layout.vms.keys() -%} - {% set _ = _vms.update({_type: {'start': false}}) -%} - {% endfor -%} + {% set _vms = {} -%} + {% for _type in _cifmw_libvirt_manager_layout.vms.keys() -%} + {% if not (_cifmw_libvirt_manager_layout.vms[_type]["start"] | default(false)) -%} + {% set _ = _vms.update({_type: {'start': false}}) -%} + {% endif -%} + {% endfor -%} {{ _vms }} ansible.builtin.set_fact: _cifmw_libvirt_manager_layout: >- From a3d29154c8918a9e316952bceb5bfaefea04637d Mon Sep 17 00:00:00 2001 From: rabi Date: Wed, 30 Jul 2025 06:55:26 +0530 Subject: [PATCH 261/480] Fix ansible 2.19 jinja incompatibility Signed-off-by: rabi --- hooks/playbooks/fetch_compute_facts.yml | 2 +- .../ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/playbooks/fetch_compute_facts.yml b/hooks/playbooks/fetch_compute_facts.yml index 8be29fdf61..de5224bd02 100644 --- a/hooks/playbooks/fetch_compute_facts.yml +++ b/hooks/playbooks/fetch_compute_facts.yml @@ -211,7 +211,7 @@ --- {% set mtu_list = [ctlplane_mtu] %} {% for network in nodeset_networks %} - {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {% set _ = mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) %} {%- endfor %} {% set min_viable_mtu = mtu_list | max %} network_config: diff --git a/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 index 90dc6a16b6..70ff62e84d 100644 --- a/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 +++ b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 @@ -40,7 +40,7 @@ data: --- {% set mtu_list = [ctlplane_mtu] %} {% for network in nodeset_networks %} - {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + 
{% set _ = mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) %} {%- endfor %} {% set min_viable_mtu = mtu_list | max %} network_config: From 5179a3b68004e3097b8d1b605fea2ee181a9a57e Mon Sep 17 00:00:00 2001 From: Roman Safronov Date: Mon, 28 Jul 2025 14:19:21 +0300 Subject: [PATCH 262/480] Add registry.k8s.io to allowed registries Openstack operator needs access to registry.k8s.io for downloading kube-state-metrics image [1]. [1] https://github.com/openstack-k8s-operators/openstack-operator/blob/main/config/operator/default_images.yaml#L47 --- roles/openshift_setup/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/openshift_setup/tasks/main.yml b/roles/openshift_setup/tasks/main.yml index 38315d3aa6..59e52ffde9 100644 --- a/roles/openshift_setup/tasks/main.yml +++ b/roles/openshift_setup/tasks/main.yml @@ -174,6 +174,7 @@ default_allowed_registries: - "quay.io" - "gcr.io" + - "registry.k8s.io" - "registry.redhat.io" - "registry-proxy.engineering.redhat.com" - "images.paas.redhat.com" From 28affd161d703c9773117fb792cc72221962c89e Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Fri, 25 Jul 2025 13:20:24 +0200 Subject: [PATCH 263/480] Change reproducer ANSIBLE_LOG_PATH With this we'll able to retrieve that log file --- roles/reproducer/tasks/configure_architecture.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/reproducer/tasks/configure_architecture.yml b/roles/reproducer/tasks/configure_architecture.yml index 29e1c5d377..3eb0e70d03 100644 --- a/roles/reproducer/tasks/configure_architecture.yml +++ b/roles/reproducer/tasks/configure_architecture.yml @@ -6,7 +6,7 @@ vars: run_directory: "{{ _cifmw_reproducer_framework_location }}" exports: - ANSIBLE_LOG_PATH: "~/ansible-deploy-architecture.log" + ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ci-framework-data/logs/ansible-deploy-architecture.log" default_extravars: - "@~/ci-framework-data/parameters/reproducer-variables.yml" - 
"@~/ci-framework-data/parameters/openshift-environment.yml" From 495e2a94b4455e0b220414fdaf142c5b843587ac Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 24 Jul 2025 16:20:52 +0530 Subject: [PATCH 264/480] Add README for cifmw_setup role Good to have a brief doc explaining the purpose of the role --- roles/cifmw_setup/README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 roles/cifmw_setup/README.md diff --git a/roles/cifmw_setup/README.md b/roles/cifmw_setup/README.md new file mode 100644 index 0000000000..5af39b1b27 --- /dev/null +++ b/roles/cifmw_setup/README.md @@ -0,0 +1,18 @@ +# cifmw_setup + +Generic role to contain various cifmw setup-related tasks. + +**NOTE:** Refrain from adding tasks that could have their own dedicated role. + +## Example + +Since this role does not contain `main.yml`, you must use `tasks_from` to select the specific task you want to run. + +```YAML +- name: Run cifmw_setup admin_setup.yml + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: admin_setup.yml + tags: + - admin-setup +``` From dcc938838db9899d8265a2edc9ab109c8d580272 Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Mon, 11 Aug 2025 16:13:33 +0000 Subject: [PATCH 265/480] [OSPCIX-1017] Add custom network-values template for HCI adoption scenario --- roles/ci_gen_kustomize_values/templates/hci-adoption | 1 + 1 file changed, 1 insertion(+) create mode 120000 roles/ci_gen_kustomize_values/templates/hci-adoption diff --git a/roles/ci_gen_kustomize_values/templates/hci-adoption b/roles/ci_gen_kustomize_values/templates/hci-adoption new file mode 120000 index 0000000000..eea3880a05 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/hci-adoption @@ -0,0 +1 @@ +hci \ No newline at end of file From 248d4c4dcd248b687a02a887fc3675fa5af897ea Mon Sep 17 00:00:00 2001 From: John Fulton Date: Wed, 6 Aug 2025 18:33:13 -0400 Subject: [PATCH 266/480] Do not fail if secret is not on remote host Add ignore_errors=true to task which 
checks if the remote secret is on the remote host so that the play will not fail if `stat` gets permission denied. Default the remote_src to false if the previous task does not set _ps_exists as expected. This is a follow to PR 3116, which helps to run reproducer playbook locally and not on remote host directly. Jira: https://issues.redhat.com/browse/OSPRH-18840 Signed-off-by: John Fulton --- roles/manage_secrets/tasks/_push_secret.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/manage_secrets/tasks/_push_secret.yml b/roles/manage_secrets/tasks/_push_secret.yml index 453811358b..7c339ad250 100644 --- a/roles/manage_secrets/tasks/_push_secret.yml +++ b/roles/manage_secrets/tasks/_push_secret.yml @@ -42,10 +42,11 @@ ansible.builtin.stat: path: "{{ _secret_file }}" register: _ps_exists + ignore_errors: true - name: Copy file to location ansible.builtin.copy: - remote_src: "{{ _ps_exists.stat.exists }}" + remote_src: "{{ _ps_exists.stat.exists | default(false) }}" dest: "{{ _secret_dest }}" src: "{{ _secret_file }}" mode: "0600" From ef0eaf10327658ac6460e13c653e28d37d1363b2 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Thu, 7 Aug 2025 08:58:22 -0400 Subject: [PATCH 267/480] Document test-operator key file parameter Recently, there was a problem with test-operator failing and the debugging exposed that it was because test-operator searches for the cifmw private key locally. Which means if it's being run on a remote host, it fails. This patch adds a warning in form of documentation to help users that might encounter this issue in the future. 
--- roles/test_operator/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 81c2ddbb7f..1bd9ba9351 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -6,6 +6,7 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ * `cifmw_test_operator_artifacts_basedir`: (String) Directory where we will have all test-operator related files. Default value: `{{ cifmw_basedir }}/tests/test_operator` which defaults to `~/ci-framework-data/tests/test_operator` * `cifmw_test_operator_namespace`: (String) Namespace inside which all the resources are created. Default value: `openstack` * `cifmw_test_operator_controller_namespace`: (String) Namespace inside which the test-operator-controller-manager is created. Default value: `openstack-operators` +* `cifmw_test_operator_controller_priv_key_file_path`: (String) Specifies the path to the CIFMW private key file. Note: Please ensure this file is available in the environment where the ci-framework test-operator role is executed. Default value: `~/.ssh/id_cifw` * `cifmw_test_operator_bundle`: (String) Full name of container image with bundle that contains the test-operator. Default value: `""` * `cifmw_test_operator_version`: (String) The commit hash corresponding to the version of test-operator the user wants to use. This parameter is only used when `cifmw_test_operator_bundle` is also set. * `cifmw_test_operator_timeout`: (Integer) Timeout in seconds for the execution of the tests. 
Default value: `3600` From 9fc08b2d93a48738e6cf02c95ee2071e013c2198 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Thu, 17 Jul 2025 13:02:42 +0200 Subject: [PATCH 268/480] Swap wait order for pod and deployment at openshift_obs role This patch is oriented to fix a race condition where sometimes observability-operator-pod raise: "AttributeError: ''NoneType'' object has no attribute ''status'''" Definetely waiting first for the deployment and then for the pod is more correct and might help in this situation, despite the fact that this is a rare race condition. --- roles/openshift_obs/tasks/main.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/openshift_obs/tasks/main.yml b/roles/openshift_obs/tasks/main.yml index 86688b90e4..40e2b67b1c 100644 --- a/roles/openshift_obs/tasks/main.yml +++ b/roles/openshift_obs/tasks/main.yml @@ -20,27 +20,27 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" state: present -- name: Wait for observability-operator pod +- name: Wait for observability operator deployment kubernetes.core.k8s_info: - kind: Pod + kind: Deployment namespace: openshift-operators - label_selectors: - - app.kubernetes.io/name = observability-operator + name: observability-operator wait: true wait_timeout: 300 wait_condition: - type: Ready + type: Available status: "True" kubeconfig: "{{ cifmw_openshift_kubeconfig }}" -- name: Wait for observability operator deployment +- name: Wait for observability-operator pod kubernetes.core.k8s_info: - kind: Deployment + kind: Pod namespace: openshift-operators - name: observability-operator + label_selectors: + - app.kubernetes.io/name = observability-operator wait: true wait_timeout: 300 wait_condition: - type: Available + type: Ready status: "True" kubeconfig: "{{ cifmw_openshift_kubeconfig }}" From cddba525607e728620c021f067fa8041fd9c0ed0 Mon Sep 17 00:00:00 2001 From: lkuchlan Date: Sun, 27 Jul 2025 15:12:26 +0300 Subject: [PATCH 269/480] Add generic hooks for RBD 
mirror replication and Cinder replication configuration This commit introduces hook playbooks to enable RBD mirror replication between Ceph clusters and configure Cinder volume replication using configurable variables for multi-site deployment scenarios. The playbooks: - enable_rbd_mirror_replication.yml: Sets up RBD mirroring infrastructure - apply_cinder_replication_kustomization.yml: Configures and applies Cinder replication Key features: - Variable-driven configuration (no hardcoded values) - Creates and exchanges bootstrap tokens between configurable clusters - Dynamically appends replication settings to existing Cinder backend config - Direct Kubernetes API application for reliable configuration deployment - Handles cephadm container filesystem mapping correctly - Includes proper validation and error handling - Single-hook workflow eliminating file dependency issues The hooks integrate into post-deployment workflows with scenario-specific variable files to enable cross-site Ceph replication for disaster recovery. 
--- ...apply_cinder_replication_kustomization.yml | 73 ++++++++ .../enable_rbd_mirror_replication.yml | 160 ++++++++++++++++++ 2 files changed, 233 insertions(+) create mode 100644 hooks/playbooks/apply_cinder_replication_kustomization.yml create mode 100644 hooks/playbooks/enable_rbd_mirror_replication.yml diff --git a/hooks/playbooks/apply_cinder_replication_kustomization.yml b/hooks/playbooks/apply_cinder_replication_kustomization.yml new file mode 100644 index 0000000000..8626e0b932 --- /dev/null +++ b/hooks/playbooks/apply_cinder_replication_kustomization.yml @@ -0,0 +1,73 @@ +--- +- name: Configure and Apply Cinder Replication + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Find all ceph variable files + register: _ceph_vars_files + ansible.builtin.find: + paths: "/tmp" + patterns: "{{ cifmw_ceph_client_pattern | default('ceph_client_az*.yml') }}" + recurse: false + + - name: Load all ceph vars from files + loop: "{{ _ceph_vars_files.files | map(attribute='path') | list }}" + register: _ceph_vars + ansible.builtin.include_vars: + file: "{{ item }}" + + - name: Combine ceph variables into a list of dictionaries + loop: "{{ _ceph_vars.results }}" + ansible.builtin.set_fact: + _ceph_vars_list: "{{ _ceph_vars_list | default([]) | union([item.ansible_facts]) }}" + + - name: Get FSID for secondary cluster + ansible.builtin.set_fact: + secondary_fsid: "{{ _ceph_vars_list | selectattr('cifmw_ceph_client_cluster', 'equalto', cifmw_replication_secondary_cluster) | map(attribute='cifmw_ceph_client_fsid') | first }}" + + - name: Get current OpenStackControlPlane configuration + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: oc get openstackcontrolplane -n openstack -o yaml + register: current_controlplane_config + + - name: Parse existing Cinder backend configuration + ansible.builtin.set_fact: + existing_backend_config: "{{ 
current_controlplane_config.stdout | from_yaml | json_query('items[0].spec.cinder.template.cinderVolumes.' + cifmw_replication_primary_backend + '.customServiceConfig') }}" + + - name: Add replication device line to existing configuration + ansible.builtin.set_fact: + updated_backend_config: | + {{ existing_backend_config | trim }} + replication_device = backend_id:{{ cifmw_replication_backend_id | default('replication') }},conf:/etc/ceph/{{ cifmw_replication_secondary_cluster }}.conf,user:{{ cifmw_replication_ceph_user | default('openstack') }},secret_uuid:{{ secondary_fsid }} + + - name: Build cinder volumes configuration directly + ansible.builtin.set_fact: + cinder_volumes_dict: "{{ cinder_volumes_dict | default({}) | combine({cifmw_replication_primary_backend: {'customServiceConfig': updated_backend_config}}) }}" + + - name: Apply replication configuration directly + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + kubernetes.core.k8s: + state: present + definition: + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: controlplane + namespace: openstack + spec: + cinder: + template: + cinderVolumes: "{{ cinder_volumes_dict }}" + merge_type: merge + + - name: Wait for OpenStackControlPlane to reconcile + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: oc wait --for=condition=Ready openstackcontrolplane/controlplane -n openstack --timeout=1200s diff --git a/hooks/playbooks/enable_rbd_mirror_replication.yml b/hooks/playbooks/enable_rbd_mirror_replication.yml new file mode 100644 index 0000000000..7a94941769 --- /dev/null +++ b/hooks/playbooks/enable_rbd_mirror_replication.yml @@ -0,0 +1,160 @@ +--- +- name: Add Ceph replication target hosts to one group + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + tasks: + # Load Ceph client variables to get FSIDs for each cluster + - 
name: Find all ceph variable files + register: _ceph_vars_files + ansible.builtin.find: + paths: "/tmp" + patterns: "{{ cifmw_ceph_client_pattern | default('ceph_client_az*.yml') }}" + recurse: false + + - name: Load all ceph vars from files + loop: "{{ _ceph_vars_files.files | map(attribute='path') | list }}" + register: _ceph_vars + ansible.builtin.include_vars: + file: "{{ item }}" + + - name: Combine ceph variables into a list of dictionaries + loop: "{{ _ceph_vars.results }}" + ansible.builtin.set_fact: + _ceph_vars_list: "{{ _ceph_vars_list | default([]) | union([item.ansible_facts]) }}" + + - name: Get FSID for primary cluster + ansible.builtin.set_fact: + primary_fsid: "{{ _ceph_vars_list | selectattr('cifmw_ceph_client_cluster', 'equalto', cifmw_replication_primary_cluster) | map(attribute='cifmw_ceph_client_fsid') | first }}" + + - name: Get FSID for secondary cluster + ansible.builtin.set_fact: + secondary_fsid: "{{ _ceph_vars_list | selectattr('cifmw_ceph_client_cluster', 'equalto', cifmw_replication_secondary_cluster) | map(attribute='cifmw_ceph_client_fsid') | first }}" + + - name: Add primary host to ceph_replication_targets + ansible.builtin.add_host: + name: "{{ groups[cifmw_replication_primary_group] | first }}" + groups: ceph_replication_targets + site_role: primary + ceph_fsid: "{{ primary_fsid }}" + + - name: Add secondary host to ceph_replication_targets + ansible.builtin.add_host: + name: "{{ groups[cifmw_replication_secondary_group] | first }}" + groups: ceph_replication_targets + site_role: secondary + ceph_fsid: "{{ secondary_fsid }}" + +- name: Enable mirroring and setup peer between clusters + hosts: ceph_replication_targets + become: true + vars: + # Host filesystem paths (what Ansible sees) + bootstrap_token_path_host: /tmp/bootstrap_token_site + token_tmp_path: /tmp/rbd_mirror_bootstrap_token + # Container filesystem paths (what cephadm container sees) + bootstrap_token_path_container: /rootfs/tmp/bootstrap_token_site + # Configurable 
pool name + replication_pool: "{{ cifmw_replication_pool | default('volumes') }}" + tasks: + # Add validation that cephadm is available + - name: Verify cephadm is available + ansible.builtin.command: + cmd: which cephadm + register: cephadm_check + failed_when: false + changed_when: false + + - name: Fail if cephadm not found + ansible.builtin.fail: + msg: "cephadm command not found on {{ inventory_hostname }}" + when: cephadm_check.rc != 0 + + - name: Enable image mirroring + ansible.builtin.command: + cmd: cephadm shell -- rbd mirror pool enable {{ replication_pool }} image + register: enable_mirror_result + failed_when: enable_mirror_result.rc != 0 + changed_when: "'already enabled' not in enable_mirror_result.stderr" + + - name: Create bootstrap token (only on primary) + ansible.builtin.shell: + cmd: cephadm shell -- sh -c "rbd mirror pool peer bootstrap create --site-name {{ ceph_fsid }} {{ replication_pool }}" > {{ bootstrap_token_path_host }} + when: site_role == "primary" + register: create_token_result + + - name: Verify token file was created on primary + ansible.builtin.stat: + path: "{{ bootstrap_token_path_host }}" + register: token_file_stat + when: site_role == "primary" + + - name: Fail if token creation failed + ansible.builtin.fail: + msg: "Bootstrap token file was not created successfully" + when: + - site_role == "primary" + - not token_file_stat.stat.exists + + - name: Fetch token from primary + ansible.builtin.fetch: + src: "{{ bootstrap_token_path_host }}" + dest: "{{ token_tmp_path }}" + flat: true + when: site_role == "primary" + + - name: Verify token file exists on controller (debug) + ansible.builtin.stat: + path: "{{ token_tmp_path }}" + register: controller_token_stat + delegate_to: localhost + when: site_role == "secondary" + + - name: Fail if token not available on controller + ansible.builtin.fail: + msg: "Bootstrap token file not found on controller at {{ token_tmp_path }}" + when: + - site_role == "secondary" + - not 
controller_token_stat.stat.exists + + - name: Copy token to secondary + ansible.builtin.copy: + src: "{{ token_tmp_path }}" + dest: "{{ bootstrap_token_path_host }}" + mode: '0600' + owner: root + group: root + when: site_role == "secondary" + + - name: Verify token file was copied to secondary + ansible.builtin.stat: + path: "{{ bootstrap_token_path_host }}" + register: secondary_token_stat + when: site_role == "secondary" + + - name: Fail if token copy failed + ansible.builtin.fail: + msg: "Bootstrap token file was not copied to secondary at {{ bootstrap_token_path_host }}" + when: + - site_role == "secondary" + - not secondary_token_stat.stat.exists + + - name: Import token (only on secondary) - using container path + ansible.builtin.command: + cmd: cephadm shell -- rbd mirror pool peer bootstrap import --site-name {{ ceph_fsid }} {{ replication_pool }} {{ bootstrap_token_path_container }} + when: site_role == "secondary" + register: import_token_result + failed_when: import_token_result.rc != 0 + + # Cleanup files + - name: Clean up token file on remote hosts + ansible.builtin.file: + path: "{{ bootstrap_token_path_host }}" + state: absent + when: site_role in ['primary', 'secondary'] + + - name: Clean up controller file + ansible.builtin.file: + path: "{{ token_tmp_path }}" + state: absent + delegate_to: localhost + run_once: true From 2b52784fa3d0619f4527ce32a398d0666d0e2aaa Mon Sep 17 00:00:00 2001 From: mkatari Date: Wed, 2 Jul 2025 19:28:01 +0530 Subject: [PATCH 270/480] update ipv6 when conditions ipv6 task when conditions should rely on ansible_all_ipv6_addresses but not ansible_all_ipv4_addresses parameter. 
--- playbooks/ceph.yml | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index e06bd772c3..020921de74 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -161,7 +161,9 @@ ansible.builtin.meta: end_play - name: Set IPv4 facts - when: ansible_all_ipv4_addresses | length > 0 + when: + - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) ansible.builtin.set_fact: ssh_network_range: 192.168.122.0/24 # storage_network_range: 172.18.0.0/24 @@ -171,7 +173,9 @@ ms_bind_ipv6: false - name: Set IPv6 facts - when: ansible_all_ipv4_addresses | length == 0 + when: + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) ansible.builtin.set_fact: ssh_network_range: "2620:cf:cf:aaaa::/64" # storage_network_range: "2620:cf:cf:cccc::/64" @@ -210,6 +214,7 @@ when: - cifmw_networking_env_definition is defined - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) ansible.builtin.set_fact: storage_network_range: >- {{ @@ -223,7 +228,8 @@ - name: Set IPv6 network ranges vars when: - cifmw_networking_env_definition is defined - - ansible_all_ipv4_addresses | length == 0 + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) ansible.builtin.set_fact: storage_network_range: >- {{ @@ -310,13 +316,17 @@ ansible.builtin.meta: end_play - name: Set IPv4 facts - when: ansible_all_ipv4_addresses | length > 0 + when: + - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) ansible.builtin.set_fact: all_addresses: ansible_all_ipv4_addresses cidr: 24 - name: Set IPv6 facts - when: ansible_all_ipv4_addresses | length == 0 + when: + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) ansible.builtin.set_fact: all_addresses: ansible_all_ipv6_addresses cidr: 64 From 69685213d24468def90a702c05cc2076b6e18f3b Mon Sep 17 00:00:00 2001 From: mkatari Date: Thu, 7 Aug 2025 00:24:15 +0530 Subject: 
[PATCH 271/480] Add support for nfs ganesha adoption This patch adds support for deploying nfs ganesha during 17.1 deployment in the adoption context. JIRA: https://issues.redhat.com/browse/OSPRH-17207 --- playbooks/ceph.yml | 23 +++++++++++++++++++++++ roles/cifmw_cephadm/tasks/pools.yml | 6 ++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml index 020921de74..dee6093e77 100644 --- a/playbooks/ceph.yml +++ b/playbooks/ceph.yml @@ -351,6 +351,29 @@ pools: "{{ cifmw_cephadm_pools | map(attribute='name') | list }}" no_log: true + # for deploying external ceph for 17.1 using cifmw, we need this playbook to create keyring + # for manila client and manila_data pool + - name: Add client.manila key and manila_data pool for tripleo deployment + ansible.builtin.set_fact: + cifmw_cephadm_keys: "{{ cifmw_cephadm_keys + [ manila_key ] }}" + cifmw_cephadm_pools: "{{ cifmw_cephadm_pools + [ manila_pool ] }}" + vars: + manila_key: + name: client.manila + key: "{{ cephx.key }}" + mode: '0600' + caps: + mgr: allow rw + mon: allow r + osd: allow rw pool=manila_data + manila_pool: + name: manila_data + target_size_ratio: 0.1 + pg_autoscale_mode: true + application: cephfs + when: adoption_deploy_ceph_for_tripleo | default (false) + no_log: true + # public network always exist because is provided by the ceph_spec role - name: Get Storage network range ansible.builtin.set_fact: diff --git a/roles/cifmw_cephadm/tasks/pools.yml b/roles/cifmw_cephadm/tasks/pools.yml index d88ff0525d..bfc9007001 100644 --- a/roles/cifmw_cephadm/tasks/pools.yml +++ b/roles/cifmw_cephadm/tasks/pools.yml @@ -24,7 +24,9 @@ ansible.builtin.include_tasks: ceph_cli.yml - name: Create RBD pools - when: item.application == 'rbd' + # Good to have pool creation for cephfs application type so that + # it helps nfs ganesha in adoption context + when: item.application in ['rbd', 'cephfs'] ansible.builtin.command: cmd: >- {{ cifmw_cephadm_ceph_cli }} @@ -36,7 
+38,7 @@ changed_when: false - name: Enable application on Ceph RBD pools - when: item.application == 'rbd' + when: item.application in ['rbd', 'cephfs'] ansible.builtin.command: cmd: >- {{ cifmw_cephadm_ceph_cli }} From 52a50a410a72dc1a1098dada59b9043164e0384e Mon Sep 17 00:00:00 2001 From: Jiri Stransky Date: Fri, 8 Aug 2025 16:32:13 +0200 Subject: [PATCH 272/480] Introduce the split update (alternative to monolithic) This commit refactors the update role to support two distinct update variants, controlled by the new `cifmw_update_variant` variable. The variants are: - `monolithic`: (Default) Preserves the existing behavior by running the single `make openstack_update_run` target. - `split`: Implements a new two-phase update by running the `make update_services` and `make update_system` targets. To achieve this, the main task has been modified to dynamically include either `update_variant_monolithic.yml` or `update_variant_split.yml` based on the selected variant. Co-authored-by: aider (gemini/gemini-2.5-pro) --- roles/update/README.md | 1 + roles/update/defaults/main.yml | 5 +++++ roles/update/tasks/main.yml | 12 +++--------- .../update/tasks/update_variant_monolithic.yml | 9 +++++++++ roles/update/tasks/update_variant_split.yml | 18 ++++++++++++++++++ 5 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 roles/update/tasks/update_variant_monolithic.yml create mode 100644 roles/update/tasks/update_variant_split.yml diff --git a/roles/update/README.md b/roles/update/README.md index 3a199a8c8b..97f1fc1d9c 100644 --- a/roles/update/README.md +++ b/roles/update/README.md @@ -6,6 +6,7 @@ Role to run update * `cifmw_update_openstack_update_run_operators_updated`: (Boolean) Set if openstack_update_run make target should not modify openstack-operator csv to fake openstack services container change. Default to `True`. * `cifmw_update_openstack_update_run_target_version`: (String) Define openstack target version to run update to. 
* `cifmw_update_openstack_update_run_timeout`: (String) Define `oc wait` global timeout passed to each step of update procedure. It should be a value of a longest step of the procedure. Defaults to `600s`. +* `cifmw_update_variant`: (String) Defines the update procedure. Can be `'monolithic'` for a single update step, or `'split'` for a two-step (services, system) update. Defaults to `'monolithic'`. * `cifmw_update_run_dryrun`: (Boolean) Do a dry run on make openstack_update_run command. Defaults to `False`. * `cifmw_update_ping_test`: (Boolean) Activate the ping test during update. Default to `False`. * `cifmw_update_create_volume`: (Boolean) Attach a volume to the test OS instance when set to true. Default to `False` diff --git a/roles/update/defaults/main.yml b/roles/update/defaults/main.yml index 38b171087c..384f47cc07 100644 --- a/roles/update/defaults/main.yml +++ b/roles/update/defaults/main.yml @@ -24,6 +24,11 @@ cifmw_update_openstack_update_run_containers_namespace: "podified-antelope-cento cifmw_update_openstack_update_run_containers_target_tag: "current-podified" cifmw_update_openstack_update_run_timeout: "600s" +# Update variant. Can be 'monolithic' or 'split'. +# 'monolithic' uses the single openstack_update_run make target. +# 'split' uses the update_services and update_system make targets. 
+cifmw_update_variant: "monolithic" + # Avoid certain tasks during molecule run cifmw_update_run_dryrun: false diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index 7fb74f76e5..e54fcb7db2 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -149,7 +149,7 @@ tags: - always ansible.builtin.set_fact: - _make_openstack_update_run_params: | + _make_update_params: | TIMEOUT: {{ cifmw_update_openstack_update_run_timeout }} {% if _cifmw_update_use_fake_update | bool -%} FAKE_UPDATE: true @@ -180,14 +180,8 @@ {{ cifmw_update_artifacts_basedir }}/update_event.sh Update to start the Update sequence -- name: Run make openstack_update_run - vars: - make_openstack_update_run_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" - make_openstack_update_run_params: "{{ _make_openstack_update_run_params | from_yaml }}" - make_openstack_update_run_dryrun: "{{ cifmw_update_run_dryrun | bool }}" - ansible.builtin.include_role: - name: 'install_yamls_makes' - tasks_from: 'make_openstack_update_run' +- name: Run the selected update variant + ansible.builtin.include_tasks: "update_variant_{{ cifmw_update_variant }}.yml" - name: Set update step to Update Sequence complete ansible.builtin.command: diff --git a/roles/update/tasks/update_variant_monolithic.yml b/roles/update/tasks/update_variant_monolithic.yml new file mode 100644 index 0000000000..ff15aa1c60 --- /dev/null +++ b/roles/update/tasks/update_variant_monolithic.yml @@ -0,0 +1,9 @@ +--- +- name: Run make openstack_update_run + vars: + make_openstack_update_run_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" + make_openstack_update_run_params: "{{ _make_update_params | from_yaml }}" + make_openstack_update_run_dryrun: "{{ cifmw_update_run_dryrun | bool }}" + ansible.builtin.include_role: + name: 'install_yamls_makes' + tasks_from: 'make_openstack_update_run' diff --git a/roles/update/tasks/update_variant_split.yml 
b/roles/update/tasks/update_variant_split.yml new file mode 100644 index 0000000000..32f977dd4e --- /dev/null +++ b/roles/update/tasks/update_variant_split.yml @@ -0,0 +1,18 @@ +--- +- name: Run make update_services + vars: + make_update_services_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" + make_update_services_params: "{{ _make_update_params | from_yaml }}" + make_update_services_dryrun: "{{ cifmw_update_run_dryrun | bool }}" + ansible.builtin.include_role: + name: 'install_yamls_makes' + tasks_from: 'make_update_services' + +- name: Run make update_system + vars: + make_update_system_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" + make_update_system_params: "{{ _make_update_params | from_yaml }}" + make_update_system_dryrun: "{{ cifmw_update_run_dryrun | bool }}" + ansible.builtin.include_role: + name: 'install_yamls_makes' + tasks_from: 'make_update_system' From e4e342cd8b47543ab0a614f919a8c4f8429c123d Mon Sep 17 00:00:00 2001 From: Jiri Stransky Date: Mon, 11 Aug 2025 14:23:33 +0200 Subject: [PATCH 273/480] Better update event markers for monolithic/split variants --- roles/update/tasks/main.yml | 12 ---------- .../tasks/update_variant_monolithic.yml | 12 ++++++++++ roles/update/tasks/update_variant_split.yml | 24 +++++++++++++++++++ 3 files changed, 36 insertions(+), 12 deletions(-) diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index e54fcb7db2..49e21a576c 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -174,21 +174,9 @@ default(cifmw_update_openstack_update_run_target_version) }} -- name: Set update step to About to start the Update sequence - ansible.builtin.command: - cmd: > - {{ cifmw_update_artifacts_basedir }}/update_event.sh - Update to start the Update sequence - - name: Run the selected update variant ansible.builtin.include_tasks: "update_variant_{{ cifmw_update_variant }}.yml" -- name: Set update step to Update Sequence complete 
- ansible.builtin.command: - cmd: > - {{ cifmw_update_artifacts_basedir }}/update_event.sh - Update Sequence complete - - name: Stop the ping test ansible.builtin.include_tasks: l3_agent_connectivity_check_stop.yml when: diff --git a/roles/update/tasks/update_variant_monolithic.yml b/roles/update/tasks/update_variant_monolithic.yml index ff15aa1c60..c8eb306277 100644 --- a/roles/update/tasks/update_variant_monolithic.yml +++ b/roles/update/tasks/update_variant_monolithic.yml @@ -1,4 +1,10 @@ --- +- name: Set update step to Starting the update sequence + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Starting the update sequence + - name: Run make openstack_update_run vars: make_openstack_update_run_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" @@ -7,3 +13,9 @@ ansible.builtin.include_role: name: 'install_yamls_makes' tasks_from: 'make_openstack_update_run' + +- name: Set update step to Update sequence complete + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Update sequence complete diff --git a/roles/update/tasks/update_variant_split.yml b/roles/update/tasks/update_variant_split.yml index 32f977dd4e..f366a1a481 100644 --- a/roles/update/tasks/update_variant_split.yml +++ b/roles/update/tasks/update_variant_split.yml @@ -1,4 +1,10 @@ --- +- name: Set update step to Starting the services update sequence + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Starting the services update sequence + - name: Run make update_services vars: make_update_services_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" @@ -8,6 +14,18 @@ name: 'install_yamls_makes' tasks_from: 'make_update_services' +- name: Set update step to Services update sequence complete + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Services update sequence complete + +- name: Set 
update step to Starting the system update sequence + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + Starting the system update sequence + - name: Run make update_system vars: make_update_system_env: "{{ cifmw_install_yamls_environment | combine({'PATH': cifmw_path }) }}" @@ -16,3 +34,9 @@ ansible.builtin.include_role: name: 'install_yamls_makes' tasks_from: 'make_update_system' + +- name: Set update step to System update sequence complete + ansible.builtin.command: + cmd: > + {{ cifmw_update_artifacts_basedir }}/update_event.sh + System update sequence complete From cc87f54332d129fc6af4e048870bfd293c1b7318 Mon Sep 17 00:00:00 2001 From: bshewale Date: Tue, 3 Jun 2025 15:29:45 +0530 Subject: [PATCH 274/480] Use role instead of playbooks - 99-logs.yml It is continuation of simplification job execution [1]. [1]: https://github.com/openstack-k8s-operators/ci-framework/pull/2929 --- .../content_provider/content_provider.yml | 18 +-- ci/playbooks/e2e-collect-logs.yml | 18 ++- .../edpm_build_images_content_provider.yaml | 18 +-- ci/playbooks/kuttl/e2e-kuttl.yml | 18 +-- .../meta_content_provider.yml | 18 +-- ci/playbooks/tcib/tcib.yml | 18 +-- playbooks/99-logs.yml | 5 + post-deployment.yml | 12 +- roles/cifmw_setup/tasks/run_logs.yml | 104 ++++++++++++++++++ update-edpm.yml | 16 ++- 10 files changed, 186 insertions(+), 59 deletions(-) create mode 100644 roles/cifmw_setup/tasks/run_logs.yml diff --git a/ci/playbooks/content_provider/content_provider.yml b/ci/playbooks/content_provider/content_provider.yml index 3bd34b4189..9034fa2f4f 100644 --- a/ci/playbooks/content_provider/content_provider.yml +++ b/ci/playbooks/content_provider/content_provider.yml @@ -54,12 +54,12 @@ mode: "0644" - name: Run log related tasks - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '99-logs.yml' - ] | ansible.builtin.path_join - }} + 
hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 63371fe3ba..2afd68f488 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -20,12 +20,18 @@ - not cifmw_status.stat.exists ansible.builtin.meta: end_host - - name: Run log collection - ansible.builtin.command: - chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" - cmd: >- - ansible-playbook playbooks/99-logs.yml - -e @scenarios/centos-9/base.yml +- name: Run log related tasks + hosts: "{{ cifmw_target_host | default('all') }}" + gather_facts: true + vars_files: + - ../../scenarios/centos-9/base.yml + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs - name: "Run ci/playbooks/collect-logs.yml on CRC host" hosts: crc diff --git a/ci/playbooks/edpm_build_images/edpm_build_images_content_provider.yaml b/ci/playbooks/edpm_build_images/edpm_build_images_content_provider.yaml index e1f8e4972f..cb1e390441 100644 --- a/ci/playbooks/edpm_build_images/edpm_build_images_content_provider.yaml +++ b/ci/playbooks/edpm_build_images/edpm_build_images_content_provider.yaml @@ -65,12 +65,12 @@ mode: "0644" - name: Run log related tasks - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '99-logs.yml' - ] | ansible.builtin.path_join - }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs diff --git a/ci/playbooks/kuttl/e2e-kuttl.yml b/ci/playbooks/kuttl/e2e-kuttl.yml index 7af2d758b6..c4b5653caf 
100644 --- a/ci/playbooks/kuttl/e2e-kuttl.yml +++ b/ci/playbooks/kuttl/e2e-kuttl.yml @@ -52,12 +52,12 @@ name: run_hook - name: Run log related tasks - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '99-logs.yml' - ] | ansible.builtin.path_join - }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs diff --git a/ci/playbooks/meta_content_provider/meta_content_provider.yml b/ci/playbooks/meta_content_provider/meta_content_provider.yml index b2696f0d00..5954f93e9c 100644 --- a/ci/playbooks/meta_content_provider/meta_content_provider.yml +++ b/ci/playbooks/meta_content_provider/meta_content_provider.yml @@ -143,12 +143,12 @@ mode: "0644" - name: Run log related tasks - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '99-logs.yml' - ] | ansible.builtin.path_join - }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs diff --git a/ci/playbooks/tcib/tcib.yml b/ci/playbooks/tcib/tcib.yml index ca1f6380e0..54a422ad8b 100644 --- a/ci/playbooks/tcib/tcib.yml +++ b/ci/playbooks/tcib/tcib.yml @@ -77,12 +77,12 @@ mode: "0644" - name: Run log related tasks - ansible.builtin.import_playbook: >- - {{ - [ - ansible_user_dir, - zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir, - 'playbooks', - '99-logs.yml' - ] | ansible.builtin.path_join - }} + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + 
- logs diff --git a/playbooks/99-logs.yml b/playbooks/99-logs.yml index 8f38935622..54bbe6d57b 100644 --- a/playbooks/99-logs.yml +++ b/playbooks/99-logs.yml @@ -1,3 +1,8 @@ +# +# NOTE: Playbook migrated to: cifmw_setup/tasks/run_logs.yml +# DO NOT EDIT THAT PLAYBOOK. IT WOULD BE REMOVED IN NEAR FUTURE. +# + - name: Logging playbook hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true diff --git a/post-deployment.yml b/post-deployment.yml index c15cd52b8b..a1bd51425e 100644 --- a/post-deployment.yml +++ b/post-deployment.yml @@ -61,6 +61,12 @@ mode: "0644" - name: Run log related tasks - ansible.builtin.import_playbook: playbooks/99-logs.yml - tags: - - logs + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logging + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml new file mode 100644 index 0000000000..2d0ef72130 --- /dev/null +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -0,0 +1,104 @@ +- name: Check if the logging requires + when: not zuul_log_collection | default('false') | bool + block: + - name: Ensure cifmw_basedir param is set + when: + - cifmw_basedir is not defined + ansible.builtin.set_fact: + cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" + + - name: Try to load parameters files + block: + - name: Check directory availabilty + register: param_dir + ansible.builtin.stat: + path: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Load parameters files + when: + - param_dir.stat.exists | bool + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + always: + - name: Set custom cifmw PATH reusable fact + when: + - cifmw_path is not defined + ansible.builtin.set_fact: + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + cacheable: 
true + + - name: Set destination folder for the logs + ansible.builtin.set_fact: + logfiles_dest_dir: >- + {{ + ( + cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data'), + 'logs/', + now(fmt='%Y-%m-%d_%H-%M') + ) | path_join + }} + - name: Generate artifacts + ansible.builtin.import_role: + name: artifacts + + - name: Collect container images used in the environment + ansible.builtin.import_role: + name: env_op_images + + - name: Create a versioned log folder + ansible.builtin.file: + path: "{{ logfiles_dest_dir }}" + state: directory + mode: "0775" + + - name: Return a list of log files in home directory + ansible.builtin.find: + paths: "{{ ansible_user_dir }}" + patterns: '*.log' + register: _log_files + + - name: Ensure ansible facts cache exists + register: ansible_facts_cache_state + ansible.builtin.stat: + path: "{{ ansible_user_dir }}/ansible_facts_cache" + + - name: Copy log files + when: + - _log_files.matched > 0 + block: + - name: Copy logs to proper location + ansible.builtin.copy: + src: "{{ item.path }}" + dest: "{{ [ logfiles_dest_dir , item.path | basename ] | path_join }}" + remote_src: true + mode: "0666" + loop: "{{ _log_files.files }}" + + - name: Remove original log from home directory + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ _log_files.files }}" + + - name: Copy Ansible facts if exists + when: + - ansible_facts_cache_state.stat.exists is defined + - ansible_facts_cache_state.stat.exists | bool + block: + - name: Copy facts to dated directory + ansible.builtin.copy: + src: "{{ ansible_user_dir }}/ansible_facts_cache" + dest: >- + {{ + ( + cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data'), + "artifacts/ansible_facts." 
+ now(fmt='%Y-%m-%d_%H-%M') + ) | path_join + }} + mode: "0777" + remote_src: true + + - name: Clean ansible fact cache + ansible.builtin.file: + path: "{{ ansible_user_dir }}/ansible_facts_cache" + state: absent diff --git a/update-edpm.yml b/update-edpm.yml index 573e27f446..fff0d65cf5 100644 --- a/update-edpm.yml +++ b/update-edpm.yml @@ -42,8 +42,14 @@ state: touch mode: "0644" -- name: Run log related tasks - ansible.builtin.import_playbook: playbooks/99-logs.yml - when: not zuul_log_collection | default(false) | bool - tags: - - logs +- name: Run logging + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Run logs + when: not zuul_log_collection | default(false) | bool + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs From 80dec54ffba4da4e04173a6fcef033ed4340be0f Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 18 Aug 2025 23:45:01 +0200 Subject: [PATCH 275/480] Add retries to more kustomize_deploy tasks We still observe the Internal Server Errors randomly occurring from the underlying OpenShift cluster. This commit extends the original attempt [1] that made the situation a little bit better in some places, such as when there is attempt to call webhook "nodenetworkconfigurationpolicies-mutate.nmstate.io". So, TL;DR we want to guard few more sections with retries, to give the cluster some more time to stabilize and reply with expected value. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/3068 --- roles/kustomize_deploy/tasks/execute_step.yml | 3 +++ .../tasks/install_operators.yml | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/roles/kustomize_deploy/tasks/execute_step.yml b/roles/kustomize_deploy/tasks/execute_step.yml index bfa89a3488..6478a63f6a 100644 --- a/roles/kustomize_deploy/tasks/execute_step.yml +++ b/roles/kustomize_deploy/tasks/execute_step.yml @@ -285,6 +285,9 @@ PATH: "{{ cifmw_path }}" ansible.builtin.command: cmd: "oc apply -f {{ _cr }}" + retries: 3 + delay: 60 + until: oc_apply is success - name: "Build Wait Conditions for {{ stage.path }}" when: diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 862c358ab4..b2545d23df 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -242,6 +242,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _controller_manager_pods + until: _controller_manager_pods is success - name: Wait for webhook-server pods kubernetes.core.k8s_info: @@ -255,6 +259,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _webhook_server_pods + until: _webhook_server_pods is success - name: Wait until NMstate operator resources are deployed kubernetes.core.k8s_info: @@ -272,6 +280,10 @@ cifmw_kustomize_deploy_check_mode | default(false, true) }} + retries: 3 + delay: 60 + register: _nmstate_operator_pods + until: _nmstate_operator_pods is success - name: Generate MetalLB kustomization file ansible.builtin.copy: @@ -314,6 +326,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _metallb_speaker_pods + until: _metallb_speaker_pods is success - name: Generate NMstate kustomization file ansible.builtin.copy: @@ -351,6 +367,10 @@ type: Ready status: "True" wait_timeout: 300 + retries: 3 + 
delay: 60 + register: _nmstate_handler_pods + until: _nmstate_handler_pods is success - name: Wait for NMstate webhook deployment kubernetes.core.k8s_info: @@ -363,6 +383,10 @@ type: Available status: "True" wait_timeout: 300 + retries: 3 + delay: 60 + register: _nmstate_webhook_pods + until: _nmstate_webhook_pods is success - name: Check if the OpenStack initialization CRD exists kubernetes.core.k8s_info: From 9b1958087ed594951221f90eee9afa7067ce0a1d Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Thu, 7 Aug 2025 14:07:51 +0200 Subject: [PATCH 276/480] Fix replacing osp network names Do not overwrite the replaced 'storage_mgmt' back to 'storagemgmt'. That happens, when the network_name is compared to 'internalapi' after its 'storagemgmt' was fixed. As this name is not 'internalapi', it gets rewritten back to the original network name. Signed-off-by: Bohdan Dobrelia --- roles/adoption_osp_deploy/tasks/config_files.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/config_files.yml b/roles/adoption_osp_deploy/tasks/config_files.yml index 8273db4ae8..28eab22e97 100644 --- a/roles/adoption_osp_deploy/tasks/config_files.yml +++ b/roles/adoption_osp_deploy/tasks/config_files.yml @@ -115,11 +115,11 @@ {%- set nodeport = {node: {}} -%} {% for network, net_info in _node_instance_net.networks.items() if network != 'ocpbm' %} {%- set subnet = cifmw_networking_env_definition.networks[network][network_version|default("network_v4")] -%} - {%- set network_name = ['storage_mgmt'] if network == 'storagemgmt' else [network] -%} - {%- set network_name = ['internal_api'] if network == 'internalapi' else [network] -%} + {%- set network_name = network.replace('storagemgmt', 'storage_mgmt') -%} + {%- set network_name = network_name.replace('internalapi', 'internal_api') -%} {%- set _ = nodeport[node].update( { - network_name[0]: { + network_name: { 'ip_address': net_info[ip_version|default("ip_v4")], 'ip_address_uri': 
net_info[ip_version|default("ip_v4")], 'ip_subnet': subnet From 08aa1a77a7581054f60b062e6bb01c641254f447 Mon Sep 17 00:00:00 2001 From: Alan Bishop Date: Wed, 13 Aug 2025 07:13:31 -0700 Subject: [PATCH 277/480] Support restarting iscsid running on host With [1] the iscsid service will move from running in a container onto the EDPM host. This PR ensures iscsid can be restarted regardless of where it's running. [1] https://github.com/openstack-k8s-operators/edpm-ansible/pull/984 Signed-off-by: Alan Bishop --- hooks/playbooks/compute-iscsi-config.yml | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/hooks/playbooks/compute-iscsi-config.yml b/hooks/playbooks/compute-iscsi-config.yml index 2bca333d3f..9dfd4a47ad 100644 --- a/hooks/playbooks/compute-iscsi-config.yml +++ b/hooks/playbooks/compute-iscsi-config.yml @@ -12,8 +12,27 @@ - 'node.session.initial_login_retry_max = 3' - 'node.conn[0].timeo.login_timeout = 5' - - name: Restart iscsid container to refresh /etcd/iscsid.conf + # Traditionally, iscsid runs in a container via the edpm_iscsid service, + # but there's an effort to move it onto the EDPM host. This restarts + # the daemon regardless of where it's running. 
+ + - name: Gather services facts + ansible.builtin.service_facts: + + - name: Restart iscsid container to refresh /etc/iscsi/iscsid.conf become: true - ansible.builtin.systemd: + ansible.builtin.systemd_service: name: edpm_iscsid state: restarted + when: + - ansible_facts.services["edpm_iscsid.service"] is defined + - ansible_facts.services["edpm_iscsid.service"]["status"] == "enabled" + + - name: Restart iscsid on the host to refresh /etc/iscsi/iscsid.conf + become: true + ansible.builtin.systemd_service: + name: iscsid + state: restarted + when: + - ansible_facts.services["iscsid.service"] is defined + - ansible_facts.services["iscsid.service"]["status"] == "enabled" From 9987028fba2d0607fc8efc1dd4b204b1ad7d02e7 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Tue, 19 Aug 2025 11:07:18 +0200 Subject: [PATCH 278/480] Add retries to openshift_obs deployment and pod We observe the following errors randomly occurring from `kubernetes.core.k8s_info` module: `AttributeError: ''NoneType'' object has no attribute ''status''`. This may be because the OpenShift cluster gets overwhelmed and sometimes the module receives the unexpected response, so in `custom_condition()` in `plugins/module_utils/k8s/waiter.py` there is no `status` field in `resource` [1]. As a mitigation, let's try retrying the task after some delay. 
[1] https://github.com/ansible-collections/kubernetes.core/blob/main/plugins/module_utils/k8s/waiter.py#L86 --- roles/openshift_obs/tasks/main.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/openshift_obs/tasks/main.yml b/roles/openshift_obs/tasks/main.yml index 40e2b67b1c..c5ceff1e40 100644 --- a/roles/openshift_obs/tasks/main.yml +++ b/roles/openshift_obs/tasks/main.yml @@ -31,6 +31,10 @@ type: Available status: "True" kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + retries: 3 + delay: 60 + register: _openshift_obs_deployment + until: _openshift_obs_deployment is success - name: Wait for observability-operator pod kubernetes.core.k8s_info: @@ -44,3 +48,7 @@ type: Ready status: "True" kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + retries: 3 + delay: 60 + register: _openshift_obs_pods + until: _openshift_obs_pods is success From 0e95afa34050e7b6537e7d6a229449097ed5277f Mon Sep 17 00:00:00 2001 From: bshewale Date: Wed, 20 Aug 2025 17:33:09 +0530 Subject: [PATCH 279/480] Fix to get os-must-gather logs Due to this change [1] os-must-gather logs was not logged because of some var issues. so reverting that partial part of code in this PR will fix the issue[2]. 
[1]: https://github.com/openstack-k8s-operators/ci-framework/pull/3032/files#diff-691dae244b4fb4bfc1270fb5d3b708c6be6073b0e33d997db15850edfa3fd976L23 [2]: https://logserver.rdoproject.org/88f/rdoproject.org/88f54e6882384b2a88e9bfadffa3ff02/controller/ci-framework-data/logs/ --- ci/playbooks/e2e-collect-logs.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 2afd68f488..63371fe3ba 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -20,18 +20,12 @@ - not cifmw_status.stat.exists ansible.builtin.meta: end_host -- name: Run log related tasks - hosts: "{{ cifmw_target_host | default('all') }}" - gather_facts: true - vars_files: - - ../../scenarios/centos-9/base.yml - tasks: - - name: Run logging - ansible.builtin.import_role: - name: cifmw_setup - tasks_from: run_logs.yml - tags: - - logs + - name: Run log collection + ansible.builtin.command: + chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + cmd: >- + ansible-playbook playbooks/99-logs.yml + -e @scenarios/centos-9/base.yml - name: "Run ci/playbooks/collect-logs.yml on CRC host" hosts: crc From b9435eb47f6ae1c6290cf7853e21fb089edb498b Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Wed, 20 Aug 2025 17:25:25 +0200 Subject: [PATCH 280/480] Enable horizon in upstream CI jobs to test Glance CORS configuration This patch enables horizon as part of the control plane update where glance is deployed with Ceph to test that the CORS configuration is automatically setup. 
Jira: https://issues.redhat.com/browse/OSPRH-19261 Signed-off-by: Francesco Pantano --- hooks/playbooks/templates/config_ceph_backends.yaml.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hooks/playbooks/templates/config_ceph_backends.yaml.j2 b/hooks/playbooks/templates/config_ceph_backends.yaml.j2 index a1a2fcd229..01eaeb9c24 100644 --- a/hooks/playbooks/templates/config_ceph_backends.yaml.j2 +++ b/hooks/playbooks/templates/config_ceph_backends.yaml.j2 @@ -29,6 +29,10 @@ patches: mountPath: "/etc/ceph" readOnly: true + - op: replace + path: /spec/horizon/enabled + value: true + - op: replace path: /spec/cinder/template/cinderBackup/replicas value: {{ cifmw_services_cinder_bkp_replicas | default(1) }} From 5cb05417b267d15e213ce15efa5e3aa633e5cc1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 21 Aug 2025 12:48:09 +0200 Subject: [PATCH 281/480] Pin nat64 CentOS image The CentOS images are broken, let's pin image to an older CS9 image until the CentOS image issues[1] has been resolved. 
[1] https://issues.redhat.com/browse/CS-2983 Jira: OSPCIX-1020 --- roles/nat64_appliance/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/nat64_appliance/tasks/main.yml b/roles/nat64_appliance/tasks/main.yml index 2a9aa941ce..71e151b291 100644 --- a/roles/nat64_appliance/tasks/main.yml +++ b/roles/nat64_appliance/tasks/main.yml @@ -72,6 +72,7 @@ ELEMENTS_PATH: "{{ cifmw_nat64_appliance_workdir }}/elements:{{ cifmw_nat64_appliance_workdir }}/edpm-image-builder/dib/" DIB_IMAGE_CACHE: "{{ cifmw_nat64_appliance_workdir }}/cache" DIB_DEBUG_TRACE: '1' + BASE_IMAGE_FILE: CentOS-Stream-GenericCloud-x86_64-9-20250812.1.x86_64.qcow2 cifmw.general.ci_script: chdir: "{{ cifmw_nat64_appliance_workdir }}" output_dir: "{{ cifmw_nat64_appliance_basedir }}/artifacts" From 5703bdde587294121499365c3da7a82a063fdb90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 21 Aug 2025 16:15:53 +0200 Subject: [PATCH 282/480] nat64_appliance: Bump image size from 2 to 3 GB Seeing Curl error (23) when downloading packages, indicating it is running out of disk space. 
--- roles/nat64_appliance/files/nat64-appliance.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/nat64_appliance/files/nat64-appliance.yaml b/roles/nat64_appliance/files/nat64-appliance.yaml index 76163ff4a0..93fd78e091 100644 --- a/roles/nat64_appliance/files/nat64-appliance.yaml +++ b/roles/nat64_appliance/files/nat64-appliance.yaml @@ -12,7 +12,7 @@ environment: DIB_RELEASE: '9-stream' DIB_PYTHON_VERSION: '3' - DIB_IMAGE_SIZE: '2' + DIB_IMAGE_SIZE: '3' COMPRESS_IMAGE: '1' DIB_BLOCK_DEVICE_CONFIG: | - local_loop: From 2661a7befa130ad72a87aa808e43100331695ded Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Sat, 23 Aug 2025 22:46:38 +0200 Subject: [PATCH 283/480] Add roles for fixing Python encodings On the freshly reprovisioned nodes in our CI, not very often, but still from time to time, we seem to lack the necessary encoding file to invoke Ansible modules properly. I suspect that all this happens due to some rogue cleaning script that removes too many files, such as the compiled Python codes (`*.pyc` files) and hence from time to time we hit this error (the distro images we use are being rebuild quite often). By including the new role as first task in the CI jobs, we should never ever encounter this problem again. 
--- docs/dictionary/en-custom.txt | 5 ++ roles/fix_python_encodings/README.md | 86 ++++++++++++++++++++++ roles/fix_python_encodings/tasks/main.yaml | 71 ++++++++++++++++++ 3 files changed, 162 insertions(+) create mode 100644 roles/fix_python_encodings/README.md create mode 100644 roles/fix_python_encodings/tasks/main.yaml diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index e52ac936e8..1856049772 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -101,7 +101,9 @@ containerfile controlplane coredns coreos +CP cpus +CPython crashloopbackoff crb crc @@ -168,6 +170,7 @@ edploy edpm edpmnodeexporter ee +encodings eno enp env @@ -578,6 +581,7 @@ uoyt uri usermod usr +UTF utils uuid vbibob @@ -633,6 +637,7 @@ ytm yxivcnvul yyoje yyyy +ZipFile zlcbwcm zm zpbgugcmjkihbvb diff --git a/roles/fix_python_encodings/README.md b/roles/fix_python_encodings/README.md new file mode 100644 index 0000000000..348094e09c --- /dev/null +++ b/roles/fix_python_encodings/README.md @@ -0,0 +1,86 @@ +Fix Python encodings +==================== + +This role ensures the `python3-libs` package is installed, +as well as verifies the necessary encoding file is in the system +– and if not, it is fetched directly from the CPython repository. + +**Important!** +Make sure to call this role from a playbook **without** gathering facts! \ +(Set `gather_facts: false` ~ otherwise it makes no sense to use this role!) + + +Details +------- + +When Ansible tries to invoke modules on target machines, it relies +on the call [^1] to ZipFile module from the Python standard library [^2]. +The handling of zip files requires to support necessary encodings, +which should typically be CP437 (Code Page 437 [^3]) and UTF-8 +(but sometimes it can be also CP1252/Windows-1252 or ISO-8859-1 [^4]). 
+ +When attempting to run Ansible modules against some freshly provisioned +hypervisors, sometimes, rarely, but still from time to time, we encounter: + +``` +PLAY [Prepare the hypervisor.] ************************************************ + +TASK [Create zuul user name=zuul, state=present] ****************************** +fatal: [hypervisor]: FAILED! => { + "changed": false, + "module_stderr": " + Warning: Permanently added '(...)' (ED25519) to the list of known hosts. + Traceback (most recent call last): + File \"\", line 107, in + File \"\", line 99, in _ansiballz_main + File \"\", line 35, in invoke_module + File \"/usr/lib64/python3.9/zipfile.py\", line 1286, in __init__ + self._RealGetContents() + File \"/usr/lib64/python3.9/zipfile.py\", line 1371, in _RealGetContents + filename = filename.decode('cp437') + LookupError: unknown encoding: cp437 + ", + "module_stdout": "", + "msg": "MODULE FAILURE See stdout/stderr for the exact error", + "rc": 1 +} +``` + +In Red Hat distributions it should come from the `python3-libs` package, +where it is shipped as just compiled Python file: + +``` +# rpm -qal python3-libs | grep -i 'encodings/cp437' +/usr/lib64/python3.9/encodings/cp437.pyc +``` + +However, in some installations we either seem to lack `python3-libs` +or simply that file is removed accidentally by some cleaning tool. +Unfortunately, it looks like a problem that occur from time to time [^5]. + +This role ensures the `python3-libs` package is installed, +as well as verifies the necessary encoding file is in the system +– and if not, it is fetched directly from the CPython repository [^6]. +To make sure it is all doable, everything in this role is performed via Ansible +raw action plugin [^7] that does not invoke the modules subsystem [^8] +on the target host. 
+ + +References +---------- + +[^1]: https://github.com/ansible/ansible/blob/stable-2.19/lib/ansible/_internal/_ansiballz/_wrapper.py#L121 + +[^2]: https://docs.python.org/3/library/zipfile.html + +[^3]: https://en.wikipedia.org/wiki/Code_page_437 + +[^4]: https://marcosc.com/2008/12/zip-files-and-encoding-i-hate-you/ + +[^5]: https://github.com/pypa/pip/issues/11449 + +[^6]: https://raw.githubusercontent.com/python/cpython/main/Lib/encodings/cp437.py + +[^7]: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/raw_module.html + +[^8]: https://stackoverflow.com/a/37079451 diff --git a/roles/fix_python_encodings/tasks/main.yaml b/roles/fix_python_encodings/tasks/main.yaml new file mode 100644 index 0000000000..ff23964c94 --- /dev/null +++ b/roles/fix_python_encodings/tasks/main.yaml @@ -0,0 +1,71 @@ +--- +- name: Check if cp437 is available + ansible.builtin.raw: |- + python3 -c 'from encodings import cp437; print(cp437)' + register: _import_cp437 + changed_when: false + ignore_errors: true + +- name: Fix missing cp437 + when: _import_cp437 is not success + block: + - name: Install python3-libs + ansible.builtin.raw: |- + dnf install --refresh --nobest --allowerasing --assumeyes python3-libs + become: true + ignore_errors: true + register: _dnf_install + changed_when: + - "'Installed:' in _dnf_install.stdout" + - "'Complete!' in _dnf_install.stdout" + - "'Nothing to do.' not in _dnf_install.stdout" + + - name: Reinstall python3-libs + ansible.builtin.raw: |- + dnf reinstall --nobest --allowerasing --assumeyes python3-libs + become: true + register: _dnf_reinstall + changed_when: + - "'Reinstalled:' in _dnf_reinstall.stdout" + - "'Complete!' in _dnf_reinstall.stdout" + + - name: Check if cp437 is available now + ansible.builtin.raw: |- + python3 -c 'from encodings import cp437; print(cp437)' + register: _import_cp437 + changed_when: false + ignore_errors: true + + # NOTE(sdatko): the tasks below should never be reached hopefully + # (i.e. 
a success in the register above overrides the check within block) + - name: Find Python3 installations + ansible.builtin.raw: |- + find /usr -path '/*/python3*/encodings' -type d + register: _python3_encodings + changed_when: false + + - name: Show Python3 installations + ansible.builtin.debug: + msg: "{{ _python3_encodings.stdout_lines }}" + + - name: Fetch cp437.py if needed + ansible.builtin.raw: |- + cd "{{ item }}" + if ! [ -s 'cp437.py' -o -s 'cp437.pyc' ]; then + curl --location --remote-name "{{ cp437_url }}" + fi + become: true + vars: + cp437_url: https://raw.githubusercontent.com/python/cpython/main/Lib/encodings/cp437.py + loop: "{{ _python3_encodings.stdout_lines }}" + + - name: Check if cp437 is finally available + ansible.builtin.raw: |- + python3 -c 'from encodings import cp437; print(cp437)' + register: _import_cp437 + changed_when: false + ignore_errors: true + + - name: Fail due to cp437 still not available + ansible.builtin.fail: + msg: 'Unable to fix the target host' From 9d1709ba7ce35805fdf57fc76f88b00b25e9f927 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Tue, 26 Aug 2025 12:19:15 +0200 Subject: [PATCH 284/480] Fix LVMCluster until clause We have faced some times a race error because of: The error was: error while evaluating conditional (_cifmw_lvms_storage_cluster_lvmscluster_out.resources | length == 1): ''dict object'' has no attribute ''resources''. 
''dict object'' With this we should fix this and make the task more robust --- roles/ci_lvms_storage/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_lvms_storage/tasks/main.yml b/roles/ci_lvms_storage/tasks/main.yml index 03bee78c3e..d42e13d87e 100644 --- a/roles/ci_lvms_storage/tasks/main.yml +++ b/roles/ci_lvms_storage/tasks/main.yml @@ -150,7 +150,7 @@ retries: "{{ cifmw_lvms_retries }}" delay: "{{ cifmw_lvms_delay }}" until: - - _cifmw_lvms_storage_cluster_lvmscluster_out.resources | length == 1 + - _cifmw_lvms_storage_cluster_lvmscluster_out.resources | default([]) | length == 1 - _cifmw_lvms_storage_cluster_lvmscluster_out.failed is false - _cifmw_lvms_storage_cluster_lvmscluster_out.resources[0].status is defined - _cifmw_lvms_storage_cluster_lvmscluster_out.resources[0].status.ready is defined From b870dff4a764ae082a7aada0b0433444bc86e2ab Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 26 Aug 2025 19:36:58 +0200 Subject: [PATCH 285/480] Fix cifmw-pod-zuul-files after adding fix_python_encodings role The role was added [1], but the command: make role_molecule was not executed, so the cifmw-pod-zuul-files CI job is failing. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/3218 Signed-off-by: Daniel Pawlik --- zuul.d/molecule.yaml | 9 +++++++++ zuul.d/projects.yaml | 1 + 2 files changed, 10 insertions(+) diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 195b5ad616..60e2eb4ec2 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -922,6 +922,15 @@ - ^.config/molecule/.* name: cifmw-molecule-federation parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/fix_python_encodings/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-fix_python_encodings + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 84625642a1..edc113847d 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -55,6 +55,7 @@ - cifmw-molecule-edpm_prepare - cifmw-molecule-env_op_images - cifmw-molecule-federation + - cifmw-molecule-fix_python_encodings - cifmw-molecule-hci_prepare - cifmw-molecule-hive - cifmw-molecule-idrac_configuration From a57bdbde5891ec73de3b30497447c470e5c777c3 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Mon, 18 Aug 2025 15:24:40 +0200 Subject: [PATCH 286/480] feat: enhance check if local common-requirements.txt exists --- roles/reproducer/tasks/configure_controller.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 14a4bc6028..65748352e3 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -369,6 +369,13 @@ - cifmw_reproducer_src_dir_stat.stat.exists - cifmw_reproducer_src_dir_stat.stat.isdir + - name: Check if local common-requirements.txt exists + delegate_to: localhost + ansible.builtin.stat: + path: "{{ cifmw_reproducer_src_dir 
}}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + register: _local_common_requirements_check + run_once: true + - name: Install ansible dependencies register: _async_dep_install async: 600 # 10 minutes should be more than enough @@ -376,8 +383,8 @@ ansible.builtin.pip: requirements: "{{ have_local | ternary(local, remote) }}" vars: - have_local: "{{ cifmw_reproducer_src_dir_stat.stat.exists and cifmw_reproducer_src_dir_stat.stat.isdir }}" - local: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + have_local: "{{ _local_common_requirements_check.stat.exists }}" + local: "{{ cifmw_reproducer_src_dir }}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt - name: Inject most of the cifmw_ parameters passed to the reproducer run From 955c126cb63f4e3459f9adeb69fded8e226d0485 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Tue, 19 Aug 2025 16:11:54 +0200 Subject: [PATCH 287/480] Move BGP playbooks to ci-fmw These BGP playbooks were duplicated in both ci-fmw and ci-fmw-jobs (downstream project). They had been recently updated in downstream only. With this patch the playbooks are updated in upstream. They will be removed from the downstream repo too. 
OSPRH-19224 --- playbooks/bgp/discover-hosts-loop.yaml | 24 ++++++++++++++++++++ playbooks/bgp/prepare-bgp-computes.yaml | 4 +++- playbooks/bgp/prepare-bgp-spines-leaves.yaml | 15 +++++++----- playbooks/bgp/templates/leaf-frr.conf.j2 | 3 +++ 4 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 playbooks/bgp/discover-hosts-loop.yaml diff --git a/playbooks/bgp/discover-hosts-loop.yaml b/playbooks/bgp/discover-hosts-loop.yaml new file mode 100644 index 0000000000..7069a398b1 --- /dev/null +++ b/playbooks/bgp/discover-hosts-loop.yaml @@ -0,0 +1,24 @@ +--- +- name: BGP discover hosts + hosts: controller-0 + gather_facts: true + tasks: + - name: Wait for expected number of discovered hypervisor + vars: + num_computes: >- + {{ + groups['r0-computes'] | length + + groups['r1-computes'] | length + + groups['r2-computes'] | length + }} + ansible.builtin.shell: + cmd: > + oc rsh -n openstack nova-cell0-conductor-0 nova-manage cell_v2 discover_hosts > /dev/null && + oc -n openstack rsh openstackclient openstack hypervisor list -f value -c State + register: hv_result + changed_when: false + retries: 50 + delay: 2 + until: + - hv_result.rc == 0 + - hv_result.stdout_lines == ["up"] * (num_computes | int) diff --git a/playbooks/bgp/prepare-bgp-computes.yaml b/playbooks/bgp/prepare-bgp-computes.yaml index c23c69ab3d..401b104785 100644 --- a/playbooks/bgp/prepare-bgp-computes.yaml +++ b/playbooks/bgp/prepare-bgp-computes.yaml @@ -1,6 +1,8 @@ --- - name: Configure computes - hosts: "computes{{ networkers_bool | default(false) | bool | ternary(',networkers', '') }}" + hosts: >- + r0-computes,r1-computes,r2-computes + {{ networkers_bool | default(false) | bool | ternary(',r0-networkers,r1-networkers,r2-networkers', '') }}" tasks: - name: Check default route corresponds with BGP ansible.builtin.command: diff --git a/playbooks/bgp/prepare-bgp-spines-leaves.yaml b/playbooks/bgp/prepare-bgp-spines-leaves.yaml index 7b020507fc..4007d3425b 100644 --- 
a/playbooks/bgp/prepare-bgp-spines-leaves.yaml +++ b/playbooks/bgp/prepare-bgp-spines-leaves.yaml @@ -39,11 +39,9 @@ sysctl_file: /etc/sysctl.d/sysctl.conf state: present reload: true - loop: "{{ sysctls | dict2items }}" - vars: - sysctls: - net.ipv4.conf.all.rp_filter: '0' - net.ipv4.conf.default.rp_filter: '0' + with_dict: + net.ipv4.conf.all.rp_filter: '0' + net.ipv4.conf.default.rp_filter: '0' register: result retries: 3 timeout: 60 @@ -94,6 +92,10 @@ ansible.builtin.package: name: frr state: present + register: frr_present + retries: 10 + delay: 2 + until: frr_present is success - name: Enable FRR BGP daemon become: true @@ -189,7 +191,8 @@ community.general.nmcli: autoconnect: true conn_name: "{{ router_uplink_conn }}" - ip4: "{{ router_uplink_ip }}/30" + # mask changed to /24 due to https://github.com/openstack-k8s-operators/architecture/pull/466 + ip4: "{{ router_uplink_ip }}/24" method4: manual method6: link-local state: present diff --git a/playbooks/bgp/templates/leaf-frr.conf.j2 b/playbooks/bgp/templates/leaf-frr.conf.j2 index e300f0f8f2..d65b1e84b7 100644 --- a/playbooks/bgp/templates/leaf-frr.conf.j2 +++ b/playbooks/bgp/templates/leaf-frr.conf.j2 @@ -27,7 +27,10 @@ router bgp 64999 neighbor downlink remote-as internal neighbor downlink bfd neighbor downlink bfd profile tripleo +{# TODO: remove the next if when RHEL-63205 is fixed #} +{% if not (fips_mode | bool) %} neighbor downlink password f00barZ +{% endif %} ! neighbor downlink capability extended-nexthop {% for iface in downlink_interfaces %} neighbor {{iface}} interface peer-group downlink From 8b753f533cfa3bb2121df8b31d673ae72aca4f2d Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 23 Jul 2025 18:01:00 +0200 Subject: [PATCH 288/480] Move Ceph playbook into hooks; symlink ceph play to old location We would like to avoid tasks, where we do 'import_playbook'. 
In that case, instead of split ceph playbook into role (now), let's keep the playbook as it is (because it is used in few places) and run that play using hooks, like we do with other additional services. Until migration process is not done (many projects are using that playbook), there is a symlink done to old location, so we should avoid outage. Related-to: https://github.com/openstack-k8s-operators/install_yamls/pull/1083 Related-to: https://github.com/openstack-k8s-operators/architecture/pull/604 Signed-off-by: Daniel Pawlik --- deploy-edpm.yml | 26 +- hooks/playbooks/README.md | 17 + hooks/playbooks/ceph-bm.yml | 4 +- hooks/playbooks/ceph.yml | 526 ++++++++++++++++++++++ playbooks/06-deploy-edpm.yml | 4 +- playbooks/ceph.yml | 527 +---------------------- roles/ci_dcn_site/tasks/ceph.yml | 2 +- roles/cifmw_cephadm/README.md | 44 +- roles/hci_prepare/README.md | 2 +- scenarios/centos-9/hci_ceph_backends.yml | 7 + zuul.d/edpm.yaml | 2 + zuul.d/edpm_multinode.yaml | 2 +- 12 files changed, 609 insertions(+), 554 deletions(-) create mode 100644 hooks/playbooks/ceph.yml mode change 100644 => 120000 playbooks/ceph.yml diff --git a/deploy-edpm.yml b/deploy-edpm.yml index db604b65ba..eee017477d 100644 --- a/deploy-edpm.yml +++ b/deploy-edpm.yml @@ -111,17 +111,21 @@ when: cifmw_edpm_deploy_hci | default(false) | bool ansible.builtin.meta: clear_facts -# TODO: replace this import_playbook with cifmw_ceph role -- name: Deploy Ceph on target nodes - vars: - _deploy_ceph: >- - {{ - (cifmw_edpm_deploy_hci | default(false) | bool) and - cifmw_architecture_scenario is undefined - }} - storage_network_range: 172.18.0.0/24 - storage_mgmt_network_range: 172.20.0.0/24 - ansible.builtin.import_playbook: playbooks/ceph.yml +- name: Deploy ceph using hooks + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Run post_ceph hooks - deploy Ceph on target nodes + vars: + step: post_ceph + _deploy_ceph: >- + {{ + (cifmw_edpm_deploy_hci | default(false) | bool) and + 
cifmw_architecture_scenario is undefined + }} + storage_network_range: 172.18.0.0/24 + storage_mgmt_network_range: 172.20.0.0/24 + ansible.builtin.import_role: + name: run_hook - name: Continue HCI deploy, deploy architecture and validate workflow hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/hooks/playbooks/README.md b/hooks/playbooks/README.md index 6c67966bb9..56e64b60b6 100644 --- a/hooks/playbooks/README.md +++ b/hooks/playbooks/README.md @@ -7,6 +7,23 @@ This hook allow to deploy the "toy ceph" as explained [here](https://github.com/ consumers (base64 encoded). * `cifmw_ceph_fsid`: ceph FSID generated by the ceph installation. +## ceph.yml +This playbook was moved from `playbooks/ceph.yml` to `hooks/playbooks` location +on removing "import_playbook" usage in ci-framework project. +### Input +* `cifmw_admin_user`: (string) The administrative user account +* `cifmw_ceph_target`: (string) The target host or node where the Ceph cluster will be deployed +* `storage_network_range`: (string) The IP address range for the Ceph public network +* `storage_mgmt_network_range`: (string) The IP address range for the Ceph cluster management network +* `cifmw_cephadm_pools`: (list) A list of Ceph storage pools to be created during deployment +* `cifmw_cephadm_keys`: (list): A list of Ceph client keys to be generated for accessing the Ceph cluster +* `cifmw_cephadm_vip`: (string): The virtual IP address for the Ceph monitor +* `cifmw_cephadm_certificate` (string): The path to or content of the SSL certificate +* `cifmw_cephadm_key`: (string) The path to or content of the SSL key associated with the cifmw_cephadm_certificate. +* `cifmw_cephadm_cluster`: (string) The name or identifier of the Ceph cluster to be deployed +### Output +None + ## kustomize_cr.yml This hook enables customization of CR files, using oc kustomize. 
### Input diff --git a/hooks/playbooks/ceph-bm.yml b/hooks/playbooks/ceph-bm.yml index c82c9f3a55..fb2e8270b6 100644 --- a/hooks/playbooks/ceph-bm.yml +++ b/hooks/playbooks/ceph-bm.yml @@ -35,5 +35,7 @@ {% endfor -%} {{ hosts }} +# NOTE: hooks would not call run_hooks role. +# Run playbook directly. - name: Deploy Ceph on target nodes - ansible.builtin.import_playbook: ../../playbooks/ceph.yml + ansible.builtin.import_playbook: ceph.yml diff --git a/hooks/playbooks/ceph.yml b/hooks/playbooks/ceph.yml new file mode 100644 index 0000000000..dee6093e77 --- /dev/null +++ b/hooks/playbooks/ceph.yml @@ -0,0 +1,526 @@ +--- +# Copyright 2023 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Create local SSH keypair + tags: keypair + hosts: localhost + gather_facts: false + vars: + cifmw_admin_user: ceph-admin + pre_tasks: + # end_play will end this current playbook and go the the next + # imported play. 
+ - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + tasks: + - name: Set ssh key path facts + ansible.builtin.set_fact: + private_key: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa" + public_key: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa.pub" + run_once: true # noqa: run-once[task] + + - name: Stat private key + ansible.builtin.stat: + path: "{{ private_key }}" + register: private_key_stat + + - name: Stat public key + ansible.builtin.stat: + path: "{{ public_key }}" + register: public_key_stat + + - name: Create private key if it does not exist + ansible.builtin.command: + cmd: "ssh-keygen -t rsa -q -N '' -f {{ private_key }}" + no_log: true + when: + - not private_key_stat.stat.exists + + - name: Create public key if it does not exist + ansible.builtin.shell: "ssh-keygen -y -f {{ private_key }} > {{ public_key }}" + when: + - not public_key_stat.stat.exists + +- name: Distribute SSH keypair to target nodes + tags: admin + hosts: "{{ cifmw_ceph_target | default('computes') }}" + gather_facts: false + become: true + vars: + cifmw_admin_user: ceph-admin + _target_group: "{{ cifmw_ceph_target | default('computes') }}" + _target: "{{ groups[_target_group] | default([]) | first }}" + ansible_ssh_private_key_file: >- + {{ + hostvars[_target]['ansible_ssh_private_key_file'] | + default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) + }} + pre_tasks: + # end_play will end this current playbook and go the the next + # imported play. 
+ - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + + - name: Get local private key + ansible.builtin.slurp: + src: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa" + register: private_key_get + delegate_to: localhost + no_log: true + + - name: Get local public key + ansible.builtin.slurp: + src: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa.pub" + register: public_key_get + delegate_to: localhost + roles: + - role: cifmw_create_admin + cifmw_admin_user: ceph-admin + cifmw_admin_pubkey: "{{ public_key_get['content'] | b64decode }}" + cifmw_admin_prikey: "{{ private_key_get['content'] | b64decode }}" + cifmw_admin_distribute_private_key: true + no_log: true + +- name: Create Block Device on target nodes + tags: block + hosts: "{{ cifmw_ceph_target | default('computes') }}" + gather_facts: true + become: true + pre_tasks: + # If ceph is not being deployed, then skip this play + # Or if cifmw_ceph_spec_data_devices is overridden, then skip this play + # Assume cifmw_ceph_spec_data_devices implies block devices are already present + # end_play will end this current playbook and go to the next + # imported play. + - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) or + (cifmw_ceph_spec_data_devices is defined and + cifmw_ceph_spec_data_devices | length > 0) + ansible.builtin.meta: end_play + tasks: + - name: Set cifmw_num_osds_perhost + # By default 1 OSD is created per node in case of multinode. + # 3 OSDS will be created for single node env to accommodate + # more ceph resources and avoid PG errors. 
+ ansible.builtin.set_fact: + cifmw_num_osds_perhost: | + {% if groups[cifmw_ceph_target | default('computes')] | length == 1 %} + {% set num_osds = 3 %} + {% else %} + {% set num_osds = 1 %} + {% endif %} + {{ num_osds }} + - name: Create Block Device on EDPM Nodes + vars: + _target_group: "{{ cifmw_ceph_target | default('computes') }}" + _target: "{{ groups[_target_group] | default([]) | first }}" + ansible_ssh_private_key_file: >- + {{ + hostvars[_target]['ansible_ssh_private_key_file'] | + default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) + }} + cifmw_block_device_image_file: /var/lib/ceph-osd-{{ i }}.img + cifmw_block_device_loop: /dev/loop{{ i + 3 }} + cifmw_block_lv_name: ceph_lv{{ i }} + cifmw_block_vg_name: ceph_vg{{ i }} + cifmw_block_systemd_unit_file: /etc/systemd/system/ceph-osd-losetup-{{ i }}.service + ansible.builtin.include_role: + name: cifmw_block_device + loop_control: + loop_var: i + loop: "{{ range(0, cifmw_num_osds_perhost|int) }}" + +- name: Build Ceph spec and conf from gathered IPs of the target inventory group + tags: spec + hosts: localhost + gather_facts: true + pre_tasks: + # end_play will end this current playbook and go to the next + # imported play. 
+ - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + + - name: Set IPv4 facts + when: + - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) + ansible.builtin.set_fact: + ssh_network_range: 192.168.122.0/24 + # storage_network_range: 172.18.0.0/24 + storage_mgmt_network_range: 172.20.0.0/24 + all_addresses: ansible_all_ipv4_addresses + ms_bind_ipv4: true + ms_bind_ipv6: false + + - name: Set IPv6 facts + when: + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) + ansible.builtin.set_fact: + ssh_network_range: "2620:cf:cf:aaaa::/64" + # storage_network_range: "2620:cf:cf:cccc::/64" + storage_mgmt_network_range: "2620:cf:cf:dddd::/64" + all_addresses: ansible_all_ipv6_addresses + ms_bind_ipv4: false + ms_bind_ipv6: true + + - name: Build a dict mapping hostname to its IP which is in management network range + ansible.builtin.set_fact: + host_to_ip: + "{{ host_to_ip | default({}) | + combine( + { + item : + hostvars[item][all_addresses] | ansible.utils.ipaddr(ssh_network_range) | first + } + ) + }}" + delegate_to: "{{ item }}" + loop: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" + + - name: Load network ranges from Networking Environment Definition if not provided + when: >- + storage_network_range is not defined or + storage_mgmt_network_range is not defined + block: + - name: Load Networking Environment Definition + vars: + cifmw_networking_mapper_assert_env_load: false + ansible.builtin.import_role: + name: networking_mapper + tasks_from: load_env_definition.yml + + - name: Set IPv4 network ranges vars + when: + - cifmw_networking_env_definition is defined + - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) + ansible.builtin.set_fact: + storage_network_range: >- + {{ + cifmw_networking_env_definition.networks.storage.network_v4 + }} + storage_mgmt_network_range: >- + {{ + 
cifmw_networking_env_definition.networks.storagemgmt.network_v4 + }} + + - name: Set IPv6 network ranges vars + when: + - cifmw_networking_env_definition is defined + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) + ansible.builtin.set_fact: + storage_network_range: >- + {{ + cifmw_networking_env_definition.networks.storage.network_v6 + }} + storage_mgmt_network_range: >- + {{ + cifmw_networking_env_definition.networks.storagemgmt.network_v6 + }} + + roles: + - role: cifmw_ceph_spec + cifmw_ceph_spec_host_to_ip: "{{ host_to_ip }}" + cifmw_ceph_spec_public_network: "{{ storage_network_range | default(ssh_network_range) }}" + cifmw_ceph_spec_private_network: "{{ storage_mgmt_network_range | default('') }}" + +- name: Fetch network facts of all computes + tags: cephadm + hosts: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" + gather_facts: false + pre_tasks: + # end_play will end this current playbook and go the the next + # imported play. + - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + tasks: + - name: Fetch network facts of all computes + ansible.builtin.setup: + gather_subset: + - "!all" + - "!min" + - network + +- name: Bootstrap Ceph and apply spec + tags: cephadm + hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" + gather_facts: false + vars: + _target_hosts: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" + _target: "{{ _target_hosts | first }}" + ansible_ssh_private_key_file: >- + {{ + hostvars[_target]['ansible_ssh_private_key_file'] | + default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) + }} + cifmw_cephadm_spec_ansible_host: /tmp/ceph_spec.yml + cifmw_cephadm_bootstrap_conf: /tmp/initial_ceph.conf + cifmw_ceph_client_vars: /tmp/ceph_client.yml + cifmw_cephadm_default_container: true + cifmw_cephadm_pools: + - name: vms + pg_autoscale_mode: true + target_size_ratio: 0.2 + application: 
rbd + - name: volumes + pg_autoscale_mode: true + target_size_ratio: 0.3 + application: rbd + trash_purge_enabled: true + - name: backups + pg_autoscale_mode: true + target_size_ratio: 0.1 + application: rbd + - name: images + target_size_ratio: 0.2 + pg_autoscale_mode: true + application: rbd + - name: cephfs.cephfs.meta + target_size_ratio: 0.1 + pg_autoscale_mode: true + application: cephfs + - name: cephfs.cephfs.data + target_size_ratio: 0.1 + pg_autoscale_mode: true + application: cephfs + pre_tasks: + # end_play will end this current playbook and go the the next + # imported play. + - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + + - name: Set IPv4 facts + when: + - ansible_all_ipv4_addresses | length > 0 + - not ceph_ipv6 | default(false) + ansible.builtin.set_fact: + all_addresses: ansible_all_ipv4_addresses + cidr: 24 + + - name: Set IPv6 facts + when: + - ansible_all_ipv6_addresses | length > 0 + - ceph_ipv6 | default(false) + ansible.builtin.set_fact: + all_addresses: ansible_all_ipv6_addresses + cidr: 64 + + - name: Generate a cephx key + cephx_key: + register: cephx + no_log: true + + - name: Set cifmw_cephadm_keys with the cephx key and cifmw_cephadm_pools + ansible.builtin.set_fact: + cifmw_cephadm_keys: + - name: client.openstack + key: "{{ cephx.key }}" + mode: '0600' + caps: + mgr: allow * + mon: profile rbd + osd: "{{ pools | map('regex_replace', '^(.*)$', + 'profile rbd pool=\\1') | join(', ') }}" + vars: + pools: "{{ cifmw_cephadm_pools | map(attribute='name') | list }}" + no_log: true + + # for deploying external ceph for 17.1 using cifmw, we need this playbook to create keyring + # for manila client and manila_data pool + - name: Add client.manila key and manila_data pool for tripleo deployment + ansible.builtin.set_fact: + cifmw_cephadm_keys: "{{ cifmw_cephadm_keys + [ manila_key ] }}" + cifmw_cephadm_pools: "{{ cifmw_cephadm_pools + [ manila_pool ] }}" + vars: + 
manila_key: + name: client.manila + key: "{{ cephx.key }}" + mode: '0600' + caps: + mgr: allow rw + mon: allow r + osd: allow rw pool=manila_data + manila_pool: + name: manila_data + target_size_ratio: 0.1 + pg_autoscale_mode: true + application: cephfs + when: adoption_deploy_ceph_for_tripleo | default (false) + no_log: true + + # public network always exist because is provided by the ceph_spec role + - name: Get Storage network range + ansible.builtin.set_fact: + cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" + + - name: Set IP address of first monitor + ansible.builtin.set_fact: + cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}" + vars: + this_host: "{{ _target_hosts | first }}" + + - name: Assert if any EDPM nodes n/w interface is missing in storage network + ansible.builtin.assert: + that: + - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 + fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" + loop: "{{ _target_hosts }}" + + - name: Get already assigned IP addresses + ansible.builtin.set_fact: + ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" + loop: "{{ _target_hosts }}" + + # cifmw_cephadm_vip is the VIP reserved in the Storage network + - name: Set VIP var as empty string + ansible.builtin.set_fact: + cifmw_cephadm_vip: "" + + - name: Process VIP + ansible.builtin.include_role: + name: cifmw_cephadm + tasks_from: check_vip + loop: "{{ range(1, (ips | length) + 1) | list }}" + + tasks: + - name: Satisfy Ceph prerequisites + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: pre + + - name: Bootstrap Ceph + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: bootstrap + + - name: Ensure that 
Ceph orchestrator is responsive + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: monitor_ceph_orch + + - name: Apply Ceph spec + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: apply_spec + + - name: Create ceph pools + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: pools + + - name: Deploy RGW + when: cifmw_ceph_daemons_layout.rgw_enabled | default(true) | bool + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: rgw + vars: + # cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml + cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}" + + - name: Configure Monitoring Stack + when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: monitoring + vars: + cifmw_cephadm_monitoring_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" + cifmw_cephadm_dashboard_crt: "{{ cifmw_cephadm_certificate }}" + cifmw_cephadm_dashboard_key: "{{ cifmw_cephadm_key }}" + + - name: Create cephfs volume + when: (cifmw_ceph_daemons_layout.cephfs_enabled | default(true) | bool) or + (cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool) + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: cephfs + + - name: Deploy cephnfs + when: cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: cephnfs + vars: + # we reuse the same VIP reserved for rgw + cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" + + - name: Deploy rbd-mirror + when: cifmw_ceph_daemons_layout.ceph_rbd_mirror_enabled | default(false) | bool + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: rbd_mirror + + - name: Create Cephx Keys for OpenStack + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: keys + + - name: Export configuration as vars for 
cifmw_ceph_client + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: export + + - name: Ensure that Ceph orchestrator is responsive + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: monitor_ceph_orch + + - name: Show the Ceph cluster status + ansible.builtin.import_role: + name: cifmw_cephadm + tasks_from: post + vars: + cifmw_cephadm_dashboard_crt: "{{ cifmw_cephadm_certificate }}" + cifmw_cephadm_dashboard_key: "{{ cifmw_cephadm_key }}" + +- name: Render Ceph client configuration + tags: client + hosts: localhost + gather_facts: false + vars: + cifmw_ceph_client_vars: /tmp/ceph_client.yml + cifmw_ceph_client_fetch_dir: /tmp + cifmw_ceph_client_k8s_secret_name: ceph-conf-files + cifmw_ceph_client_k8s_namespace: openstack + cifmw_ceph_client_cluster: "{{ cifmw_cephadm_cluster | default('ceph') }}" + pre_tasks: + # end_play will end this current playbook and go the the next + # imported play. + - name: Early stop ceph related work + when: + - not _deploy_ceph | default(true) + ansible.builtin.meta: end_play + tasks: + - name: Export configuration for ceph client + ansible.builtin.import_role: + name: cifmw_ceph_client + - name: Output usage + ansible.builtin.debug: + msg: >- + Import ceph secret into k8s + 'kubectl create -f /tmp/k8s_ceph_secret.yml' diff --git a/playbooks/06-deploy-edpm.yml b/playbooks/06-deploy-edpm.yml index 4a36f3635b..2871748076 100644 --- a/playbooks/06-deploy-edpm.yml +++ b/playbooks/06-deploy-edpm.yml @@ -131,6 +131,8 @@ when: cifmw_edpm_deploy_hci | default(false) | bool ansible.builtin.meta: clear_facts +# NOTE: This playbook would be removed soon. +# Here is more like workaround to pass CI. 
- name: Deploy Ceph on target nodes vars: _deploy_ceph: >- @@ -140,7 +142,7 @@ }} storage_network_range: 172.18.0.0/24 storage_mgmt_network_range: 172.20.0.0/24 - ansible.builtin.import_playbook: ceph.yml + ansible.builtin.import_playbook: ../hooks/playbooks/ceph.yml - name: Continue HCI deploy hosts: "{{ cifmw_target_host | default('localhost') }}" diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml deleted file mode 100644 index dee6093e77..0000000000 --- a/playbooks/ceph.yml +++ /dev/null @@ -1,526 +0,0 @@ ---- -# Copyright 2023 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -- name: Create local SSH keypair - tags: keypair - hosts: localhost - gather_facts: false - vars: - cifmw_admin_user: ceph-admin - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. 
- - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - tasks: - - name: Set ssh key path facts - ansible.builtin.set_fact: - private_key: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa" - public_key: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa.pub" - run_once: true # noqa: run-once[task] - - - name: Stat private key - ansible.builtin.stat: - path: "{{ private_key }}" - register: private_key_stat - - - name: Stat public key - ansible.builtin.stat: - path: "{{ public_key }}" - register: public_key_stat - - - name: Create private key if it does not exist - ansible.builtin.command: - cmd: "ssh-keygen -t rsa -q -N '' -f {{ private_key }}" - no_log: true - when: - - not private_key_stat.stat.exists - - - name: Create public key if it does not exist - ansible.builtin.shell: "ssh-keygen -y -f {{ private_key }} > {{ public_key }}" - when: - - not public_key_stat.stat.exists - -- name: Distribute SSH keypair to target nodes - tags: admin - hosts: "{{ cifmw_ceph_target | default('computes') }}" - gather_facts: false - become: true - vars: - cifmw_admin_user: ceph-admin - _target_group: "{{ cifmw_ceph_target | default('computes') }}" - _target: "{{ groups[_target_group] | default([]) | first }}" - ansible_ssh_private_key_file: >- - {{ - hostvars[_target]['ansible_ssh_private_key_file'] | - default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) - }} - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. 
- - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - - - name: Get local private key - ansible.builtin.slurp: - src: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa" - register: private_key_get - delegate_to: localhost - no_log: true - - - name: Get local public key - ansible.builtin.slurp: - src: "{{ lookup('env', 'HOME') }}/.ssh/{{ cifmw_admin_user }}-id_rsa.pub" - register: public_key_get - delegate_to: localhost - roles: - - role: cifmw_create_admin - cifmw_admin_user: ceph-admin - cifmw_admin_pubkey: "{{ public_key_get['content'] | b64decode }}" - cifmw_admin_prikey: "{{ private_key_get['content'] | b64decode }}" - cifmw_admin_distribute_private_key: true - no_log: true - -- name: Create Block Device on target nodes - tags: block - hosts: "{{ cifmw_ceph_target | default('computes') }}" - gather_facts: true - become: true - pre_tasks: - # If ceph is not being deployed, then skip this play - # Or if cifmw_ceph_spec_data_devices is overridden, then skip this play - # Assume cifmw_ceph_spec_data_devices implies block devices are already present - # end_play will end this current playbook and go the the next - # imported play. - - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) or - (cifmw_ceph_spec_data_devices is defined and - cifmw_ceph_spec_data_devices | length > 0) - ansible.builtin.meta: end_play - tasks: - - name: Set cifmw_num_osds_perhost - # By defualt 1 OSD is created per node in case of multinode. - # 3 OSDS will be created for single node env to accomodate - # more ceph resources and avoid PG errors. 
- ansible.builtin.set_fact: - cifmw_num_osds_perhost: | - {% if groups[cifmw_ceph_target | default('computes')] | length == 1 %} - {% set num_osds = 3 %} - {% else %} - {% set num_osds = 1 %} - {% endif %} - {{ num_osds }} - - name: Create Block Device on EDPM Nodes - vars: - _target_group: "{{ cifmw_ceph_target | default('computes') }}" - _target: "{{ groups[_target_group] | default([]) | first }}" - ansible_ssh_private_key_file: >- - {{ - hostvars[_target]['ansible_ssh_private_key_file'] | - default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) - }} - cifmw_block_device_image_file: /var/lib/ceph-osd-{{ i }}.img - cifmw_block_device_loop: /dev/loop{{ i + 3 }} - cifmw_block_lv_name: ceph_lv{{ i }} - cifmw_block_vg_name: ceph_vg{{ i }} - cifmw_block_systemd_unit_file: /etc/systemd/system/ceph-osd-losetup-{{ i }}.service - ansible.builtin.include_role: - name: cifmw_block_device - loop_control: - loop_var: i - loop: "{{ range(0, cifmw_num_osds_perhost|int) }}" - -- name: Build Ceph spec and conf from gathered IPs of the target inventory group - tags: spec - hosts: localhost - gather_facts: true - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. 
- - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - - - name: Set IPv4 facts - when: - - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) - ansible.builtin.set_fact: - ssh_network_range: 192.168.122.0/24 - # storage_network_range: 172.18.0.0/24 - storage_mgmt_network_range: 172.20.0.0/24 - all_addresses: ansible_all_ipv4_addresses - ms_bind_ipv4: true - ms_bind_ipv6: false - - - name: Set IPv6 facts - when: - - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) - ansible.builtin.set_fact: - ssh_network_range: "2620:cf:cf:aaaa::/64" - # storage_network_range: "2620:cf:cf:cccc::/64" - storage_mgmt_network_range: "2620:cf:cf:dddd::/64" - all_addresses: ansible_all_ipv6_addresses - ms_bind_ipv4: false - ms_bind_ipv6: true - - - name: Build a dict mapping hostname to its IP which is in management network range - ansible.builtin.set_fact: - host_to_ip: - "{{ host_to_ip | default({}) | - combine( - { - item : - hostvars[item][all_addresses] | ansible.utils.ipaddr(ssh_network_range) | first - } - ) - }}" - delegate_to: "{{ item }}" - loop: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" - - - name: Load network ranges from Networking Environment Definition if not provided - when: >- - storage_network_range is not defined or - storage_mgmt_network_range is not defined - block: - - name: Load Networking Environment Definition - vars: - cifmw_networking_mapper_assert_env_load: false - ansible.builtin.import_role: - name: networking_mapper - tasks_from: load_env_definition.yml - - - name: Set IPv4 network ranges vars - when: - - cifmw_networking_env_definition is defined - - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) - ansible.builtin.set_fact: - storage_network_range: >- - {{ - cifmw_networking_env_definition.networks.storage.network_v4 - }} - storage_mgmt_network_range: >- - {{ - 
cifmw_networking_env_definition.networks.storagemgmt.network_v4 - }} - - - name: Set IPv6 network ranges vars - when: - - cifmw_networking_env_definition is defined - - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) - ansible.builtin.set_fact: - storage_network_range: >- - {{ - cifmw_networking_env_definition.networks.storage.network_v6 - }} - storage_mgmt_network_range: >- - {{ - cifmw_networking_env_definition.networks.storagemgmt.network_v6 - }} - - roles: - - role: cifmw_ceph_spec - cifmw_ceph_spec_host_to_ip: "{{ host_to_ip }}" - cifmw_ceph_spec_public_network: "{{ storage_network_range | default(ssh_network_range) }}" - cifmw_ceph_spec_private_network: "{{ storage_mgmt_network_range | default('') }}" - -- name: Fetch network facts of all computes - tags: cephadm - hosts: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" - gather_facts: false - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. - - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - tasks: - - name: Fetch network facts of all computes - ansible.builtin.setup: - gather_subset: - - "!all" - - "!min" - - network - -- name: Bootstrap Ceph and apply spec - tags: cephadm - hosts: "{{ (groups[cifmw_ceph_target | default('computes')] | default([]))[:1] }}" - gather_facts: false - vars: - _target_hosts: "{{ groups[cifmw_ceph_target | default('computes')] | default([]) }}" - _target: "{{ _target_hosts | first }}" - ansible_ssh_private_key_file: >- - {{ - hostvars[_target]['ansible_ssh_private_key_file'] | - default(lookup('env', 'ANSIBLE_SSH_PRIVATE_KEY')) - }} - cifmw_cephadm_spec_ansible_host: /tmp/ceph_spec.yml - cifmw_cephadm_bootstrap_conf: /tmp/initial_ceph.conf - cifmw_ceph_client_vars: /tmp/ceph_client.yml - cifmw_cephadm_default_container: true - cifmw_cephadm_pools: - - name: vms - pg_autoscale_mode: true - target_size_ratio: 0.2 - application: 
rbd - - name: volumes - pg_autoscale_mode: true - target_size_ratio: 0.3 - application: rbd - trash_purge_enabled: true - - name: backups - pg_autoscale_mode: true - target_size_ratio: 0.1 - application: rbd - - name: images - target_size_ratio: 0.2 - pg_autoscale_mode: true - application: rbd - - name: cephfs.cephfs.meta - target_size_ratio: 0.1 - pg_autoscale_mode: true - application: cephfs - - name: cephfs.cephfs.data - target_size_ratio: 0.1 - pg_autoscale_mode: true - application: cephfs - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. - - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - - - name: Set IPv4 facts - when: - - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) - ansible.builtin.set_fact: - all_addresses: ansible_all_ipv4_addresses - cidr: 24 - - - name: Set IPv6 facts - when: - - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) - ansible.builtin.set_fact: - all_addresses: ansible_all_ipv6_addresses - cidr: 64 - - - name: Generate a cephx key - cephx_key: - register: cephx - no_log: true - - - name: Set cifmw_cephadm_keys with the cephx key and cifmw_cephadm_pools - ansible.builtin.set_fact: - cifmw_cephadm_keys: - - name: client.openstack - key: "{{ cephx.key }}" - mode: '0600' - caps: - mgr: allow * - mon: profile rbd - osd: "{{ pools | map('regex_replace', '^(.*)$', - 'profile rbd pool=\\1') | join(', ') }}" - vars: - pools: "{{ cifmw_cephadm_pools | map(attribute='name') | list }}" - no_log: true - - # for deploying external ceph for 17.1 using cifmw, we need this playbook to create keyring - # for manila client and manila_data pool - - name: Add client.manila key and manila_data pool for tripleo deployment - ansible.builtin.set_fact: - cifmw_cephadm_keys: "{{ cifmw_cephadm_keys + [ manila_key ] }}" - cifmw_cephadm_pools: "{{ cifmw_cephadm_pools + [ manila_pool ] }}" - vars: - 
manila_key: - name: client.manila - key: "{{ cephx.key }}" - mode: '0600' - caps: - mgr: allow rw - mon: allow r - osd: allow rw pool=manila_data - manila_pool: - name: manila_data - target_size_ratio: 0.1 - pg_autoscale_mode: true - application: cephfs - when: adoption_deploy_ceph_for_tripleo | default (false) - no_log: true - - # public network always exist because is provided by the ceph_spec role - - name: Get Storage network range - ansible.builtin.set_fact: - cifmw_cephadm_rgw_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" - - - name: Set IP address of first monitor - ansible.builtin.set_fact: - cifmw_cephadm_first_mon_ip: "{{ hostvars[this_host][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first }}" - vars: - this_host: "{{ _target_hosts | first }}" - - - name: Assert if any EDPM nodes n/w interface is missing in storage network - ansible.builtin.assert: - that: - - hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | length > 0 - fail_msg: "node {{ item }} doesn't have any interface connected to network {{ cifmw_cephadm_rgw_network }}" - loop: "{{ _target_hosts }}" - - - name: Get already assigned IP addresses - ansible.builtin.set_fact: - ips: "{{ ips | default([]) + [ hostvars[item][all_addresses] | ansible.utils.ipaddr(cifmw_cephadm_rgw_network) | first ] }}" - loop: "{{ _target_hosts }}" - - # cifmw_cephadm_vip is the VIP reserved in the Storage network - - name: Set VIP var as empty string - ansible.builtin.set_fact: - cifmw_cephadm_vip: "" - - - name: Process VIP - ansible.builtin.include_role: - name: cifmw_cephadm - tasks_from: check_vip - loop: "{{ range(1, (ips | length) + 1) | list }}" - - tasks: - - name: Satisfy Ceph prerequisites - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: pre - - - name: Bootstrap Ceph - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: bootstrap - - - name: Ensure that 
Ceph orchestrator is responsive - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: monitor_ceph_orch - - - name: Apply Ceph spec - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: apply_spec - - - name: Create ceph pools - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: pools - - - name: Deploy RGW - when: cifmw_ceph_daemons_layout.rgw_enabled | default(true) | bool - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: rgw - vars: - # cifmw_cephadm_vip is computed or passed as an override via -e @extra.yml - cifmw_cephadm_rgw_vip: "{{ cifmw_cephadm_vip }}" - - - name: Configure Monitoring Stack - when: cifmw_ceph_daemons_layout.dashboard_enabled | default(false) | bool - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: monitoring - vars: - cifmw_cephadm_monitoring_network: "{{ lookup('ansible.builtin.ini', 'public_network section=global file=' ~ cifmw_cephadm_bootstrap_conf) }}" - cifmw_cephadm_dashboard_crt: "{{ cifmw_cephadm_certificate }}" - cifmw_cephadm_dashboard_key: "{{ cifmw_cephadm_key }}" - - - name: Create cephfs volume - when: (cifmw_ceph_daemons_layout.cephfs_enabled | default(true) | bool) or - (cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool) - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: cephfs - - - name: Deploy cephnfs - when: cifmw_ceph_daemons_layout.ceph_nfs_enabled | default(false) | bool - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: cephnfs - vars: - # we reuse the same VIP reserved for rgw - cifmw_cephadm_nfs_vip: "{{ cifmw_cephadm_vip }}/{{ cidr }}" - - - name: Deploy rbd-mirror - when: cifmw_ceph_daemons_layout.ceph_rbd_mirror_enabled | default(false) | bool - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: rbd_mirror - - - name: Create Cephx Keys for OpenStack - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: keys - - - name: Export configuration as vars for 
cifmw_ceph_client - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: export - - - name: Ensure that Ceph orchestrator is responsive - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: monitor_ceph_orch - - - name: Show the Ceph cluster status - ansible.builtin.import_role: - name: cifmw_cephadm - tasks_from: post - vars: - cifmw_cephadm_dashboard_crt: "{{ cifmw_cephadm_certificate }}" - cifmw_cephadm_dashboard_key: "{{ cifmw_cephadm_key }}" - -- name: Render Ceph client configuration - tags: client - hosts: localhost - gather_facts: false - vars: - cifmw_ceph_client_vars: /tmp/ceph_client.yml - cifmw_ceph_client_fetch_dir: /tmp - cifmw_ceph_client_k8s_secret_name: ceph-conf-files - cifmw_ceph_client_k8s_namespace: openstack - cifmw_ceph_client_cluster: "{{ cifmw_cephadm_cluster | default('ceph') }}" - pre_tasks: - # end_play will end this current playbook and go the the next - # imported play. - - name: Early stop ceph related work - when: - - not _deploy_ceph | default(true) - ansible.builtin.meta: end_play - tasks: - - name: Export configuration for ceph client - ansible.builtin.import_role: - name: cifmw_ceph_client - - name: Output usage - ansible.builtin.debug: - msg: >- - Import ceph secret into k8s - 'kubectl create -f /tmp/k8s_ceph_secret.yml' diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml new file mode 120000 index 0000000000..13df2bff76 --- /dev/null +++ b/playbooks/ceph.yml @@ -0,0 +1 @@ +../hooks/playbooks/ceph.yml \ No newline at end of file diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index f380a5c0ed..e59bb66c22 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -95,7 +95,7 @@ -i ~/ci-framework-data/artifacts/zuul_inventory.yml -e @~/ci-framework-data/parameters/reproducer-variables.yml -e @~/ci-framework-data/parameters/ceph-{{ _az }}.yml - playbooks/ceph.yml + hooks/playbooks/ceph.yml - name: Load the Ceph cluster variables 
ansible.builtin.include_vars: diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md index 661681dbae..4b414fd2a0 100644 --- a/roles/cifmw_cephadm/README.md +++ b/roles/cifmw_cephadm/README.md @@ -7,7 +7,8 @@ The [openstack-k8s-operators HCI documentation](https://github.com/openstack-k8s-operators/docs/blob/main/hci.md) describes how to run Ceph on EDPM nodes but leaves it to the reader to install Ceph with `cephadm`. The `cifmw_cephadm` role and -`ceph.yml` playbook may be used to automate the Ceph installation. +`hooks/playbooks/ceph.yml` hook playbook may be used to automate the +Ceph installation. Before this role is run the following roles should be run. @@ -19,8 +20,8 @@ After this role is run, the `cifmw_ceph_client` role can generate a k8s CR which OpenStack can use to connect to the deployed Ceph cluster. -The `ceph.yml` playbook in the playbooks directory provides a complete -working example which does all of the above and has been tested on +The `ceph.yml` hook playbook in the `hooks/playbooks` directory provides +a complete working example which does all of the above and has been tested on a three EDPM node deployment from [install_yamls](https://github.com/openstack-k8s-operators/install_yamls). @@ -29,8 +30,8 @@ Requires an Ansible user who can become root to install Ceph server. ## Parameters -The `ceph.yml` playbook defaults these parameters so that they do not -need to be changed for a typical EDPM deployment. +The `hooks/playbooks/ceph.yml` hook playbook defaults these parameters so +that they do not need to be changed for a typical EDPM deployment. * `cifmw_cephadm_default_container`: If this is value is `true`, then `cephadm bootstrap` is not passed the `--image` parameter and whatever @@ -136,13 +137,13 @@ cifmw_cephadm_keys: ## Examples -See `ceph.yml` in the playbooks directory. +See `ceph.yml` in the `hooks/playbooks` directory. 
## Tips for using standalone ### Pick the appropriate storage network -In the `ceph.yml` playbook, set the `storage_network_range` variable. +In the `hooks/playbooks/ceph.yml` hook playbook, set the `storage_network_range` variable. * If network isolation is not being used, then set the `storage_network_range` variable to `192.168.122.0/24` (the default @@ -196,20 +197,39 @@ export ANSIBLE_HOST_KEY_CHECKING=False ### Run the Ceph playbook +#### Direct playbook execution using ansible-playbook ``` cd ~/ci-framework/ -ansible-playbook playbooks/ceph.yml +ansible-playbook hooks/playbooks/ceph.yml +``` + +#### Using run_hook role + +``` +- name: Deploy ceph + hosts: localhost + vars: + post_ceph: + - name: Run ceph hook playbook + type: playbook + source: ceph.yml + tasks: + - name: Run post_ceph hook + vars: + step: post_ceph + ansible.builtin.import_role: + name: run_hook ``` ## Regarding the disks used as OSDs -By default the `ceph.yml` playbook assumes there are no block devices -for Ceph to use and calls the `cifmw_block_device` role to create +By default the `hooks/playbooks/ceph.yml` hook playbook assumes there are +no block devices for Ceph to use and calls the `cifmw_block_device` role to create block devices and has the `cifmw_ceph_spec` role configure a spec to use the created block devices. -If `cifmw_ceph_spec_data_devices` is passed to the `ceph.yml` -playbook, then the `cifmw_block_device` role is not called and +If `cifmw_ceph_spec_data_devices` is passed to the `hooks/playbooks/ceph.yml` +hook playbook, then the `cifmw_block_device` role is not called and the spec created by the `cifmw_ceph_spec` role will use whatever block devices were passed by `cifmw_ceph_spec_data_devices`. Use of `cifmw_ceph_spec_data_devices` implies that the block devices diff --git a/roles/hci_prepare/README.md b/roles/hci_prepare/README.md index 24ca83239e..6d0a56a523 100644 --- a/roles/hci_prepare/README.md +++ b/roles/hci_prepare/README.md @@ -31,7 +31,7 @@ None. 
name: edpm_deploy - name: Deploy Ceph on edpm nodes - ansible.builtin.import_playbook: ceph.yml + ansible.builtin.import_playbook: hooks/playbooks/ceph.yml - name: Prepare for HCI deploy phase 2 ansible.builtin.import_role: diff --git a/scenarios/centos-9/hci_ceph_backends.yml b/scenarios/centos-9/hci_ceph_backends.yml index f0b2c20049..a23a951f37 100644 --- a/scenarios/centos-9/hci_ceph_backends.yml +++ b/scenarios/centos-9/hci_ceph_backends.yml @@ -12,6 +12,13 @@ pre_deploy: cifmw_services_swift_enabled: false +post_ceph: + - name: 80 Run Ceph hook playbook + type: playbook + source: ceph.yml + +cifmw_cephadm_log_path: /home/zuul/ci-framework-data/logs + post_deploy: - name: 81 Kustomize OpenStack CR with Ceph type: playbook diff --git a/zuul.d/edpm.yaml b/zuul.d/edpm.yaml index f325f9c408..349c4159fb 100644 --- a/zuul.d/edpm.yaml +++ b/zuul.d/edpm.yaml @@ -7,6 +7,8 @@ vars: cifmw_extras: - '@scenarios/centos-9/nested_virt.yml' + files: + - '^hooks/playbooks/ceph.yml' # Virtual Baremetal job with CRC and single compute node. 
- job: diff --git a/zuul.d/edpm_multinode.yaml b/zuul.d/edpm_multinode.yaml index 06b61e2c80..76cafd7023 100644 --- a/zuul.d/edpm_multinode.yaml +++ b/zuul.d/edpm_multinode.yaml @@ -356,11 +356,11 @@ cifmw_cephadm_prepare_host: true files: + - ^hooks/playbooks/ceph.yml - ^hooks/playbooks/control_plane_ceph_backends.yml - ^hooks/playbooks/control_plane_hci_pre_deploy.yml - ^hooks/playbooks/templates/config_ceph_backends.yaml.j2 - ^playbooks/06-deploy-edpm.yml - - ^playbooks/ceph.yml - ^roles/edpm_deploy/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/hci_prepare/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* - ^roles/cifmw_ceph.*/(defaults|files|handlers|library|lookup_plugins|module_utils|tasks|templates|vars).* From f9b046b31c02bac512495eb619b8108ed95b0869 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 26 Aug 2025 14:06:59 -0400 Subject: [PATCH 289/480] Fix SNR/NHC role CRD availability timing issues Add retry logic and configurable timeouts to handle cases where NodeHealthCheck CRDs are not immediately available after operator installation. Changes: - Add cifmw_snr_nhc_retries (default: 10) and cifmw_snr_nhc_delay (default: 15) variables to defaults/main.yml - Add retry logic to NodeHealthCheck CR existence check with configurable timeout - Add retry logic to NodeHealthCheck CR creation task - Fix task conditions to properly handle API call failures when CRDs are not yet available - Improve robustness by waiting for CRDs to be ready before attempting CR operations This addresses timing issues where the role would fail with "Failed to find exact match for remediation.medik8s.io/v1alpha1.NodeHealthCheck" when CRDs were not fully available immediately after operator installation. 
Jira: https://issues.redhat.com/browse/OSPRH-19454 Co-Authored-By: Claude Signed-off-by: John Fulton --- roles/cifmw_snr_nhc/defaults/main.yml | 2 ++ roles/cifmw_snr_nhc/tasks/main.yml | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/cifmw_snr_nhc/defaults/main.yml b/roles/cifmw_snr_nhc/defaults/main.yml index 73ac086f25..093f91e20b 100644 --- a/roles/cifmw_snr_nhc/defaults/main.yml +++ b/roles/cifmw_snr_nhc/defaults/main.yml @@ -4,3 +4,5 @@ cifmw_snr_nhc_kubeadmin_password_file: "/home/{{ ansible_user | default('zuul') cifmw_snr_nhc_namespace: openshift-workload-availability cifmw_snr_nhc_cleanup_before_install: false cifmw_snr_nhc_cleanup_namespace: false +cifmw_snr_nhc_retries: 10 +cifmw_snr_nhc_delay: 15 diff --git a/roles/cifmw_snr_nhc/tasks/main.yml b/roles/cifmw_snr_nhc/tasks/main.yml index 2a9c7322ab..31bb252e8f 100644 --- a/roles/cifmw_snr_nhc/tasks/main.yml +++ b/roles/cifmw_snr_nhc/tasks/main.yml @@ -509,10 +509,13 @@ kind: NodeHealthCheck name: nodehealthcheck-sample register: existing_nhc_cr_check + until: existing_nhc_cr_check is succeeded + retries: "{{ cifmw_snr_nhc_retries }}" + delay: "{{ cifmw_snr_nhc_delay }}" ignore_errors: true - name: Create Node Health Check CR to use SNR - when: existing_nhc_cr_check.resources | length == 0 + when: existing_nhc_cr_check is succeeded and (existing_nhc_cr_check.resources | length == 0) kubernetes.core.k8s: kubeconfig: "{{ cifmw_snr_nhc_kubeconfig }}" state: present @@ -540,9 +543,12 @@ status: Unknown duration: 30s register: nhc_cr_creation + until: nhc_cr_creation is succeeded + retries: "{{ cifmw_snr_nhc_retries }}" + delay: "{{ cifmw_snr_nhc_delay }}" - name: Display info if NodeHealthCheck CR already exists - when: existing_nhc_cr_check.resources | length > 0 + when: existing_nhc_cr_check is succeeded and (existing_nhc_cr_check.resources | length > 0) ansible.builtin.debug: msg: | NodeHealthCheck CR 'nodehealthcheck-sample' already exists and will not be recreated. 
From eb940a1e78b5601eccaa450c9be91519d21ac6d7 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 5 Aug 2025 14:35:51 -0400 Subject: [PATCH 290/480] Add dz-storage.yml scenario Add scenarios/reproducers/dz-storage.yml for Distributed Zones (DZ) with 3rd party storage. This scenario is based on the bgp-l3-xl.yml scenario but also includes storage. This adds a dz-storage scenario template directory as a symlink to bgp-l3-xl so that ci_gen_kustomize_values can locate the correct templates without duplicating files. Git tracks this as a symbolic link (120000 mode), ensuring future changes to bgp-l3-xl automatically apply to dz-storage. Jira: https://issues.redhat.com/browse/OSPRH-18447 Signed-off-by: John Fulton --- .../templates/.gitignore | 5 + .../templates/dz-storage | 1 + scenarios/reproducers/dz-storage.yml | 1151 +++++++++++++++++ 3 files changed, 1157 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/.gitignore create mode 120000 roles/ci_gen_kustomize_values/templates/dz-storage create mode 100644 scenarios/reproducers/dz-storage.yml diff --git a/roles/ci_gen_kustomize_values/templates/.gitignore b/roles/ci_gen_kustomize_values/templates/.gitignore new file mode 100644 index 0000000000..0073c5b0d2 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/.gitignore @@ -0,0 +1,5 @@ +# source: .gitignore +# dz-storage is a symlink to bgp-l3-xl (tracked in Git). +# If the symlink is removed and replaced by a real directory, +# ignore its contents to prevent accidental commits. 
+dz-storage/* diff --git a/roles/ci_gen_kustomize_values/templates/dz-storage b/roles/ci_gen_kustomize_values/templates/dz-storage new file mode 120000 index 0000000000..43886fd2df --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/dz-storage @@ -0,0 +1 @@ +bgp-l3-xl \ No newline at end of file diff --git a/scenarios/reproducers/dz-storage.yml b/scenarios/reproducers/dz-storage.yml new file mode 100644 index 0000000000..fea2ff330e --- /dev/null +++ b/scenarios/reproducers/dz-storage.yml @@ -0,0 +1,1151 @@ +--- +# Storage-specific settings for dz-storage +cifmw_devscripts_enable_iscsi_on_ocp_nodes: true +cifmw_devscripts_enable_multipath_on_ocp_nodes: true + +cifmw_architecture_scenario: dz-storage +cifmw_arch_automation_file: "dz-storage.yaml" +# Everything below is directly from bgp-l3-xl.yml but the vars +# cifmw_architecture_scenario and cifmw_arch_automation_file +# were removed and set by the two lines above. + +cifmw_os_net_setup_config: + - name: public + external: true + is_default: true + provider_network_type: flat + provider_physical_network: datacentre + shared: true + subnets: + - name: public_subnet + cidr: 192.168.133.0/24 + allocation_pool_start: 192.168.133.190 + allocation_pool_end: 192.168.133.250 + gateway_ip: 192.168.133.1 + enable_dhcp: true + + +cifmw_run_id: '' +cifmw_use_devscripts: true +cifmw_use_libvirt: true +cifmw_virtualbmc_daemon_port: 50881 +cifmw_use_uefi: >- + {{ (cifmw_repo_setup_os_release is defined + and cifmw_repo_setup_os_release == 'rhel') | bool }} +num_racks: 3 +cifmw_libvirt_manager_compute_amount: "{{ num_racks }}" +cifmw_libvirt_manager_networker_amount: 3 +cifmw_libvirt_manager_pub_net: ocpbm +cifmw_libvirt_manager_spineleaf_setup: true +cifmw_libvirt_manager_network_interface_types: + rtr-ocp: network + s0-rtr: network + s1-rtr: network + l00-s0: network + l01-s0: network + l00-s1: network + l01-s1: network + l10-s0: network + l11-s0: network + l10-s1: network + l11-s1: network + l20-s0: network + l21-s0: 
network + l20-s1: network + l21-s1: network + l00-node0: network + l00-node1: network + l00-node2: network + l00-ocp0: network + l00-ocp1: network + l00-ocp2: network + l00-ocp3: network + l01-node0: network + l01-node1: network + l01-node2: network + l01-ocp0: network + l01-ocp1: network + l01-ocp2: network + l01-ocp3: network + l10-node0: network + l10-node1: network + l10-node2: network + l10-ocp0: network + l10-ocp1: network + l10-ocp2: network + l10-ocp3: network + l11-node0: network + l11-node1: network + l11-node2: network + l11-ocp0: network + l11-ocp1: network + l11-ocp2: network + l11-ocp3: network + l20-node0: network + l20-node1: network + l20-node2: network + l20-ocp0: network + l20-ocp1: network + l20-ocp2: network + l20-ocp3: network + l21-node0: network + l21-node1: network + l21-node2: network + l21-ocp0: network + l21-ocp1: network + l21-ocp2: network + l21-ocp3: network + +cifmw_libvirt_manager_configuration: + networks: + osp_trunk: | + + osp_trunk + + + + + + # router to ocp network + rtr-ocp: | + + rtr-ocp + + + # spines to router networks + s0-rtr: | + + s0-rtr + + + s1-rtr: | + + s1-rtr + + + # leafs to spines networks + ## rack0 + l00-s0: | + + l00-s0 + + + l00-s1: | + + l00-s1 + + + l01-s0: | + + l01-s0 + + + l01-s1: | + + l01-s1 + + + ## rack1 + l10-s0: | + + l10-s0 + + + l10-s1: | + + l10-s1 + + + l11-s0: | + + l11-s0 + + + l11-s1: | + + l11-s1 + + + ## rack2 + l20-s0: | + + l20-s0 + + + l20-s1: | + + l20-s1 + + + l21-s0: | + + l21-s0 + + + l21-s1: | + + l21-s1 + + + # leafs to nodes and ocps + ## rack0 + l00-node0: | + + l00-node0 + + + l00-node1: | + + l00-node1 + + + l00-node2: | + + l00-node2 + + + l00-ocp0: | + + l00-ocp0 + + + l00-ocp1: | + + l00-ocp1 + + + l00-ocp2: | + + l00-ocp2 + + + l00-ocp3: | + + l00-ocp3 + + + l01-node0: | + + l01-node0 + + + l01-node1: | + + l01-node1 + + + l01-node2: | + + l01-node2 + + + l01-ocp0: | + + l01-ocp0 + + + l01-ocp1: | + + l01-ocp1 + + + l01-ocp2: | + + l01-ocp2 + + + l01-ocp3: | + + l01-ocp3 
+ + + ## rack1 + l10-node0: | + + l10-node0 + + + l10-node1: | + + l10-node1 + + + l10-node2: | + + l10-node2 + + + l10-ocp0: | + + l10-ocp0 + + + l10-ocp1: | + + l10-ocp1 + + + l10-ocp2: | + + l10-ocp2 + + + l10-ocp3: | + + l10-ocp3 + + + l11-node0: | + + l11-node0 + + + l11-node1: | + + l11-node1 + + + l11-node2: | + + l11-node2 + + + l11-ocp0: | + + l11-ocp0 + + + l11-ocp1: | + + l11-ocp1 + + + l11-ocp2: | + + l11-ocp2 + + + l11-ocp3: | + + l11-ocp3 + + + ## rack2 + l20-node0: | + + l20-node0 + + + l20-node1: | + + l20-node1 + + + l20-node2: | + + l20-node2 + + + l20-ocp0: | + + l20-ocp0 + + + l20-ocp1: | + + l20-ocp1 + + + l20-ocp2: | + + l20-ocp2 + + + l20-ocp3: | + + l20-ocp3 + + + l21-node0: | + + l21-node0 + + + l21-node1: | + + l21-node1 + + + l21-node2: | + + l21-node2 + + + l21-ocp0: | + + l21-ocp0 + + + l21-ocp1: | + + l21-ocp1 + + + l21-ocp2: | + + l21-ocp2 + + + l21-ocp3: | + + l21-ocp3 + + + ocpbm: | + + ocpbm + + + + + + + ocppr: | + + ocppr + + + + r0_tr: | + + r0_tr + + + + + + r1_tr: | + + r1_tr + + + + + + r2_tr: | + + r2_tr + + + + + + + vms: + controller: + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - osp_trunk + r0-compute: &r0_compute_def + amount: 2 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - "ocpbm" + - "r0_tr" + spineleafnets: + - # rack0 - compute0 + - "l00-node0" + - "l01-node0" + - # rack0 - compute0 + 
- "l00-node1" + - "l01-node1" + r1-compute: + amount: 2 + root_part_id: "{{ cifmw_root_partition_id }}" + uefi: "{{ cifmw_use_uefi }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "centos-stream-9.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - r1_tr + spineleafnets: + - # rack1 - compute0 + - "l10-node0" + - "l11-node0" + - # rack1 - compute1 + - "l10-node1" + - "l11-node1" + r2-compute: + amount: 2 + root_part_id: "{{ cifmw_root_partition_id }}" + uefi: "{{ cifmw_use_uefi }}" + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "centos-stream-9.qcow2" + disksize: 50 + memory: 8 + cpus: 4 + nets: + - ocpbm + - r2_tr + spineleafnets: + - # rack2 - compute0 + - "l20-node0" + - "l21-node0" + - # rack2 - compute1 + - "l20-node1" + - "l21-node1" + + r0-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r0_tr" + spineleafnets: + - # rack0 - networker0 + - "l00-node2" + - "l01-node2" + r1-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r1_tr" + spineleafnets: + - # rack1 - 
networker0 + - "l10-node2" + - "l11-node2" + r2-networker: + amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 40 + memory: 8 + cpus: 4 + # ansible_group: networker + nets: + - "ocpbm" + - "r2_tr" + spineleafnets: + - # rack2 - networker0 + - "l20-node2" + - "l21-node2" + ocp: + amount: 3 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_master" + disksize: "105" + memory: 16 + cpus: 10 + extra_disks_num: 1 + extra_disks_size: "20G" + nets: # nets common to all the ocp nodes + - "ocppr" + - "ocpbm" + - "osp_trunk" + spineleafnets: + - # rack0 - ocp master 0 + - "l00-ocp0" + - "l01-ocp0" + - # rack1 - ocp master 1 + - "l10-ocp0" + - "l11-ocp0" + - # rack2 - ocp master 2 + - "l20-ocp0" + - "l21-ocp0" + ocp_worker: + amount: 10 + uefi: true + root_part_id: 4 + admin_user: core + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "ocp_worker" + disksize: "105" + memory: 16 + cpus: 10 + extra_disks_num: 1 + extra_disks_size: "20G" + nets: # nets common to all the ocp_worker nodes + - "ocppr" + - "ocpbm" + - "osp_trunk" + spineleafnets: + - # rack0 - ocp worker 0 + - "l00-ocp1" + - "l01-ocp1" + - # rack0 - ocp worker 1 + - "l00-ocp2" + - "l01-ocp2" + - # rack0 - ocp worker 2 + - "l00-ocp3" + - "l01-ocp3" + - # rack1 - ocp worker 3 + - "l10-ocp1" + - "l11-ocp1" + - # rack1 - ocp worker 4 + - "l10-ocp2" + - "l11-ocp2" + - # rack1 - ocp worker 5 + - "l10-ocp3" + - "l11-ocp3" + - # rack2 - ocp worker 6 + - "l20-ocp1" + - "l21-ocp1" + - # rack2 - ocp worker 7 + - "l20-ocp2" + - "l21-ocp2" + - # rack2 - ocp worker 8 + - "l20-ocp3" + - "l21-ocp3" + - # router - ocp_tester (worker 9) + - "rtr-ocp" + router: + 
amount: 1 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the router nodes + - "ocpbm" + spineleafnets: + - # router - ocp_tester + - "s0-rtr" + - "s1-rtr" + - "rtr-ocp" + spine: + amount: 2 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the spine nodes + - "ocpbm" + spineleafnets: + - # spine0 + - "l00-s0" + - "l01-s0" + - "l10-s0" + - "l11-s0" + - "l20-s0" + - "l21-s0" + - "s0-rtr" + - # spine1 + - "l00-s1" + - "l01-s1" + - "l10-s1" + - "l11-s1" + - "l20-s1" + - "l21-s1" + - "s1-rtr" + leaf: + amount: 6 + root_part_id: >- + {{ + (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | + ternary(4, 1) + }} + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + image_local_dir: "{{ cifmw_basedir }}/images/" + disk_file_name: "base-os.qcow2" + disksize: 25 + memory: 4 + cpus: 2 + nets: # nets common to all the leaf nodes + - "ocpbm" + spineleafnets: + - # rack0 - leaf00 + - "l00-s0" + - "l00-s1" + - "l00-node0" + - "l00-node1" + - "l00-node2" + - "l00-ocp0" + - "l00-ocp1" + - "l00-ocp2" + - "l00-ocp3" + - # rack0 - leaf01 + - "l01-s0" + - "l01-s1" + - "l01-node0" + - "l01-node1" + - "l01-node2" + - "l01-ocp0" + - "l01-ocp1" + - "l01-ocp2" + - "l01-ocp3" + - # rack1 - leaf10 + - "l10-s0" + - "l10-s1" + - "l10-node0" + - "l10-node1" + - 
"l10-node2" + - "l10-ocp0" + - "l10-ocp1" + - "l10-ocp2" + - "l10-ocp3" + - # rack1 - leaf11 + - "l11-s0" + - "l11-s1" + - "l11-node0" + - "l11-node1" + - "l11-node2" + - "l11-ocp0" + - "l11-ocp1" + - "l11-ocp2" + - "l11-ocp3" + - # rack2 - leaf20 + - "l20-s0" + - "l20-s1" + - "l20-node0" + - "l20-node1" + - "l20-node2" + - "l20-ocp0" + - "l20-ocp1" + - "l20-ocp2" + - "l20-ocp3" + - # rack2 - leaf21 + - "l21-s0" + - "l21-s1" + - "l21-node0" + - "l21-node1" + - "l21-node2" + - "l21-ocp0" + - "l21-ocp1" + - "l21-ocp2" + - "l21-ocp3" + +## devscript support for OCP deploy +cifmw_devscripts_config_overrides: + fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" + cluster_subnet_v4: "192.172.0.0/16" + network_config_folder: "/home/zuul/netconf" + +# Required for egress traffic from pods to the osp_trunk network +cifmw_devscripts_enable_ocp_nodes_host_routing: true + +# Automation section. Most of those parameters will be passed to the +# controller-0 as-is and be consumed by the `deploy-va.sh` script. +# Please note, all paths are on the controller-0, meaning managed by the +# Framework. Please do not edit them! +_arch_repo: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" +cifmw_kustomize_deploy_architecture_examples_path: "examples/dt/" +cifmw_architecture_automation_file: >- + {{ + (_arch_repo, + 'automation/vars', + cifmw_arch_automation_file) | + path_join + }} + +cifmw_kustomize_deploy_metallb_source_files: >- + {{ + (_arch_repo, + 'examples/dt/bgp-l3-xl/metallb') | + path_join + }} + +# bgp_spines_leaves_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. +# src_dir }}/playbooks/bgp/prepare-bgp-spines-leaves.yaml" +# bgp_computes_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. 
+# src_dir }}/playbooks/bgp/prepare-bgp-computes.yaml" + + +pre_deploy: + - name: BGP spines and leaves configuration + type: playbook + source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/bgp/prepare-bgp-spines-leaves.yaml" + extra_vars: + num_racks: "{{ num_racks }}" + router_bool: true + edpm_nodes_per_rack: 3 + ocp_nodes_per_rack: 4 + router_uplink_ip: 100.64.10.1 + +# post_deploy: +# - name: BGP computes configuration +# type: playbook +# source: "{{ bgp_computes_playbook }}" +# extra_vars: +# #networkers_bool: true +# networkers_bool: false + +cifmw_libvirt_manager_default_gw_nets: + - ocpbm + - r0_tr + - r1_tr + - r2_tr +cifmw_networking_mapper_interfaces_info_translations: + osp_trunk: + - controlplane + - ctlplane + r0_tr: + - ctlplaner0 + r1_tr: + - ctlplaner1 + r2_tr: + - ctlplaner2 + + +cifmw_networking_definition: + networks: + ctlplane: + network: "192.168.125.0/24" + gateway: "192.168.125.1" + dns: + - "192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 120 + - start: 150 + end: 200 + + ctlplaner0: + network: "192.168.122.0/24" + gateway: "192.168.122.1" + dns: + - "192.168.122.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 200 + + ctlplaner1: + network: "192.168.123.0/24" + gateway: "192.168.123.1" + dns: + - "192.168.123.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 170 + metallb: + ranges: + - start: 80 + end: 90 + ctlplaner2: + network: "192.168.124.0/24" + gateway: "192.168.124.1" + dns: + - "192.168.124.1" + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 130 + - start: 150 + end: 170 + metallb: + ranges: + - 
start: 80 + end: 90 + + internalapi: + network: "172.17.0.0/24" + vlan: 20 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + storage: + network: "172.18.0.0/24" + vlan: 21 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + tenant: + network: "172.19.0.0/24" + vlan: 22 + mtu: 1500 + tools: + multus: + ranges: + - start: 30 + end: 70 + metallb: + ranges: + - start: 80 + end: 90 + netconfig: + ranges: + - start: 100 + end: 250 + + octavia: + vlan: 23 + mtu: 1500 + network: "172.23.0.0/24" + tools: + multus: + ranges: + - start: 30 + end: 70 + netconfig: + ranges: + - start: 100 + end: 250 + + # Not really used, but required by architecture + # https://github.com/openstack-k8s-operators/architecture/blob/main/lib/networking/netconfig/kustomization.yaml#L28-L36 + external: + network: "192.168.32.0/20" + vlan: 99 + mtu: 1500 + tools: + netconfig: + ranges: + - start: 130 + end: 250 + + group-templates: + r0-computes: + network-template: + range: + start: 100 + length: 5 + networks: + ctlplaner0: {} + internalapi: + trunk-parent: ctlplaner0 + tenant: + trunk-parent: ctlplaner0 + storage: + trunk-parent: ctlplaner0 + r1-computes: + network-template: + range: + start: 110 + length: 5 + networks: + ctlplaner1: {} + internalapi: + trunk-parent: ctlplaner1 + tenant: + trunk-parent: ctlplaner1 + storage: + trunk-parent: ctlplaner1 + r2-computes: + network-template: + range: + start: 120 + length: 5 + networks: + ctlplaner2: {} + internalapi: + trunk-parent: ctlplaner2 + tenant: + trunk-parent: ctlplaner2 + storage: + trunk-parent: ctlplaner2 + r0-networkers: + network-template: + range: + start: 200 + length: 5 + networks: + ctlplaner0: {} + internalapi: + trunk-parent: ctlplaner0 + tenant: + trunk-parent: ctlplaner0 + storage: + trunk-parent: ctlplaner0 + 
r1-networkers: + network-template: + range: + start: 210 + length: 5 + networks: + ctlplaner1: {} + internalapi: + trunk-parent: ctlplaner1 + tenant: + trunk-parent: ctlplaner1 + storage: + trunk-parent: ctlplaner1 + r2-networkers: + network-template: + range: + start: 220 + length: 5 + networks: + ctlplaner2: {} + internalapi: + trunk-parent: ctlplaner2 + tenant: + trunk-parent: ctlplaner2 + storage: + trunk-parent: ctlplaner2 + ocps: + network-template: + range: + start: 10 + length: 10 + networks: {} + ocp_workers: + network-template: + range: + start: 20 + length: 10 + networks: {} + + instances: + controller-0: + networks: + ctlplane: + ip: "192.168.125.9" From a8d02f451768e59c4660b3653123c6b92c276778 Mon Sep 17 00:00:00 2001 From: eshulman2 Date: Tue, 26 Aug 2025 14:10:44 +0300 Subject: [PATCH 291/480] Cleanup scripts and old ansible facts Cleanup scripts and old ansible facts leftovers from artifacts directory --- roles/cleanup_openstack/tasks/main.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/roles/cleanup_openstack/tasks/main.yaml b/roles/cleanup_openstack/tasks/main.yaml index 4830834096..7a1241629a 100644 --- a/roles/cleanup_openstack/tasks/main.yaml +++ b/roles/cleanup_openstack/tasks/main.yaml @@ -99,6 +99,19 @@ }} ansible.builtin.import_tasks: cleanup_crs.yaml +- name: Get artifacts scripts + ansible.builtin.find: + path: "{{ cifmw_kustomize_deploy_basedir }}/artifacts" + patterns: "*.sh, ansible_facts.*" + register: artifacts_to_remove + +- name: Remove artifacts + become: true + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: "{{ artifacts_to_remove.files | map(attribute='path') | list }}" + - name: Remove logs and tests directories ansible.builtin.file: path: "{{ item }}" From c323d53a8836697a225699513cf88adcb1ccded2 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 21 Aug 2025 17:11:44 +0200 Subject: [PATCH 292/480] Add support for tempest re-run feature MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit The tempest runner script has recently got a feature that allows re-running the failed tests – for having additional insight on the issues repeatability. This commit adds the support of enabling that feature from the test-operator perspective. Related: https://github.com/openstack-k8s-operators/tcib/pull/320 Related: https://github.com/openstack-k8s-operators/test-operator/pull/330 --- roles/test_operator/README.md | 2 ++ roles/test_operator/defaults/main.yml | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 1bd9ba9351..e29c290dc0 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -88,6 +88,8 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_extra_configmaps_mounts`: (List) A list of configmaps that should be mounted into the tempest test pods. Default value: `[]` * `cifmw_test_operator_tempest_extra_mounts`: (List) A list of additional volume mounts for the tempest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_tempest_debug`: (Bool) Run Tempest in debug mode, it keeps the operator pod sleeping infinity (it must only set to `true`only for debugging purposes). Default value: `false` +* `cifmw_test_operator_tempest_rerun_failed_tests`: (Bool) Activate tempest re-run feature. When activated, tempest will perform another run of the tests that failed during the first execution. Default value: `false` +* `cifmw_test_operator_tempest_rerun_override_status`: (Bool) Allow override of exit status with the tempest re-run feature. When activated, the original return value of the tempest run will be overridden with a result of the tempest run on the set of failed tests. Default value: `false` * `cifmw_test_operator_tempest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. 
When untouched it clears the default values set on the test-operator side. This means that the tempest test pods run with unspecified resource limits. Default value: `{requests: {}, limits: {}}` * `cifmw_tempest_tempestconf_config`: Deprecated, please use `cifmw_test_operator_tempest_tempestconf_config` instead * `cifmw_test_operator_tempest_tempestconf_config`: (Dict) This parameter can be used to customize the execution of the `discover-tempest-config` run. Please consult the test-operator documentation. For example, to pass a custom configuration for `tempest.conf`, use the `overrides` section: diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 0792d1ea00..c1679230e2 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -76,6 +76,8 @@ cifmw_test_operator_tempest_tests_include_override_scenario: false cifmw_test_operator_tempest_tests_exclude_override_scenario: false cifmw_test_operator_tempest_workflow: [] cifmw_test_operator_tempest_cleanup: false +cifmw_test_operator_tempest_rerun_failed_tests: false +cifmw_test_operator_tempest_rerun_override_status: false cifmw_test_operator_tempest_tempestconf_config: "{{ cifmw_tempest_tempestconf_config }}" # TODO: The default value of this parameter should be changed to {} once this fix @@ -163,6 +165,8 @@ cifmw_test_operator_tempest_config: extraImages: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_images | default([]) }}" tempestconfRun: "{{ cifmw_tempest_tempestconf_config_defaults | combine(stage_vars_dict.cifmw_test_operator_tempest_tempestconf_config | default({})) }}" cleanup: "{{ stage_vars_dict.cifmw_test_operator_tempest_cleanup | bool }}" + rerunFailedTests: "{{ stage_vars_dict.cifmw_test_operator_tempest_rerun_failed_tests | bool }}" + rerunOverrideStatus: "{{ stage_vars_dict.cifmw_test_operator_tempest_rerun_override_status | bool }}" workflow: "{{ stage_vars_dict.cifmw_test_operator_tempest_workflow }}" debug: 
"{{ stage_vars_dict.cifmw_test_operator_tempest_debug }}" From 4448d7853858c9e050341298f823c66e368377eb Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Sat, 16 Aug 2025 06:45:33 +0200 Subject: [PATCH 293/480] Add ram to va-pidone workers --- scenarios/reproducers/va-pidone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/reproducers/va-pidone.yml b/scenarios/reproducers/va-pidone.yml index ed3f2d510e..3695c788b5 100644 --- a/scenarios/reproducers/va-pidone.yml +++ b/scenarios/reproducers/va-pidone.yml @@ -79,7 +79,7 @@ cifmw_libvirt_manager_configuration: extra_disks_num: 4 extra_disks_size: "100G" cpus: 10 - memory: 16 + memory: 32 nets: - ocppr - ocpbm From 82ae564d1fe504fd157c085cf9c40d0513f12231 Mon Sep 17 00:00:00 2001 From: Sergii Golovatiuk Date: Thu, 14 Aug 2025 23:27:37 +0200 Subject: [PATCH 294/480] Run postdeploy tasks one when deploy is enabled When cifmw_deploy_architecture is false, the job fails as postdeploy tasks are very dependant on deployment. This patch adds and condition to run postdeployment tasks only when deployment is specified as well --- reproducer.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/reproducer.yml b/reproducer.yml index 54b9c0f476..91bffd1f29 100644 --- a/reproducer.yml +++ b/reproducer.yml @@ -106,6 +106,7 @@ - name: Run post deployment if instructed to when: + - cifmw_deploy_architecture | default(false) | bool - cifmw_post_deployment | default(true) | bool no_log: "{{ cifmw_nolog | default(true) | bool }}" async: "{{ 7200 + cifmw_test_operator_timeout | default(3600) }}" # 2h should be enough to deploy EDPM and rest for tests. From 95547e87836b7882ecdacc03b3146c554ab7dc39 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 28 Aug 2025 12:58:29 +0200 Subject: [PATCH 295/480] Drop pre-run and post-run for kuttl_multinode The playbook execution is duplicated and it raises complication in some cases. 
Signed-off-by: Daniel Pawlik --- zuul.d/kuttl_multinode.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index 7d40c19edc..f0b38929b4 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -41,13 +41,9 @@ ip: 172.18.0.5 tenant: ip: 172.19.0.5 - pre-run: - - ci/playbooks/e2e-prepare.yml run: - ci/playbooks/dump_zuul_data.yml - ci/playbooks/kuttl/run.yml - post-run: - - ci/playbooks/collect-logs.yml required-projects: - github.com/openstack-k8s-operators/install_yamls From df648f6fd4112c7a2d87536e1ca9bd939ff3d0b3 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Thu, 28 Aug 2025 13:20:35 -0400 Subject: [PATCH 296/480] Default fips_mode in BGP leaf-frr template Without a default value, then the Configure FRR task in the playbook prepare-bgp-spines-leaves.yaml will fail with the following, unless the user knows to override this variable. 'AnsibleUndefinedVariable: ''fips_mode'' is undefined. Signed-off-by: John Fulton --- playbooks/bgp/templates/leaf-frr.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/bgp/templates/leaf-frr.conf.j2 b/playbooks/bgp/templates/leaf-frr.conf.j2 index d65b1e84b7..3b432027b4 100644 --- a/playbooks/bgp/templates/leaf-frr.conf.j2 +++ b/playbooks/bgp/templates/leaf-frr.conf.j2 @@ -28,7 +28,7 @@ router bgp 64999 neighbor downlink bfd neighbor downlink bfd profile tripleo {# TODO: remove the next if when RHEL-63205 is fixed #} -{% if not (fips_mode | bool) %} +{% if not (fips_mode | default(false) | bool) %} neighbor downlink password f00barZ {% endif %} ! neighbor downlink capability extended-nexthop From 1bcabd2d5f4830f0b8cc9c021559d5246afc9b2d Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 21 Aug 2025 08:51:57 +0200 Subject: [PATCH 297/480] Run nested ansible command and call run_logs tasks to drop 99-logs play Earlier the 99-logs.yml playbook was executed as nested Ansible. 
After we migrate that playbook to role, it can not be called directly, because it contains: cimfw collection to be installed on Zuul executor and include_vars can not read files, that are located on the controller, so they needs to be synchronized with Zuul executor job dir. Also fix condition that was changed when moving 99-logs.yml play into the role. Signed-off-by: Daniel Pawlik --- ci/playbooks/e2e-collect-logs.yml | 20 +++++++++++++------- roles/cifmw_setup/tasks/run_logs.yml | 7 ++++--- zuul.d/kuttl.yaml | 2 ++ 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 63371fe3ba..30cd43a856 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -20,22 +20,28 @@ - not cifmw_status.stat.exists ansible.builtin.meta: end_host - - name: Run log collection - ansible.builtin.command: +- name: Run log collection when zuul_log_collection + hosts: "{{ cifmw_target_host | default(cifmw_zuul_target_host) | default('controller') }}" + gather_facts: true + tasks: + - name: Run run_logs tasks from cifmw_setup + ansible.builtin.command: > + ansible localhost + -m include_role + -a "name=cifmw_setup tasks_from=run_logs.yml" + -e "@scenarios/centos-9/base.yml" + args: chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" - cmd: >- - ansible-playbook playbooks/99-logs.yml - -e @scenarios/centos-9/base.yml - name: "Run ci/playbooks/collect-logs.yml on CRC host" hosts: crc gather_facts: false tasks: - name: Get kubelet journalctl logs - ignore_errors: true # noqa: ignore-errors + ignore_errors: true # noqa: ignore-errors become: true ansible.builtin.shell: | - journalctl -u kubelet > kubelet.log + journalctl -u kubelet > kubelet.log no_log: true args: chdir: "{{ ansible_user_dir }}/zuul-output/logs/" diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index 2d0ef72130..9b909351c2 100644 --- 
a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -1,5 +1,6 @@ +--- - name: Check if the logging requires - when: not zuul_log_collection | default('false') | bool + when: not (zuul_log_collection | default(false)) block: - name: Ensure cifmw_basedir param is set when: @@ -9,7 +10,7 @@ - name: Try to load parameters files block: - - name: Check directory availabilty + - name: Check directory availability register: param_dir ansible.builtin.stat: path: "{{ cifmw_basedir }}/artifacts/parameters" @@ -54,7 +55,7 @@ - name: Return a list of log files in home directory ansible.builtin.find: paths: "{{ ansible_user_dir }}" - patterns: '*.log' + patterns: "*.log" register: _log_files - name: Ensure ansible facts cache exists diff --git a/zuul.d/kuttl.yaml b/zuul.d/kuttl.yaml index d8015bb220..883e53cd3c 100644 --- a/zuul.d/kuttl.yaml +++ b/zuul.d/kuttl.yaml @@ -23,6 +23,8 @@ name: cifmw-kuttl parent: cifmw-base-kuttl files: + - ^ci/playbooks/e2e-collect-logs.yml + - ^ci/playbooks/collect-logs.yml - ^ci/playbooks/kuttl/.* - ^scenarios/centos-9/kuttl.yml - ^zuul.d/kuttl.yaml From cda9cae5c62f538cd86ecb0315ac3486eae8ab6f Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 29 Aug 2025 17:32:13 +0200 Subject: [PATCH 298/480] Remove condition for zuul_log_collection to openstack-must-gather The condition seems not to be required according to basic tests. Let's drop it. 
Signed-off-by: Daniel Pawlik --- roles/cifmw_setup/tasks/run_logs.yml | 180 +++++++++++++-------------- 1 file changed, 88 insertions(+), 92 deletions(-) diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index 9b909351c2..eb1ec53eaf 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -1,105 +1,101 @@ ---- -- name: Check if the logging requires - when: not (zuul_log_collection | default(false)) +- name: Ensure cifmw_basedir param is set + when: + - cifmw_basedir is not defined + ansible.builtin.set_fact: + cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" + +- name: Try to load parameters files block: - - name: Ensure cifmw_basedir param is set + - name: Check directory availabilty + register: param_dir + ansible.builtin.stat: + path: "{{ cifmw_basedir }}/artifacts/parameters" + + - name: Load parameters files when: - - cifmw_basedir is not defined + - param_dir.stat.exists | bool + ansible.builtin.include_vars: + dir: "{{ cifmw_basedir }}/artifacts/parameters" + always: + - name: Set custom cifmw PATH reusable fact + when: + - cifmw_path is not defined ansible.builtin.set_fact: - cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + cacheable: true - - name: Try to load parameters files - block: - - name: Check directory availability - register: param_dir - ansible.builtin.stat: - path: "{{ cifmw_basedir }}/artifacts/parameters" +- name: Set destination folder for the logs + ansible.builtin.set_fact: + logfiles_dest_dir: >- + {{ + ( + cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data'), + 'logs/', + now(fmt='%Y-%m-%d_%H-%M') + ) | path_join + }} +- name: Generate artifacts + ansible.builtin.import_role: + name: artifacts - - name: Load parameters files - when: - - param_dir.stat.exists | bool - ansible.builtin.include_vars: - 
dir: "{{ cifmw_basedir }}/artifacts/parameters" - always: - - name: Set custom cifmw PATH reusable fact - when: - - cifmw_path is not defined - ansible.builtin.set_fact: - cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" - cacheable: true +- name: Collect container images used in the environment + ansible.builtin.import_role: + name: env_op_images - - name: Set destination folder for the logs - ansible.builtin.set_fact: - logfiles_dest_dir: >- +- name: Create a versioned log folder + ansible.builtin.file: + path: "{{ logfiles_dest_dir }}" + state: directory + mode: "0775" + +- name: Return a list of log files in home directory + ansible.builtin.find: + paths: "{{ ansible_user_dir }}" + patterns: "*.log" + register: _log_files + +- name: Ensure ansible facts cache exists + register: ansible_facts_cache_state + ansible.builtin.stat: + path: "{{ ansible_user_dir }}/ansible_facts_cache" + +- name: Copy log files + when: + - _log_files.matched > 0 + block: + - name: Copy logs to proper location + ansible.builtin.copy: + src: "{{ item.path }}" + dest: "{{ [ logfiles_dest_dir , item.path | basename ] | path_join }}" + remote_src: true + mode: "0666" + loop: "{{ _log_files.files }}" + + - name: Remove original log from home directory + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ _log_files.files }}" + +- name: Copy Ansible facts if exists + when: + - ansible_facts_cache_state.stat.exists is defined + - ansible_facts_cache_state.stat.exists | bool + block: + - name: Copy facts to dated directory + ansible.builtin.copy: + src: "{{ ansible_user_dir }}/ansible_facts_cache" + dest: >- {{ ( - cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data'), - 'logs/', - now(fmt='%Y-%m-%d_%H-%M') + cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data'), + "artifacts/ansible_facts." 
+ now(fmt='%Y-%m-%d_%H-%M') ) | path_join }} - - name: Generate artifacts - ansible.builtin.import_role: - name: artifacts - - - name: Collect container images used in the environment - ansible.builtin.import_role: - name: env_op_images + mode: "0777" + remote_src: true - - name: Create a versioned log folder + - name: Clean ansible fact cache ansible.builtin.file: - path: "{{ logfiles_dest_dir }}" - state: directory - mode: "0775" - - - name: Return a list of log files in home directory - ansible.builtin.find: - paths: "{{ ansible_user_dir }}" - patterns: "*.log" - register: _log_files - - - name: Ensure ansible facts cache exists - register: ansible_facts_cache_state - ansible.builtin.stat: path: "{{ ansible_user_dir }}/ansible_facts_cache" - - - name: Copy log files - when: - - _log_files.matched > 0 - block: - - name: Copy logs to proper location - ansible.builtin.copy: - src: "{{ item.path }}" - dest: "{{ [ logfiles_dest_dir , item.path | basename ] | path_join }}" - remote_src: true - mode: "0666" - loop: "{{ _log_files.files }}" - - - name: Remove original log from home directory - ansible.builtin.file: - path: "{{ item.path }}" - state: absent - loop: "{{ _log_files.files }}" - - - name: Copy Ansible facts if exists - when: - - ansible_facts_cache_state.stat.exists is defined - - ansible_facts_cache_state.stat.exists | bool - block: - - name: Copy facts to dated directory - ansible.builtin.copy: - src: "{{ ansible_user_dir }}/ansible_facts_cache" - dest: >- - {{ - ( - cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data'), - "artifacts/ansible_facts." 
+ now(fmt='%Y-%m-%d_%H-%M') - ) | path_join - }} - mode: "0777" - remote_src: true - - - name: Clean ansible fact cache - ansible.builtin.file: - path: "{{ ansible_user_dir }}/ansible_facts_cache" - state: absent + state: absent From 268b511f9fbd5975f34a5b41996a5d42a2e6f6db Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 3 Sep 2025 12:23:42 +0530 Subject: [PATCH 299/480] Add wait condition for router pod Due to some reason, router pod was taking more time to be in ready state. It was causing the route resource creation to fail. Having a wait task before creating the route resource will ensure it does not fail. --- roles/sushy_emulator/tasks/apply_resources.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/roles/sushy_emulator/tasks/apply_resources.yml b/roles/sushy_emulator/tasks/apply_resources.yml index 6bda206a08..5f627c3812 100644 --- a/roles/sushy_emulator/tasks/apply_resources.yml +++ b/roles/sushy_emulator/tasks/apply_resources.yml @@ -23,6 +23,21 @@ kind: Namespace state: present +- name: Check if router pod is running in openshift-ingress namespace + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: openshift-ingress + kind: Pod + label_selectors: "ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default" + wait: true + wait_sleep: 10 + wait_timeout: 360 + wait_condition: + type: Ready + status: "True" + - name: Apply Sushy Emulator resources kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" From fb16cbd83394b99f721be1efd6f6aed70375e4ce Mon Sep 17 00:00:00 2001 From: Andrew Bays Date: Tue, 12 Aug 2025 15:47:52 +0000 Subject: [PATCH 300/480] [OSPRH-19024] Remove control plane bridges from multi-ns VA --- .../network-values/values.yaml.j2 | 126 ++++++++++++++++++ .../network-values2/values.yaml.j2 | 2 - scenarios/reproducers/va-multi.yml | 6 
+- 3 files changed, 130 insertions(+), 4 deletions(-) create mode 100644 roles/ci_gen_kustomize_values/templates/multi-namespace/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values/values.yaml.j2 new file mode 100644 index 0000000000..fc50d6f04f --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values/values.yaml.j2 @@ -0,0 +1,126 @@ +--- +# source: multi-namespace/network-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} + - allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network[_ipv.network_vX] }} +{% if network[_ipv.gw_vX] is defined %} + gateway: {{ 
network[_ipv.gw_vX] }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }} +{% for nameserver in 
cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 index a9ad9c29dc..ac7ffc8104 100644 --- a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 @@ -83,8 +83,6 @@ data: "type": "macvlan", {% if network.vlan_id is defined%} "master": "{{ network.network_name }}", -{% elif network.network_name == "ctlplane" %} - "master": "ospbr", {% else %} "master": "{{ ns.interfaces[network.network_name] }}", {% endif %} diff --git a/scenarios/reproducers/va-multi.yml b/scenarios/reproducers/va-multi.yml index 1ebd366ac4..0c71d6b25a 100644 --- a/scenarios/reproducers/va-multi.yml +++ b/scenarios/reproducers/va-multi.yml @@ -93,8 +93,10 @@ cifmw_libvirt_manager_configuration: nets: - ocppr - ocpbm - - osp_trunk - - osptrunk2 + - osp_trunk # ctlplane and isolated networks for openstack namespace cloud + - osptrunk2 # ctlplane and isolated networks for openstack2 namespace cloud + - osp_trunk # OVN datacentre for openstack 
namespace cloud + - osptrunk2 # OVN datacentre for openstack2 namespace cloud compute: uefi: "{{ cifmw_use_uefi }}" root_part_id: "{{ cifmw_root_partition_id }}" From f6ba4edf6f0b5fe502280c4c03657c2f86afad18 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 3 Sep 2025 08:58:40 +0200 Subject: [PATCH 301/480] Add logging to a file for os must gather task After we drop executing Ansible playbook and we move temporary to Ansible command directly, there is an issue that some tasks are not executed properly. Add ANSIBLE_LOG_PATH for that task for now, to make clear view what is executed. Signed-off-by: Daniel Pawlik --- ci/playbooks/e2e-collect-logs.yml | 2 ++ roles/cifmw_setup/tasks/run_logs.yml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 30cd43a856..3932feb899 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -32,6 +32,8 @@ -e "@scenarios/centos-9/base.yml" args: chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + environment: + ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ci-framework-data/logs/e2e-collect-logs-must-gather.log" - name: "Run ci/playbooks/collect-logs.yml on CRC host" hosts: crc diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index eb1ec53eaf..20d939d9dd 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -6,7 +6,7 @@ - name: Try to load parameters files block: - - name: Check directory availabilty + - name: Check directory availability register: param_dir ansible.builtin.stat: path: "{{ cifmw_basedir }}/artifacts/parameters" From 1bf036c7080fdb3de066e220a1e793fe53f0b438 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Thu, 21 Aug 2025 11:23:19 +0200 Subject: [PATCH 302/480] Add generic must-gather fallback Adding a generic must-gather fallback in case it fails fist, the second one is a 
simpler one less likely to fail. --- roles/os_must_gather/tasks/main.yml | 59 ++++++++++++++++++----------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index 6d96acd9fc..a7157a2311 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -99,27 +99,40 @@ "{{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather" rescue: - - name: Create oc_inspect log directory - ansible.builtin.file: - path: "{{ cifmw_os_must_gather_output_dir }}/logs/oc_inspect" - state: directory - mode: "0755" + - name: Openstack-must-gather failure + block: + - name: Log openstack-must-gather failure + ansible.builtin.debug: + msg: "OpenStack must-gather failed, running fallback generic must-gather" - - name: Inspect the cluster after must-gather failure - ignore_errors: true # noqa: ignore-errors - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" - cifmw.general.ci_script: - output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" - script: | - oc adm inspect namespace/{{ item }} --dest-dir={{ cifmw_os_must_gather_output_dir }}/logs/oc_inspect - loop: >- - {{ - ( - cifmw_os_must_gather_namespaces | default([]) + - ( - cifmw_os_must_gather_additional_namespaces | split(',') | list - ) - ) | unique - }} + - name: Run fallback generic must-gather command + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + ansible.builtin.command: + cmd: oc adm must-gather --dest-dir {{ ansible_user_dir }}/ci-framework-data/must-gather + always: + - name: Create oc_inspect log directory + ansible.builtin.file: + path: "{{ cifmw_os_must_gather_output_dir }}/logs/oc_inspect" + state: directory + mode: "0755" + + - name: Inspect the cluster after must-gather failure + ignore_errors: true # noqa: ignore-errors + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" 
+ PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" + script: | + oc adm inspect namespace/{{ item }} --dest-dir={{ cifmw_os_must_gather_output_dir }}/logs/oc_inspect + loop: >- + {{ + ( + cifmw_os_must_gather_namespaces | default([]) + + ( + cifmw_os_must_gather_additional_namespaces | split(',') | list + ) + ) | unique + }} From 797a67465778b1ed2e15f03e5763bae67cd232eb Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 4 Sep 2025 12:43:58 +0530 Subject: [PATCH 303/480] Create group_vars role and playbook This commit is the beginning of moving common variable to group_vars. In the first commit, we are creating group_vars/all.yml, a role to read this, and a playbook to run the role. In upcoming patches, we will start moving common vars to group_vars. --- ci/playbooks/group_vars | 1 + ci/playbooks/read_global_vars.yml | 5 +++++ group_vars/all.yml | 4 ++++ playbooks/group_vars | 1 + roles/read_global_vars/tasks/main.yml | 12 ++++++++++++ zuul.d/molecule.yaml | 9 +++++++++ zuul.d/projects.yaml | 1 + 7 files changed, 33 insertions(+) create mode 120000 ci/playbooks/group_vars create mode 100644 ci/playbooks/read_global_vars.yml create mode 100644 group_vars/all.yml create mode 120000 playbooks/group_vars create mode 100644 roles/read_global_vars/tasks/main.yml diff --git a/ci/playbooks/group_vars b/ci/playbooks/group_vars new file mode 120000 index 0000000000..14bb1b3d9f --- /dev/null +++ b/ci/playbooks/group_vars @@ -0,0 +1 @@ +../../group_vars \ No newline at end of file diff --git a/ci/playbooks/read_global_vars.yml b/ci/playbooks/read_global_vars.yml new file mode 100644 index 0000000000..0439a2b3fc --- /dev/null +++ b/ci/playbooks/read_global_vars.yml @@ -0,0 +1,5 @@ +--- +- name: Load global variables + hosts: all + roles: + - read_global_vars diff --git a/group_vars/all.yml b/group_vars/all.yml new file mode 100644 index 0000000000..eff219b575 --- /dev/null +++ b/group_vars/all.yml @@ -0,0 
+1,4 @@ +--- +# This file contains all repeating variables, that can be set +# globaly instead of parse Zuul inventory file to get proper value. +#### GLOBAL VARS #### diff --git a/playbooks/group_vars b/playbooks/group_vars new file mode 120000 index 0000000000..cc7e7a90f9 --- /dev/null +++ b/playbooks/group_vars @@ -0,0 +1 @@ +../group_vars \ No newline at end of file diff --git a/roles/read_global_vars/tasks/main.yml b/roles/read_global_vars/tasks/main.yml new file mode 100644 index 0000000000..d4d629ecf7 --- /dev/null +++ b/roles/read_global_vars/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Include group vars + ansible.builtin.include_vars: + file: "{{ playbook_dir }}/group_vars/all.yml" + name: group_vars_global_vars + +- name: Set global variables as cachable fact + when: group_vars_global_vars | default(false) + ansible.builtin.set_fact: + "{{ item.key }}": "{{ item.value }}" + cacheable: true + loop: "{{ group_vars_global_vars | dict2items }}" diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 60e2eb4ec2..4c6b6f05a3 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -976,6 +976,15 @@ - ^.config/molecule/.* name: cifmw-molecule-polarion parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/read_global_vars/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-read_global_vars + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index edc113847d..e97f560f1e 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -83,6 +83,7 @@ - cifmw-molecule-pkg_build - cifmw-molecule-podman - cifmw-molecule-polarion + - cifmw-molecule-read_global_vars - cifmw-molecule-recognize_ssh_keypair - cifmw-molecule-registry_deploy - cifmw-molecule-repo_setup From 3c126ed07810244e4700e0030db1def684056fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Tue, 5 Aug 2025 
14:41:59 +0200 Subject: [PATCH 304/480] Move ping test execution from openshift pod to local openstackclient Move ping test execution from openshift pod to local openstackclient running on controller-0. This alligns execution method with continuous control plane test. It also solves problem with incompatibility of workload script with ipv6 enviroments. --- roles/update/tasks/create_instance.yml | 13 +++---------- roles/update/tasks/main.yml | 12 ++++++------ 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/roles/update/tasks/create_instance.yml b/roles/update/tasks/create_instance.yml index 600ea36546..4ad6eead64 100644 --- a/roles/update/tasks/create_instance.yml +++ b/roles/update/tasks/create_instance.yml @@ -15,20 +15,13 @@ # under the License. - name: Create an instance on the overcloud - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" ansible.builtin.shell: | set -o pipefail cat {{ cifmw_update_workload_launch_script }} | \ - oc rsh -n {{ cifmw_update_namespace }} openstackclient bash 2>&1 \ - {{ cifmw_update_timestamper_cmd }} | tee {{ cifmw_update_artifacts_basedir }}/workload_launch.log + podman exec -i lopenstackclient bash -i 2>&1 \ + {{ cifmw_update_timestamper_cmd }} | tee {{ cifmw_update_artifacts_basedir }}/workload_launch.log - name: Get logs from update instance creation - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" ansible.builtin.shell: > - oc cp -n {{ cifmw_update_namespace }} - openstack/openstackclient:{{ cifmw_update_artifacts_basedir_suffix }} + podman cp lopenstackclient:{{ cifmw_update_artifacts_basedir_suffix }}/. 
{{ cifmw_update_artifacts_basedir }} diff --git a/roles/update/tasks/main.yml b/roles/update/tasks/main.yml index 49e21a576c..627475977e 100644 --- a/roles/update/tasks/main.yml +++ b/roles/update/tasks/main.yml @@ -17,6 +17,12 @@ - name: Create the support files for test ansible.builtin.include_tasks: create_test_files.yml +- name: Create local openstackclient + when: + - (cifmw_update_control_plane_check | bool) or (cifmw_update_ping_test | bool) + - not cifmw_update_run_dryrun | bool + ansible.builtin.include_tasks: create_local_openstackclient.yml + - name: Trigger the ping test when: - cifmw_update_ping_test | bool @@ -29,12 +35,6 @@ - name: Start ping test ansible.builtin.include_tasks: l3_agent_connectivity_check_start.yml -- name: Create local openstackclient - when: - - cifmw_update_control_plane_check | bool - - not cifmw_update_run_dryrun | bool - ansible.builtin.include_tasks: create_local_openstackclient.yml - - name: Trigger the continuous control plane test when: - cifmw_update_control_plane_check | bool From c8e3c47554c077b1388a1ca878008e97ecac3a95 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Wed, 27 Aug 2025 19:02:42 -0400 Subject: [PATCH 305/480] Add two dz-storage post-deployment playbooks Before tempest can be run in the dz-storage scenario, the following two playbooks need to be run. 1. dz_storage_post_deploy_az.yaml This playbook creates Cinder volume types (needed by Glance) and Nova aggregates required for the dz-storage topology after EDPM deployment. These types and aggregates are necessary in order for the Tempest tests to pass. 2. dz_storage_pre_test_images.yaml This playbook downloads a cirros image, creates it in the default store and imports to all stores using --all-stores flag. Tempest cannot handle multi-zone image creation on its own. 
Signed-off-by: John Fulton Co-authored-by: Claude (AI Assistant) claude@anthropic.com --- .../playbooks/dz_storage_post_deploy_az.yaml | 72 +++++++++ .../playbooks/dz_storage_pre_test_images.yaml | 143 ++++++++++++++++++ 2 files changed, 215 insertions(+) create mode 100644 hooks/playbooks/dz_storage_post_deploy_az.yaml create mode 100644 hooks/playbooks/dz_storage_pre_test_images.yaml diff --git a/hooks/playbooks/dz_storage_post_deploy_az.yaml b/hooks/playbooks/dz_storage_post_deploy_az.yaml new file mode 100644 index 0000000000..28cc88c8f8 --- /dev/null +++ b/hooks/playbooks/dz_storage_post_deploy_az.yaml @@ -0,0 +1,72 @@ +--- +# Setup Cinder volume types and Nova aggregates for dz-storage DT +# Based on architecture/examples/dt/dz-storage/validate.md + +- name: Setup dz-storage environment + hosts: "{{ cifmw_target_host | default('localhost') }}" + tasks: + - name: Get service project ID + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n openstack + openstackclient + openstack project show service -c id -f value + register: service_project_result + + - name: Set service project ID + ansible.builtin.set_fact: + service_project_id: "{{ service_project_result.stdout | trim }}" + + - name: Create Cinder volume types for Glance multistore + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n openstack + openstackclient + openstack volume type create --private + --project "{{ service_project_id }}" + --property "RESKEY:availability_zones={{ item.zone }}" + {{ item.name }} + loop: + - { name: "glance-iscsi-az0", zone: "az0" } + - { name: "glance-iscsi-az1", zone: "az1" } + - { name: "glance-iscsi-az2", zone: 
"az2" } + failed_when: false # Types might already exist + + - name: Create Nova aggregates for availability zones + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n openstack + openstackclient + openstack aggregate create {{ item }} --zone {{ item }} + loop: + - "az0" + - "az1" + - "az2" + failed_when: false # Aggregates might already exist + + - name: Add compute hosts to availability zone aggregates + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n openstack + openstackclient + openstack aggregate add host {{ item.az }} {{ item.host }} + loop: + - { az: "az0", host: "r0-compute-0.ctlplane.example.com" } + - { az: "az0", host: "r0-compute-1.ctlplane.example.com" } + - { az: "az1", host: "r1-compute-0.ctlplane.example.com" } + - { az: "az1", host: "r1-compute-1.ctlplane.example.com" } + - { az: "az2", host: "r2-compute-0.ctlplane.example.com" } + - { az: "az2", host: "r2-compute-1.ctlplane.example.com" } + failed_when: false # Hosts might already be in aggregates diff --git a/hooks/playbooks/dz_storage_pre_test_images.yaml b/hooks/playbooks/dz_storage_pre_test_images.yaml new file mode 100644 index 0000000000..c5feb6a7ce --- /dev/null +++ b/hooks/playbooks/dz_storage_pre_test_images.yaml @@ -0,0 +1,143 @@ +--- +# Create and import images to all Glance stores for dz-storage DT testing +# Based on DCN pre-test approach in ci-framework-jobs/playbooks/dcn/dcn-pre-tests.yaml + +- name: Prepare dz-storage images for multi-zone testing + hosts: "{{ cifmw_target_host | default('localhost') }}" + vars: + cirros_version: "0.6.2" + cirros_image_name: "cirros-{{ cirros_version }}-x86_64-disk.img" + 
cirros_download_url: "https://github.com/cirros-dev/cirros/releases/download/{{ cirros_version }}/{{ cirros_image_name }}" + openstack_namespace: "{{ cifmw_openstack_namespace | default('openstack') }}" + tasks: + - name: Check if cirros image already exists + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack image show {{ cirros_image_name }} + register: _image_exists + failed_when: false + + - name: Create and import cirros image to all glance stores + when: _image_exists.rc != 0 + block: + - name: Get keystone public URL + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack endpoint list --service keystone --interface public -f value -c URL + register: keystone_url + + - name: Get admin password from secret + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc get secret osp-secret -n {{ openstack_namespace }} -o jsonpath='{.data.AdminPassword}' + register: admin_password_b64 + + - name: Decode admin password + ansible.builtin.set_fact: + admin_password: "{{ admin_password_b64.stdout | b64decode }}" + + - name: Download cirros image to controller + ansible.builtin.get_url: + url: "{{ cirros_download_url }}" + dest: "/tmp/{{ cirros_image_name }}" + mode: '0644' + + - name: Copy cirros image to openstackclient pod + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + 
'/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc cp + "/tmp/{{ cirros_image_name }}" + "{{ openstack_namespace }}/openstackclient:/home/cloud-admin/{{ cirros_image_name }}" + + - name: Create cirros image in default glance store (az0) + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack image create + --disk-format qcow2 + --container-format bare + --public + --file "/home/cloud-admin/{{ cirros_image_name }}" + --import + "{{ cirros_image_name }}" + + - name: Wait for image to become active + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack image show {{ cirros_image_name }} -f value -c status + register: image_status + until: "'active' in image_status.stdout" + retries: 60 + delay: 10 + + - name: Get image ID for import to other stores + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack image show {{ cirros_image_name }} -f value -c id + register: image_id + + - name: Import image to all glance stores (az0, az1, az2) + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + glance 
--os-auth-url {{ keystone_url.stdout | trim }} + --os-project-name admin + --os-username admin + --os-password {{ admin_password }} + --os-user-domain-name default + --os-project-domain-name default + image-import {{ image_id.stdout | trim }} + --all-stores True + --import-method copy-image + + - name: Verify image is available in all stores + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default('/home/' + ansible_user | default('zuul') + '/.kube/config') }}" + PATH: "{{ cifmw_path | default(ansible_env.PATH) }}" + ansible.builtin.command: >- + oc rsh + -n {{ openstack_namespace }} + openstackclient + openstack image show {{ image_id.stdout | trim }} -c properties -f value + register: image_stores + + - name: Display image store locations + ansible.builtin.debug: + msg: "Image stores: {{ image_stores.stdout }}" + + - name: Clean up local image file + ansible.builtin.file: + path: "/tmp/{{ cirros_image_name }}" + state: absent From 949a1a5994352e9382ad6fc18660c4910b51c8fa Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 5 Sep 2025 12:58:22 +0200 Subject: [PATCH 306/480] Do not wipe Ansible facts in post stage Some CI jobs are executing post tasks even when the CI job is not finished yet. Those variables might be required for next ongoing tasks. 
Co-Authored-By: Szymon Datko Signed-off-by: Daniel Pawlik --- roles/cifmw_setup/tasks/run_logs.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index 20d939d9dd..8240ee7627 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -94,8 +94,3 @@ }} mode: "0777" remote_src: true - - - name: Clean ansible fact cache - ansible.builtin.file: - path: "{{ ansible_user_dir }}/ansible_facts_cache" - state: absent From 70b25773c105bd97dfec464a3e52c1409187bbc1 Mon Sep 17 00:00:00 2001 From: Ricardo Diaz Date: Fri, 29 Aug 2025 12:34:31 +0200 Subject: [PATCH 307/480] Allow to add metadata labels to BMH This change adds the ability to add additional metadata labels in the BareMetalHost CR. For example, having an input like this: ``` cifmw_baremetal_hosts: compute-0: ... extra_labels: server: tigon23 ``` will result in a CR like this (`app` and `workload` added by default): ``` apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: labels: app: openstack workload: compute server: tigon23 ... ``` which in turn will allow the compute to be bound with a specific server by adding the `bmhLabelSelector` attribute under spec/nodes in the OpenStackDataPlaneNodeSet CR [0]: ``` apiVersion: dataplane.openstack.org/v1beta1 kind: OpenStackDataPlaneNodeSet metadata: name: openstack-edpm namespace: openstack spec: ... 
nodes: edpm-compute-0: hostName: compute-0 bmhLabelSelector: server: tigon23 ``` [0] https://issues.redhat.com/browse/OSPRH-13226?focusedId=26446283&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-26446283 --- roles/deploy_bmh/template/bmh.yml.j2 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/deploy_bmh/template/bmh.yml.j2 b/roles/deploy_bmh/template/bmh.yml.j2 index 587917ef04..b14385fb80 100644 --- a/roles/deploy_bmh/template/bmh.yml.j2 +++ b/roles/deploy_bmh/template/bmh.yml.j2 @@ -11,6 +11,11 @@ metadata: labels: app: {{ node_data['label'] | default("openstack") }} workload: {{ node_name.split('-')[0] }} +{% if 'extra_labels' in node_data %} +{% for label,key in node_data['extra_labels'].items() %} + {{ label }}: {{ key }} +{% endfor %} +{% endif %} spec: bmc: address: {{ node_data['connection'] }} From e721fa5e8129a4af01a7889a077db381e1e3c0e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Tue, 26 Aug 2025 09:02:02 +0200 Subject: [PATCH 308/480] Add tempest test run in between services and system update to update role Added new tempest test execution in between services and system update to update role for update_variant_split --- roles/update/tasks/update_variant_split.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/update/tasks/update_variant_split.yml b/roles/update/tasks/update_variant_split.yml index f366a1a481..346408cfa0 100644 --- a/roles/update/tasks/update_variant_split.yml +++ b/roles/update/tasks/update_variant_split.yml @@ -20,6 +20,15 @@ {{ cifmw_update_artifacts_basedir }}/update_event.sh Services update sequence complete +- name: Run tests after Services update + vars: + cifmw_test_operator_artifacts_basedir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/tests/test_operator_update" + cifmw_test_operator_tempest_name: "post-services-update-tempest-tests" + ansible.builtin.include_role: + name: cifmw_setup + tasks_from: 
run_tests.yml + when: cifmw_run_tests | default(false) | bool + - name: Set update step to Starting the system update sequence ansible.builtin.command: cmd: > From ff040aace826391870040a288d0ec3d572ed3d27 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Sat, 6 Sep 2025 20:19:36 +0200 Subject: [PATCH 309/480] openshift_setup: enable registry.connect.redhat.com too It is the official registry of the 3rd-party container images. --- roles/openshift_setup/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/openshift_setup/tasks/main.yml b/roles/openshift_setup/tasks/main.yml index 59e52ffde9..8b19bb697d 100644 --- a/roles/openshift_setup/tasks/main.yml +++ b/roles/openshift_setup/tasks/main.yml @@ -176,6 +176,7 @@ - "gcr.io" - "registry.k8s.io" - "registry.redhat.io" + - "registry.connect.redhat.com" - "registry-proxy.engineering.redhat.com" - "images.paas.redhat.com" - "image-registry.openshift-image-registry.svc:5000" From 78171ec2b09efe8619757b972600a97206ceeae9 Mon Sep 17 00:00:00 2001 From: mkatari Date: Fri, 5 Sep 2025 19:19:43 +0530 Subject: [PATCH 310/480] update ceph var for ipv6 cifmw filters vars only with prefix cifmw- so updating the var so that it can be propogated duirng job execution. 
--- hooks/playbooks/ceph.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hooks/playbooks/ceph.yml b/hooks/playbooks/ceph.yml index dee6093e77..eecf70eaac 100644 --- a/hooks/playbooks/ceph.yml +++ b/hooks/playbooks/ceph.yml @@ -163,7 +163,7 @@ - name: Set IPv4 facts when: - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) + - not cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: ssh_network_range: 192.168.122.0/24 # storage_network_range: 172.18.0.0/24 @@ -175,7 +175,7 @@ - name: Set IPv6 facts when: - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) + - cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: ssh_network_range: "2620:cf:cf:aaaa::/64" # storage_network_range: "2620:cf:cf:cccc::/64" @@ -214,7 +214,7 @@ when: - cifmw_networking_env_definition is defined - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) + - not cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: storage_network_range: >- {{ @@ -229,7 +229,7 @@ when: - cifmw_networking_env_definition is defined - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) + - cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: storage_network_range: >- {{ @@ -318,7 +318,7 @@ - name: Set IPv4 facts when: - ansible_all_ipv4_addresses | length > 0 - - not ceph_ipv6 | default(false) + - not cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: all_addresses: ansible_all_ipv4_addresses cidr: 24 @@ -326,7 +326,7 @@ - name: Set IPv6 facts when: - ansible_all_ipv6_addresses | length > 0 - - ceph_ipv6 | default(false) + - cifmw_ceph_ipv6 | default(false) ansible.builtin.set_fact: all_addresses: ansible_all_ipv6_addresses cidr: 64 From 70b355459de803b9326125c5316eb571057bff71 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Sun, 7 Sep 2025 20:58:44 +0200 Subject: [PATCH 311/480] Check if hook is properly defined for execution Adding the additional `when` 
conditions make it possible to override the named hooks in job definitions with empty mappings (i.e. `{}`) in order to skip specific ones. Unfortunately, because `set_fact` tasks are involved we cannot simply use the `undef()` to achieve that (it raises `Mandatory variable has not been overridden.` [1]). [1] https://stackoverflow.com/questions/73427379/how-to-unset-a-variable-in-ansible#comment138144412_78351300 --- roles/run_hook/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/run_hook/tasks/main.yml b/roles/run_hook/tasks/main.yml index bd39f87e53..4c4dda184d 100644 --- a/roles/run_hook/tasks/main.yml +++ b/roles/run_hook/tasks/main.yml @@ -77,3 +77,7 @@ loop: "{{ _hooks }}" loop_control: loop_var: 'hook' + when: + - hook | length > 0 + - hook.type is defined + - hook.source is defined From 5f7e36e70f11677541d1e5bcabce6ede7152ef0e Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 8 Sep 2025 12:47:53 +0530 Subject: [PATCH 312/480] Increase timeout for shiftstack pod creation Default timeout is 120 and seems like pods are not getting ready within that time. 
Increasing wait_timeout might help resource getting ready --- roles/shiftstack/tasks/deploy_shiftstackclient_pod.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/shiftstack/tasks/deploy_shiftstackclient_pod.yml b/roles/shiftstack/tasks/deploy_shiftstackclient_pod.yml index 028373597f..9bb091629c 100644 --- a/roles/shiftstack/tasks/deploy_shiftstackclient_pod.yml +++ b/roles/shiftstack/tasks/deploy_shiftstackclient_pod.yml @@ -60,6 +60,8 @@ kubeconfig: "{{ cifmw_openshift_kubeconfig }}" src: "{{ (cifmw_shiftstack_manifests_dir, cifmw_shiftstack_client_pod_manifest) | path_join }}" wait: true + wait_sleep: 10 + wait_timeout: 360 wait_condition: type: Ready status: "True" From 41fd85e9c4d43771bc7ec41f9f5f761687430df0 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Mon, 8 Sep 2025 17:18:13 +0200 Subject: [PATCH 313/480] run_hook: fix the condition which makes an hook valid A previous change to make the code more roboust (namely https://github.com/openstack-k8s-operators/ci-framework/pull/3266 ) excluded the case when an hook is a CR, which does not need a source. Remove the playbook-specific condition: only type is relevant from the run_hook point of view, the other attributes are handled by the type-specific code. --- roles/run_hook/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/run_hook/tasks/main.yml b/roles/run_hook/tasks/main.yml index 4c4dda184d..791deb1aa3 100644 --- a/roles/run_hook/tasks/main.yml +++ b/roles/run_hook/tasks/main.yml @@ -80,4 +80,3 @@ when: - hook | length > 0 - hook.type is defined - - hook.source is defined From 177fafa8957c20c0bd3fa138f87aafd0682e2ec1 Mon Sep 17 00:00:00 2001 From: "Chandan Kumar (raukadah)" Date: Thu, 4 Sep 2025 21:15:30 +0530 Subject: [PATCH 314/480] [edpm_prepare] Add cifmw_edpm_prepare_extra_kustomizations var We have cifmw_edpm_prepare_kustomizations var to pass kustomize for applying on top of Controlplane deploy. We define them in scenario file and kustomize list is very long. 
In order to test different scenario, like enabling few features on top of existing deployment already using cifmw_edpm_prepare_kustomizations var, apart from hook there is no way to pass extra set of kustomize via zuul job vars. Creating hooks for the same each time was hard to maintain. This pr introduces a new cifmw_edpm_prepare_extra_kustomizations var to pass additional kustomization via zuul jobs allow use to enable/disable certain features easily. It is needed for watcher operator testing. Signed-off-by: Chandan Kumar (raukadah) --- roles/edpm_prepare/README.md | 1 + roles/edpm_prepare/defaults/main.yml | 1 + roles/edpm_prepare/tasks/kustomize_and_deploy.yml | 7 ++++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/edpm_prepare/README.md b/roles/edpm_prepare/README.md index eee1711729..af9ff1bb22 100644 --- a/roles/edpm_prepare/README.md +++ b/roles/edpm_prepare/README.md @@ -19,3 +19,4 @@ This role doesn't need privilege escalation. * `cifmw_edpm_prepare_kustomizations`: (List) Kustomizations to apply on top of the controlplane CRs. Defaults to `[]`. * `cifmw_edpm_prepare_wait_controplane_status_change_sec`: (Integer) Time, in seconds, to wait before checking openstack control plane deployment status. Useful when using the role to only update the control plane resource, scenario where it may be in a `ready` status. Defaults to `30`. +* `cifmw_edpm_prepare_extra_kustomizations`: (List) Extra Kustomizations to apply on top of the controlplane CRs. Defaults to `[]`. diff --git a/roles/edpm_prepare/defaults/main.yml b/roles/edpm_prepare/defaults/main.yml index 203723f4cf..9d4f6e2b49 100644 --- a/roles/edpm_prepare/defaults/main.yml +++ b/roles/edpm_prepare/defaults/main.yml @@ -32,3 +32,4 @@ cifmw_edpm_prepare_kustomizations: [] # when we are modifying the control plane, since the check status task can get a # false 'ready' status. 
cifmw_edpm_prepare_wait_controplane_status_change_sec: 30 +cifmw_edpm_prepare_extra_kustomizations: [] diff --git a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml index 335647dbf6..37f238c10f 100644 --- a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml +++ b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml @@ -68,7 +68,12 @@ cifmw.general.ci_kustomize: target_path: "{{ cifmw_edpm_prepare_openstack_crs_path }}" sort_ascending: false - kustomizations: "{{ cifmw_edpm_prepare_kustomizations + _ctlplane_name_kustomizations }}" + kustomizations: >- + {{ + cifmw_edpm_prepare_kustomizations + + _ctlplane_name_kustomizations + + (cifmw_edpm_prepare_extra_kustomizations | default([])) + }} kustomizations_paths: >- {{ [ From 5e2ae3d83b068f6877e0d9d86fa34a1fd4603003 Mon Sep 17 00:00:00 2001 From: Oleg Belo0lipetskii Date: Thu, 28 Aug 2025 06:44:33 -0400 Subject: [PATCH 315/480] Add tobikoPatch parameter for custom patches --- roles/test_operator/README.md | 1 + roles/test_operator/defaults/main.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index e29c290dc0..1bf111c750 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -135,6 +135,7 @@ Default value: {} * `cifmw_test_operator_tobiko_image_tag`: (String) Tag for the `cifmw_test_operator_tobiko_image`. Default value: `{{ cifmw_test_operator_default_image_tag }}` * `cifmw_test_operator_tobiko_testenv`: (String) Executed tobiko testenv. See tobiko `tox.ini` file for further details. Some allowed values: scenario, sanity, faults, neutron, octavia, py3, etc. Default value: `scenario` * `cifmw_test_operator_tobiko_version`: (String) Tobiko version to install. It could refer to a branch (master, osp-16.2), a tag (0.6.x, 0.7.x) or an sha-1. Default value: `master` +* `cifmw_test_operator_tobiko_patch`: (Dict) A specific Git patch to apply to the Tobiko repository. 
This feature expects both `repository` and `refspec` to be defined. Default value: `{}` * `cifmw_test_operator_tobiko_pytest_addopts`: (String) `PYTEST_ADDOPTS` env variable with input pytest args. Example: `-m --maxfail --skipregex `. Defaults to `null`. In case of `null` value, `PYTEST_ADDOPTS` is not set (tobiko tests are executed without any extra pytest options). * `cifmw_test_operator_tobiko_prevent_create`: (Boolean) Sets the value of the env variable `TOBIKO_PREVENT_CREATE` that specifies whether tobiko scenario tests create new resources or expect that those resource had been created before. Default to `null`. In case of `null` value, `TOBIKO_PREVENT_CREATE` is not set (tobiko tests create new resources). * `cifmw_test_operator_tobiko_num_processes`: (Integer) Sets the value of the env variable `TOX_NUM_PROCESSES` that is used to run pytest with `--numprocesses $TOX_NUM_PROCESSES`. Defaults to `null`. In case of `null` value, `TOX_NUM_PROCESSES` is not set (tobiko internally uses the value `auto`, see pytest documentation about the `--numprocesses` option). 
diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index c1679230e2..51b74a88b2 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -206,6 +206,7 @@ cifmw_test_operator_tobiko_config: containerImage: "{{ stage_vars_dict.cifmw_test_operator_tobiko_image }}:{{ stage_vars_dict.cifmw_test_operator_tobiko_image_tag }}" testenv: "{{ stage_vars_dict.cifmw_test_operator_tobiko_testenv }}" version: "{{ stage_vars_dict.cifmw_test_operator_tobiko_version }}" + patch: "{{ stage_vars_dict.cifmw_test_operator_tobiko_patch | default(omit) }}" pytestAddopts: "{{ stage_vars_dict.cifmw_test_operator_tobiko_pytest_addopts if stage_vars_dict.cifmw_test_operator_tobiko_pytest_addopts is not none else omit }}" tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" nodeSelector: "{{ cifmw_test_operator_node_selector | default(omit) }}" From 76335c602a5a2a4cf1188390e129b28a2240191b Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Thu, 28 Aug 2025 15:19:21 +0200 Subject: [PATCH 316/480] Replace hardcoded zuul user with cifmw_libvirt_manager_user variable --- roles/libvirt_manager/tasks/manage_vms.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/libvirt_manager/tasks/manage_vms.yml b/roles/libvirt_manager/tasks/manage_vms.yml index c9c700d2e4..4a39cf8337 100644 --- a/roles/libvirt_manager/tasks/manage_vms.yml +++ b/roles/libvirt_manager/tasks/manage_vms.yml @@ -95,8 +95,8 @@ ansible.builtin.copy: dest: "/home/zuul/.ssh/id_cifw" content: "{{ priv_key }}" - owner: zuul - group: zuul + owner: "{{ cifmw_libvirt_manager_user }}" + group: "{{ cifmw_libvirt_manager_user }}" mode: "0400" - name: "Inject public key on hosts {{ vm }}" @@ -108,6 +108,6 @@ ansible.builtin.copy: dest: "/home/zuul/.ssh/id_cifw.pub" content: "{{ pub_key }}" - owner: zuul - group: zuul + owner: "{{ cifmw_libvirt_manager_user }}" + group: "{{ cifmw_libvirt_manager_user }}" mode: "0444" 
From 3db28662f18ef218d78ddcedf481fd4081aa3440 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Fri, 29 Aug 2025 14:53:20 +0200 Subject: [PATCH 317/480] replaced other hardcoded zuul user/group --- create-infra.yml | 4 ++-- .../delete_all_pre_adoption_resources.yaml | 4 ++-- hooks/playbooks/run_tofu.yml | 4 ++-- roles/reproducer/README.md | 1 + roles/reproducer/defaults/main.yml | 1 + roles/reproducer/tasks/ci_job.yml | 4 ++-- .../reproducer/tasks/configure_controller.yml | 22 +++++++++---------- 7 files changed, 21 insertions(+), 19 deletions(-) diff --git a/create-infra.yml b/create-infra.yml index 1ab3524866..089708ad43 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -154,6 +154,6 @@ path: "{{ cifmw_basedir | default('/home/zuul/ci-framework-data') }}" state: directory recurse: true - owner: zuul - group: zuul + owner: "{{ ansible_user_id }}" + group: "{{ ansible_user_id }}" mode: "0755" diff --git a/hooks/playbooks/delete_all_pre_adoption_resources.yaml b/hooks/playbooks/delete_all_pre_adoption_resources.yaml index 6cfd948521..36f0e16145 100644 --- a/hooks/playbooks/delete_all_pre_adoption_resources.yaml +++ b/hooks/playbooks/delete_all_pre_adoption_resources.yaml @@ -6,8 +6,8 @@ - name: Create openstack config dir ansible.builtin.file: path: "/home/zuul/.config/openstack/" - owner: zuul - group: zuul + owner: "{{ ansible_user | default('zuul') }}" + group: "{{ ansible_user | default('zuul') }}" mode: '0744' state: directory diff --git a/hooks/playbooks/run_tofu.yml b/hooks/playbooks/run_tofu.yml index 69416ba20c..e0b5a78a87 100644 --- a/hooks/playbooks/run_tofu.yml +++ b/hooks/playbooks/run_tofu.yml @@ -22,8 +22,8 @@ - name: Create openstack config dir ansible.builtin.file: path: "{{ ansible_user_dir }}/.config/openstack/" - owner: zuul - group: zuul + owner: "{{ ansible_user_id }}" + group: "{{ ansible_user_id }}" mode: '0744' state: directory - name: Fetch cloud congig to host diff --git a/roles/reproducer/README.md b/roles/reproducer/README.md 
index 0b884bc5cf..5dbf8fba86 100644 --- a/roles/reproducer/README.md +++ b/roles/reproducer/README.md @@ -6,6 +6,7 @@ None ## Parameters +* `cifmw_reproducer_user`: (String) User used for reproducer role. Defaults to `zuul` * `cifmw_reproducer_basedir`: (String) Base directory. Defaults to `cifmw_basedir`, which defaults to `~/ci-framework-data`. * `cifmw_reproducer_compute_repos`: (List[mapping]) List of yum repository that must be deployed on the compute nodes during their creation. Defaults to `[]`. * `cifmw_reproducer_compute_set_repositories`: (Bool) Deploy repositories (rhos-release) on Compute nodes. Defaults to `true`. diff --git a/roles/reproducer/defaults/main.yml b/roles/reproducer/defaults/main.yml index 737d2ebd74..a005e9d93f 100644 --- a/roles/reproducer/defaults/main.yml +++ b/roles/reproducer/defaults/main.yml @@ -17,6 +17,7 @@ # All variables intended for modification should be placed in this file. # All variables within this role should have a prefix of "cifmw_reproducer" +cifmw_reproducer_user: "{{ ansible_user | default('zuul') }}" cifmw_reproducer_basedir: "{{ cifmw_basedir | default( ansible_user_dir ~ '/ci-framework-data') }}" cifmw_reproducer_src_dir: "{{ cifmw_ci_src_dir | default( ansible_user_dir ~ '/src') }}" cifmw_reproducer_kubecfg: "{{ cifmw_libvirt_manager_configuration.vms.crc.image_local_dir }}/kubeconfig" diff --git a/roles/reproducer/tasks/ci_job.yml b/roles/reproducer/tasks/ci_job.yml index 2a8f940877..397ef10b7b 100644 --- a/roles/reproducer/tasks/ci_job.yml +++ b/roles/reproducer/tasks/ci_job.yml @@ -26,8 +26,8 @@ path: "/home/zuul/{{ job_id }}-params" mode: "0755" state: directory - owner: zuul - group: zuul + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" - name: Copy environment files to controller node tags: diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 65748352e3..55f00b39d0 100644 --- 
a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -234,8 +234,8 @@ ansible.builtin.file: path: "/home/zuul/.kube" state: directory - owner: zuul - group: zuul + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" mode: "0750" - name: Inject kubeconfig content @@ -250,8 +250,8 @@ ternary(_devscripts_kubeconfig.content, _crc_kubeconfig.content) | b64decode }} - owner: zuul - group: zuul + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" mode: "0640" - name: Inject kubeadmin-password if exists @@ -266,8 +266,8 @@ ternary(_devscripts_kubeadm.content, _crc_kubeadm.content) | b64decode }} - owner: zuul - group: zuul + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" mode: "0600" - name: Inject devscripts private key if set @@ -276,8 +276,8 @@ ansible.builtin.copy: dest: "/home/zuul/.ssh/devscripts_key" content: "{{ _devscript_privkey.content | b64decode }}" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" mode: "0400" - name: Ensure /etc/ci/env is created @@ -290,7 +290,7 @@ - name: Manage secrets on controller-0 vars: cifmw_manage_secrets_basedir: "/home/zuul/ci-framework-data" - cifmw_manage_secrets_owner: "zuul" + cifmw_manage_secrets_owner: "{{ cifmw_reproducer_user }}" block: - name: Initialize secret manager ansible.builtin.import_role: @@ -494,8 +494,8 @@ dest: "/home/zuul/.ssh/crc_key" content: "{{ crc_priv_key['content'] | b64decode }}" mode: "0400" - owner: zuul - group: zuul + owner: "{{ cifmw_reproducer_user }}" + group: "{{ cifmw_reproducer_user }}" - name: Ensure we have all dependencies installed ansible.builtin.async_status: From 93db7984dcd0d872fc128baa101b37093e86ef88 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Mon, 1 Sep 2025 11:50:39 +0200 Subject: [PATCH 318/480] test debug module --- roles/libvirt_manager/tasks/manage_vms.yml | 13 +++++++++++++ 1 
file changed, 13 insertions(+) diff --git a/roles/libvirt_manager/tasks/manage_vms.yml b/roles/libvirt_manager/tasks/manage_vms.yml index 4a39cf8337..e60a04ce92 100644 --- a/roles/libvirt_manager/tasks/manage_vms.yml +++ b/roles/libvirt_manager/tasks/manage_vms.yml @@ -111,3 +111,16 @@ owner: "{{ cifmw_libvirt_manager_user }}" group: "{{ cifmw_libvirt_manager_user }}" mode: "0444" + +- name: "DEBUG - Test cifmw_libvirt_manager_user modifications" + when: + - vm_type is match('^controller.*$') + - _cifmw_libvirt_manager_layout.vms[vm_type].start | default(true) + ansible.builtin.fail: + msg: | + SUCCESS! libvirt_manager modifications are working! + Details: + - vm: {{ vm }} + - vm_type: {{ vm_type }} + - cifmw_libvirt_manager_user: {{ cifmw_libvirt_manager_user }} + - ansible_user: {{ ansible_user | default('undefined') }} From fa4d10fe032d8061f4a599870cdfe0fe724208b6 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Mon, 1 Sep 2025 14:28:36 +0200 Subject: [PATCH 319/480] debug for downstream jobs --- create-infra.yml | 8 ++++++++ hooks/playbooks/delete_all_pre_adoption_resources.yaml | 8 ++++++++ roles/reproducer/tasks/ci_job.yml | 8 ++++++++ roles/reproducer/tasks/configure_controller.yml | 8 ++++++++ 4 files changed, 32 insertions(+) diff --git a/create-infra.yml b/create-infra.yml index 089708ad43..25db7c8ef6 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -157,3 +157,11 @@ owner: "{{ ansible_user_id }}" group: "{{ ansible_user_id }}" mode: "0755" + + - name: "DEBUG - Test create-infra.yml ansible_user_id modifications" + ansible.builtin.fail: + msg: | + SUCCESS! create-infra.yml modifications are working! + Details: + - ansible_user_id: {{ ansible_user_id }} + - This means a CI job IS testing create-infra.yml! 
diff --git a/hooks/playbooks/delete_all_pre_adoption_resources.yaml b/hooks/playbooks/delete_all_pre_adoption_resources.yaml index 36f0e16145..8cf7f47e99 100644 --- a/hooks/playbooks/delete_all_pre_adoption_resources.yaml +++ b/hooks/playbooks/delete_all_pre_adoption_resources.yaml @@ -11,6 +11,14 @@ mode: '0744' state: directory + - name: "DEBUG - Test delete_all_pre_adoption_resources.yaml cifmw_reproducer_user modifications" + ansible.builtin.fail: + msg: | + SUCCESS! delete_all_pre_adoption_resources.yaml modifications are working! + Details: + - ansible_user_id: "{{ ansible_user | default('zuul') }}" + - This means a CI job IS testing delete_all_pre_adoption_resources.yaml! + - name: Fetch cloud config to host kubernetes.core.k8s_cp: kubeconfig: "{{ cifmw_resource_delete_kubeconfig }}" diff --git a/roles/reproducer/tasks/ci_job.yml b/roles/reproducer/tasks/ci_job.yml index 397ef10b7b..fc8d91456c 100644 --- a/roles/reproducer/tasks/ci_job.yml +++ b/roles/reproducer/tasks/ci_job.yml @@ -29,6 +29,14 @@ owner: "{{ cifmw_reproducer_user }}" group: "{{ cifmw_reproducer_user }}" + - name: "DEBUG - Test ci_job.yml cifmw_reproducer_user modifications" + ansible.builtin.fail: + msg: | + SUCCESS! ci_job.yml modifications are working! + Details: + - ansible_user_id: {{ cifmw_reproducer_user }} + - This means a CI job IS testing ci_job.yml! + - name: Copy environment files to controller node tags: - bootstrap diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 55f00b39d0..5e3d77e686 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -254,6 +254,14 @@ group: "{{ cifmw_reproducer_user }}" mode: "0640" + - name: "DEBUG - Test configure_controller.yml cifmw_reproducer_user modifications" + ansible.builtin.fail: + msg: | + SUCCESS! create-infra.yml modifications are working! 
+ Details: + - ansible_user_id: {{ cifmw_reproducer_user }} + - This means a CI job IS testing configure_controller.yml! + - name: Inject kubeadmin-password if exists when: - _devscripts_kubeadm.content is defined or From 96efdcf99818542efa4ff9cc7deff8a25bcd9902 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Mon, 1 Sep 2025 19:27:20 +0200 Subject: [PATCH 320/480] remove debugging task --- create-infra.yml | 8 -------- .../delete_all_pre_adoption_resources.yaml | 8 -------- roles/libvirt_manager/tasks/manage_vms.yml | 13 ------------- roles/reproducer/tasks/ci_job.yml | 8 -------- roles/reproducer/tasks/configure_controller.yml | 8 -------- 5 files changed, 45 deletions(-) diff --git a/create-infra.yml b/create-infra.yml index 25db7c8ef6..089708ad43 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -157,11 +157,3 @@ owner: "{{ ansible_user_id }}" group: "{{ ansible_user_id }}" mode: "0755" - - - name: "DEBUG - Test create-infra.yml ansible_user_id modifications" - ansible.builtin.fail: - msg: | - SUCCESS! create-infra.yml modifications are working! - Details: - - ansible_user_id: {{ ansible_user_id }} - - This means a CI job IS testing create-infra.yml! diff --git a/hooks/playbooks/delete_all_pre_adoption_resources.yaml b/hooks/playbooks/delete_all_pre_adoption_resources.yaml index 8cf7f47e99..36f0e16145 100644 --- a/hooks/playbooks/delete_all_pre_adoption_resources.yaml +++ b/hooks/playbooks/delete_all_pre_adoption_resources.yaml @@ -11,14 +11,6 @@ mode: '0744' state: directory - - name: "DEBUG - Test delete_all_pre_adoption_resources.yaml cifmw_reproducer_user modifications" - ansible.builtin.fail: - msg: | - SUCCESS! delete_all_pre_adoption_resources.yaml modifications are working! - Details: - - ansible_user_id: "{{ ansible_user | default('zuul') }}" - - This means a CI job IS testing delete_all_pre_adoption_resources.yaml! 
- - name: Fetch cloud config to host kubernetes.core.k8s_cp: kubeconfig: "{{ cifmw_resource_delete_kubeconfig }}" diff --git a/roles/libvirt_manager/tasks/manage_vms.yml b/roles/libvirt_manager/tasks/manage_vms.yml index e60a04ce92..4a39cf8337 100644 --- a/roles/libvirt_manager/tasks/manage_vms.yml +++ b/roles/libvirt_manager/tasks/manage_vms.yml @@ -111,16 +111,3 @@ owner: "{{ cifmw_libvirt_manager_user }}" group: "{{ cifmw_libvirt_manager_user }}" mode: "0444" - -- name: "DEBUG - Test cifmw_libvirt_manager_user modifications" - when: - - vm_type is match('^controller.*$') - - _cifmw_libvirt_manager_layout.vms[vm_type].start | default(true) - ansible.builtin.fail: - msg: | - SUCCESS! libvirt_manager modifications are working! - Details: - - vm: {{ vm }} - - vm_type: {{ vm_type }} - - cifmw_libvirt_manager_user: {{ cifmw_libvirt_manager_user }} - - ansible_user: {{ ansible_user | default('undefined') }} diff --git a/roles/reproducer/tasks/ci_job.yml b/roles/reproducer/tasks/ci_job.yml index fc8d91456c..397ef10b7b 100644 --- a/roles/reproducer/tasks/ci_job.yml +++ b/roles/reproducer/tasks/ci_job.yml @@ -29,14 +29,6 @@ owner: "{{ cifmw_reproducer_user }}" group: "{{ cifmw_reproducer_user }}" - - name: "DEBUG - Test ci_job.yml cifmw_reproducer_user modifications" - ansible.builtin.fail: - msg: | - SUCCESS! ci_job.yml modifications are working! - Details: - - ansible_user_id: {{ cifmw_reproducer_user }} - - This means a CI job IS testing ci_job.yml! - - name: Copy environment files to controller node tags: - bootstrap diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 5e3d77e686..55f00b39d0 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -254,14 +254,6 @@ group: "{{ cifmw_reproducer_user }}" mode: "0640" - - name: "DEBUG - Test configure_controller.yml cifmw_reproducer_user modifications" - ansible.builtin.fail: - msg: | - SUCCESS! 
create-infra.yml modifications are working! - Details: - - ansible_user_id: {{ cifmw_reproducer_user }} - - This means a CI job IS testing configure_controller.yml! - - name: Inject kubeadmin-password if exists when: - _devscripts_kubeadm.content is defined or From 1577fbeeca570d1b95d6e79aab212001d6c133af Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Tue, 2 Sep 2025 23:07:58 +0200 Subject: [PATCH 321/480] remove hardcoded zuul in path --- hooks/playbooks/delete_all_pre_adoption_resources.yaml | 2 +- roles/reproducer/tasks/configure_controller.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/playbooks/delete_all_pre_adoption_resources.yaml b/hooks/playbooks/delete_all_pre_adoption_resources.yaml index 36f0e16145..3ae88f2bb4 100644 --- a/hooks/playbooks/delete_all_pre_adoption_resources.yaml +++ b/hooks/playbooks/delete_all_pre_adoption_resources.yaml @@ -5,7 +5,7 @@ tasks: - name: Create openstack config dir ansible.builtin.file: - path: "/home/zuul/.config/openstack/" + path: "{{ ansible_user_dir }}/.config/openstack/" owner: "{{ ansible_user | default('zuul') }}" group: "{{ ansible_user | default('zuul') }}" mode: '0744' diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 55f00b39d0..a1b617e96f 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -274,7 +274,7 @@ when: - _devscript_privkey.content is defined ansible.builtin.copy: - dest: "/home/zuul/.ssh/devscripts_key" + dest: "{{ ansible_user_dir }}/.ssh/devscripts_key" content: "{{ _devscript_privkey.content | b64decode }}" owner: "{{ cifmw_reproducer_user }}" group: "{{ cifmw_reproducer_user }}" From ba0696e7eb8ac3353e7cbdb99eb8d16a75578ee3 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 8 Sep 2025 15:03:47 +0200 Subject: [PATCH 322/480] Disable crio stats by default The stats require some resource to utilize each minute and it might 
affect CI job result. Let's enable it when it is needed. Signed-off-by: Daniel Pawlik --- ci/playbooks/collect-logs.yml | 2 +- ci/playbooks/e2e-prepare.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/playbooks/collect-logs.yml b/ci/playbooks/collect-logs.yml index 2677c33dc9..46f265607a 100644 --- a/ci/playbooks/collect-logs.yml +++ b/ci/playbooks/collect-logs.yml @@ -85,7 +85,7 @@ loop: "{{ files_to_copy.files }}" - name: Copy crio stats log file - when: cifmw_openshift_crio_stats | default(true) + when: cifmw_openshift_crio_stats | default(false) ignore_errors: true # noqa: ignore-errors ansible.builtin.copy: src: /tmp/crio-stats.log diff --git a/ci/playbooks/e2e-prepare.yml b/ci/playbooks/e2e-prepare.yml index da9309a99d..fdc8588ad5 100644 --- a/ci/playbooks/e2e-prepare.yml +++ b/ci/playbooks/e2e-prepare.yml @@ -39,7 +39,7 @@ /usr/bin/date >> /tmp/crio-stats.log; {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/scripts/get-stats.sh >> /tmp/crio-stats.log - when: cifmw_openshift_crio_stats | default(true) + when: cifmw_openshift_crio_stats | default(false) - name: Construct project change list ansible.builtin.set_fact: From 5235255cc11a6167de476a3f84fbace9afe9515b Mon Sep 17 00:00:00 2001 From: jamepark4 Date: Mon, 14 Apr 2025 15:50:57 -0400 Subject: [PATCH 323/480] Create nodeset template for nova02beta --- .../edpm-nodeset-values/values.yaml.j2 | 69 +++++++++++++++++++ .../edpm-nodeset2-values/values.yaml.j2 | 69 +++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset2-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..6d28d94b0e 
--- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset-values/values.yaml.j2 @@ -0,0 +1,69 @@ +--- +# source: nova02beta/edpm-nodeset-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith('compute') %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +{% set inst_stop_idx = (instances_names | length) // 2 %} +{% set nodeset_one_instances = instances_names[:inst_stop_idx] %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in nodeset_one_instances %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in 
cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in nodeset_one_instances %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset2-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset2-values/values.yaml.j2 new file mode 100644 index 0000000000..32d0ac7d46 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/nova02beta/edpm-nodeset2-values/values.yaml.j2 @@ -0,0 +1,69 @@ +--- +# source: nova02beta/edpm-nodeset2-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith('compute') %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +{% set inst_stop_idx = (instances_names | length) // 2 %} +{% set nodeset_two_instances = instances_names[inst_stop_idx:] %} +data: + ssh_keys: + authorized: {{ 
cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in nodeset_two_instances %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in nodeset_two_instances %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} 
From e57a9f5430322b37240c5ca7cd2a22f81555d6f7 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Mon, 8 Sep 2025 12:19:26 +0200 Subject: [PATCH 324/480] [test_operator] IPv6 whitebox_neutron_plugin_options.proxy_host_address The value for the whitebox_neutron_plugin_options.proxy_host_address tempest parameter is added by the test_operator role. This value corresponds to the controller-0 IP address. Before this patch, this only worked when that IP address was IPv4. With this patch, IPv6 addresses are supported too. --- roles/test_operator/tasks/tempest-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/test_operator/tasks/tempest-tests.yml b/roles/test_operator/tasks/tempest-tests.yml index 676ea84881..70ea6c61d8 100644 --- a/roles/test_operator/tasks/tempest-tests.yml +++ b/roles/test_operator/tasks/tempest-tests.yml @@ -129,7 +129,7 @@ - controller_ip != "" vars: controller_ip: >- - {{ cifmw_test_operator_controller_ip | default(ansible_default_ipv4.address) | default('') }} + {{ cifmw_test_operator_controller_ip | default(ansible_default_ipv4.address) | default(ansible_default_ipv6.address) | default('') }} ansible.builtin.set_fact: test_operator_cr: >- {{ @@ -147,7 +147,7 @@ - stage_vars_dict.cifmw_test_operator_tempest_workflow | list | length > 0 vars: controller_ip: >- - {{ cifmw_test_operator_controller_ip | default(ansible_default_ipv4.address) | default('') }} + {{ cifmw_test_operator_controller_ip | default(ansible_default_ipv4.address) | default(ansible_default_ipv6.address) | default('') }} block: - name: Add controller IP to each workflow step overrides section - Create overriden_workflow vars: From 343e7893be5374eab9d2a690807b45a77429058c Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Mon, 8 Sep 2025 12:24:13 +0200 Subject: [PATCH 325/480] Adapt BGP playbooks to IPv6 OSPRH-9552 --- playbooks/bgp/files/radvd.conf | 48 ++++ playbooks/bgp/prepare-bgp-computes.yaml | 84 +++--- 
.../prepare-bgp-hypervisor-ipv6-radvd.yaml | 22 ++ playbooks/bgp/prepare-bgp-spines-leaves.yaml | 271 +++++++++++------- .../bgp/tasks/apply_bgp_default_routes.yaml | 84 ++++++ playbooks/bgp/templates/leaf-frr.conf.j2 | 8 +- playbooks/bgp/templates/router-frr.conf.j2 | 3 + playbooks/bgp/templates/spine-frr.conf.j2 | 3 + 8 files changed, 372 insertions(+), 151 deletions(-) create mode 100644 playbooks/bgp/files/radvd.conf create mode 100644 playbooks/bgp/prepare-bgp-hypervisor-ipv6-radvd.yaml create mode 100644 playbooks/bgp/tasks/apply_bgp_default_routes.yaml diff --git a/playbooks/bgp/files/radvd.conf b/playbooks/bgp/files/radvd.conf new file mode 100644 index 0000000000..0301bc147f --- /dev/null +++ b/playbooks/bgp/files/radvd.conf @@ -0,0 +1,48 @@ +interface cifmw-osp_trunk +{ + AdvSendAdvert on; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 100; + route 2620:cf:cf:aaaa::/64 + {}; + route 2620:cf:cf:aaab::/64 + {}; + route 2620:cf:cf:aaac::/64 + {}; +}; +interface cifmw-r0_tr +{ + AdvSendAdvert on; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 100; + route 2620:cf:cf:aaab::/64 + {}; + route 2620:cf:cf:aaac::/64 + {}; + route 2620:cf:cf:aaad::/64 + {}; +}; +interface cifmw-r1_tr +{ + AdvSendAdvert on; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 100; + route 2620:cf:cf:aaaa::/64 + {}; + route 2620:cf:cf:aaac::/64 + {}; + route 2620:cf:cf:aaad::/64 + {}; +}; +interface cifmw-r2_tr +{ + AdvSendAdvert on; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 100; + route 2620:cf:cf:aaaa::/64 + {}; + route 2620:cf:cf:aaab::/64 + {}; + route 2620:cf:cf:aaad::/64 + {}; +}; diff --git a/playbooks/bgp/prepare-bgp-computes.yaml b/playbooks/bgp/prepare-bgp-computes.yaml index 401b104785..24d8362691 100644 --- a/playbooks/bgp/prepare-bgp-computes.yaml +++ b/playbooks/bgp/prepare-bgp-computes.yaml @@ -3,10 +3,14 @@ hosts: >- r0-computes,r1-computes,r2-computes {{ networkers_bool | default(false) | bool | ternary(',r0-networkers,r1-networkers,r2-networkers', '') }}" + vars: + _dash_six: "{{ 
'' if (ip_version | default(4) | int) == 4 else '-6' }}" + _proto: "{{ 'dhcp' if (ip_version | default(4) | int) == 4 else 'ra' }}" tasks: - name: Check default route corresponds with BGP ansible.builtin.command: - cmd: "ip route show default" + cmd: > + ip {{ _dash_six }} route show default register: _initial_default_ip_route_result changed_when: false @@ -14,60 +18,44 @@ ansible.builtin.meta: end_play when: "'proto bgp' in _initial_default_ip_route_result.stdout" - - name: Obtain the device with the DHCP default route + - name: Apply the BGP default routes + ansible.builtin.include_tasks: tasks/apply_bgp_default_routes.yaml + +# Play to add IPv6 routes and iptables filters to HV +- name: Configure HV IPv6 routes and iptables filters + hosts: hypervisor + vars: + _ip_version: "{{ ip_version | default(4) | int }}" + tasks: + - name: Early end if ip version is not 6 + ansible.builtin.meta: end_play + when: _ip_version != 6 + + - name: Obtain the router external interface LLA + delegate_to: router-0 + vars: + router_ext_if: eth0 ansible.builtin.shell: cmd: > - ip r show default | - grep "proto dhcp" | - grep -o "dev \w*" | - cut -d" " -f 2 - ignore_errors: true - register: dhcp_default_route_device + set -o pipefail && + ip -j -6 address show dev {{ router_ext_if }} scope link | jq .[0].addr_info[1].local | sed 's/"//g' + register: router_ext_if_lla changed_when: false - - name: Remove DHCP default route if it exists - when: - - dhcp_default_route_device.rc == 0 - - dhcp_default_route_device.stdout | trim | length > 0 - vars: - default_device: "{{ dhcp_default_route_device.stdout | trim }}" - block: - - name: Obtain the connection for the DHCP default route device - ansible.builtin.command: - cmd: > - nmcli -g GENERAL.CONNECTION device show {{ default_device }} - register: default_connection - changed_when: false - - - name: Ignore dhcp default route from ocpbm interfaces - become: true - community.general.nmcli: - conn_name: "{{ default_connection.stdout | trim }}" 
- gw4_ignore_auto: true - gw6_ignore_auto: true - never_default4: true - state: present - - - name: Remove default route obtained via DHCP from leafs in order to apply BGP + # NOTE: This route is not persistent, but it is ok because the hypervisor will not be rebooted. + # Adding this route NM is a bit overkill (a config file has to be created for it) + - name: Add route from HV to test pods via router when IPv6 become: true ansible.builtin.shell: - cmd: > - set -o pipefail && ip route show default | - grep "proto dhcp" | - xargs -r ip route del + cmd: | + ip r del 100.64.10/24 || true + ip r add 100.64.10/24 via inet6 {{ router_ext_if_lla.stdout | trim }} dev ocpbm changed_when: false - - name: Restart NetworkManager + - name: Allow from test pod and to test pod traffic become: true - ansible.builtin.systemd: - name: NetworkManager.service - state: restarted - - - name: Check new default route corresponds with BGP - ansible.builtin.command: - cmd: "ip route show default" - register: default_ip_route_result - retries: 10 - delay: 1 - until: "'proto bgp' in default_ip_route_result.stdout" + ansible.builtin.shell: + cmd: > + iptables -t filter -I LIBVIRT_FWI -s 100.64.10.0/24 -i ocpbm -j ACCEPT && + iptables -t filter -I LIBVIRT_FWI -d 100.64.10.0/24 -o ocpbm -j ACCEPT changed_when: false diff --git a/playbooks/bgp/prepare-bgp-hypervisor-ipv6-radvd.yaml b/playbooks/bgp/prepare-bgp-hypervisor-ipv6-radvd.yaml new file mode 100644 index 0000000000..f303e3bc6e --- /dev/null +++ b/playbooks/bgp/prepare-bgp-hypervisor-ipv6-radvd.yaml @@ -0,0 +1,22 @@ +--- +- name: Configure RADVD on hypervisor for BGP + hosts: hypervisor + tasks: + - name: Install RADVD + become: true + ansible.builtin.package: + name: radvd + state: present + - name: Configure RADVD + become: true + ansible.builtin.copy: + src: files/radvd.conf + dest: /etc/radvd.conf + mode: '644' + - name: Enable and start RADVD + become: true + ansible.builtin.systemd: + name: radvd + state: restarted + enabled: true + 
daemon_reload: true diff --git a/playbooks/bgp/prepare-bgp-spines-leaves.yaml b/playbooks/bgp/prepare-bgp-spines-leaves.yaml index 4007d3425b..02cc851648 100644 --- a/playbooks/bgp/prepare-bgp-spines-leaves.yaml +++ b/playbooks/bgp/prepare-bgp-spines-leaves.yaml @@ -130,6 +130,8 @@ # Router play - name: Configure router hosts: "{{ router_bool | default(false) | ternary('routers', 'localhost') }}" + vars: + _ip_version: "{{ ip_version | default(4) | int }}" tasks: - name: Early end if no router defined ansible.builtin.meta: end_play @@ -186,7 +188,8 @@ state: present loop: "{{ router_downlink_conns }}" - - name: Configure uplink router connections with nmcli + # uplink router IPv4 is configured for both IPv4 and IPv6 jobs + - name: Configure uplink router connections with nmcli when IPv4 become: true community.general.nmcli: autoconnect: true @@ -196,6 +199,19 @@ method4: manual method6: link-local state: present + when: _ip_version == 4 + + - name: Configure uplink router connections with nmcli when IPv6 + become: true + community.general.nmcli: + autoconnect: true + conn_name: "{{ router_uplink_conn }}" + ip4: "{{ router_uplink_ip }}/24" + ip6: "{{ router_uplink_ipv6 }}/126" + method4: manual + method6: manual + state: present + when: _ip_version == 6 - name: Add provider network gateway IP to router loopback become: true @@ -211,6 +227,8 @@ state: present - name: Configure FRR + vars: + _router_id: "{{ '' if _ip_version == 4 else '1.1.1.1' }}" become: true ansible.builtin.template: src: templates/router-frr.conf.j2 @@ -239,9 +257,36 @@ router_ext_if: eth0 become: true ansible.builtin.shell: - cmd: | - iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ router_ext_if }} -j MASQUERADE + cmd: > + {% if _ip_version == 4 %} + iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ router_ext_if }} -j MASQUERADE && iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -o {{ router_ext_if }} -j MASQUERADE + {% else %} + ip6tables -t nat -A POSTROUTING -s 
f00d:f00d:f00d:f00d:99:99::/96 -o {{ router_ext_if }} -j MASQUERADE + {% endif %} + changed_when: false + + - name: Add route to RH intranet from router via HV when IPv6 + when: _ip_version == 6 + block: + - name: Obtain the router default IPv6 route + ansible.builtin.shell: + cmd: > + set -o pipefail && + ip -6 r show default | grep "proto ra" | head -1 + register: router_default_ra_route + changed_when: false + + # NOTE: This route is not persistent, but it is ok because the router will not be rebooted. + # Adding this route NM is a bit overkill (a config file has to be created for it) + - name: Add route from router to RH intranet via HV when IPv6 + vars: + router_default_ra_route_list: "{{ router_default_ra_route.stdout | trim | split | list }}" + become: true + ansible.builtin.shell: + cmd: | + ip r del 10.0.0.0/8 || true + ip r add 10.0.0.0/8 via inet6 {{ router_default_ra_route_list[2] }} dev {{ router_default_ra_route_list[4] }} changed_when: false - name: Restart NetworkManager @@ -254,6 +299,8 @@ # Spines play - name: Configure spines hosts: spines + vars: + _ip_version: "{{ ip_version | default(4) | int }}" tasks: - name: Obtain the connection for the eth0 interface ansible.builtin.command: @@ -306,6 +353,8 @@ loop: "{{ spine_conns }}" - name: Configure FRR + vars: + _router_id: "{{ '' if _ip_version == 4 else '1.1.1.10' + ansible_hostname.split('-')[-1]}}" become: true ansible.builtin.template: src: templates/spine-frr.conf.j2 @@ -333,8 +382,8 @@ - name: Masquerade outgoing traffic become: true ansible.builtin.shell: - cmd: | - iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ spine_uplink_if }} -j MASQUERADE + cmd: > + iptables -t nat -A POSTROUTING -s 99.99.0.0/16 -o {{ spine_uplink_if }} -j MASQUERADE && iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -o {{ spine_uplink_if }} -j MASQUERADE changed_when: false @@ -344,6 +393,7 @@ vars: leaf_id: "{{ (ansible_hostname.split('-')[-1] | int) % 2 }}" # always 2 leaves per rack rack_id: "{{ 
(ansible_hostname.split('-')[-1] | int) // 2 }}" # always 2 leaves per rack + _ip_version: "{{ ip_version | default(4) | int }}" tasks: - name: Obtain the connection for the eth0 interface ansible.builtin.command: @@ -388,40 +438,109 @@ downlink_ifs_rack3: "{{ downlink_ifs_rack3 | default([]) + [interface_name] }}" loop: "{{ range(3, 6) | list }}" # number of OCP nodes on rack3 is always 3 - # rack3 is special because only OCP nodes are deployed on it when it exists - - name: Configure downlink leaf connections on rack3 - become: true - vars: - leaf_ds_ip4: >- - 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} - when: (rack_id | int) == 3 - community.general.nmcli: - autoconnect: true - conn_name: "{{ item }}" - ip4: "{{ leaf_ds_ip4 }}/30" - method4: manual - method6: link-local - state: present - loop: "{{ downlink_conns_rack3 }}" - loop_control: - index_var: loop_index + - name: Configure downlink leaf connections IPv4 + when: + - _ip_version == 4 + block: + # rack3 is special because only OCP nodes are deployed on it when it exists + - name: Configure downlink leaf connections on rack3 + become: true + vars: + leaf_ds_ip4: >- + 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} + when: (rack_id | int) == 3 + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + ip4: "{{ leaf_ds_ip4 }}/30" + method4: manual + method6: link-local + state: present + loop: "{{ downlink_conns_rack3 }}" + loop_control: + index_var: loop_index - - name: Configure downlink leaf connections on racks 0, 1 and 2 - become: true - vars: - leaf_ds_ip4: >- - 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} - when: (rack_id | int) != 3 - community.general.nmcli: - autoconnect: true - conn_name: "{{ item }}" - ip4: "{{ leaf_ds_ip4 }}/30" - method4: manual - method6: link-local - state: present - loop: "{{ leaf_downlink_conns }}" - loop_control: - index_var: loop_index + - name: Configure 
downlink leaf connections on racks 0, 1 and 2 + become: true + vars: + leaf_ds_ip4: >- + 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} + when: (rack_id | int) != 3 + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + ip4: "{{ leaf_ds_ip4 }}/30" + method4: manual + method6: link-local + state: present + loop: "{{ leaf_downlink_conns }}" + loop_control: + index_var: loop_index + + - name: Configure FRR + become: true + vars: + downlink_interfaces: "{{ downlink_ifs_rack3 if (rack_id | int) == 3 else leaf_downlink_ifs }}" + _router_id: '' + ansible.builtin.template: + src: templates/leaf-frr.conf.j2 + dest: /etc/frr/frr.conf + owner: frr + group: frr + mode: '640' + + - name: Configure downlink leaf connections IPv6 + when: + - _ip_version == 6 + block: + - name: Fail if num_racks > 3 + ansible.builtin.assert: + that: + - num_racks | default(4) | int <= 3 + fail_msg: "num_racks must be lower than 3 when IPv6 is used" + changed_when: false + + - name: Configure downlink leaf connections on racks 0, 1 and 2 + become: true + vars: + _end_byte: "{{ '{:x}'.format(1 + 4 * (loop_index | int)) }}" + _leaf_ds_ip6: >- + 2620:cf::100:{{ 64 + (leaf_id | int) }}:{{ rack_id }}:{{ _end_byte }} + _leaf_ds_ip4: >- + 100.{{ 64 + (leaf_id | int) }}.{{ rack_id }}.{{ 1 + 4 * (loop_index | int) }} + community.general.nmcli: + autoconnect: true + conn_name: "{{ item }}" + ip4: "{{ _leaf_ds_ip4 }}/30" + ip6: "{{ _leaf_ds_ip6 }}/126" + method4: manual + method6: manual + state: present + loop: "{{ leaf_downlink_conns }}" + loop_control: + index_var: loop_index + + - name: Create list of IPv6 downstream peers per leaf + vars: + _end_byte: "{{ '{:x}'.format(2 + 4 * (loop_index | int)) }}" + _leaf_ds_ip6_peer: >- + 2620:cf::100:{{ 64 + (leaf_id | int) }}:{{ rack_id }}:{{ _end_byte }} + ansible.builtin.set_fact: + leaf_ds_ip6_peer_list: "{{ leaf_ds_ip6_peer_list | default([]) + [_leaf_ds_ip6_peer] }}" + loop: "{{ leaf_downlink_conns }}" + 
loop_control: + index_var: loop_index + + - name: Configure FRR + become: true + vars: + _router_id: "{{ '1.1.1.20' + ansible_hostname.split('-')[-1] }}" + ansible.builtin.template: + src: templates/leaf-frr.conf.j2 + dest: /etc/frr/frr.conf + owner: frr + group: frr + mode: '640' - name: Configure uplink leaf connections become: true @@ -433,17 +552,6 @@ state: present loop: "{{ uplink_conns }}" - - name: Configure FRR - become: true - vars: - downlink_interfaces: "{{ downlink_ifs_rack3 if (rack_id | int) == 3 else leaf_downlink_ifs }}" - ansible.builtin.template: - src: templates/leaf-frr.conf.j2 - dest: /etc/frr/frr.conf - owner: frr - group: frr - mode: '640' - - name: Enable FRR Zebra daemon become: true ansible.builtin.lineinfile: @@ -464,61 +572,20 @@ # Final play to remove DHCP default routes - name: Remove DHCP default routes and use BGP instead hosts: "leafs{{ router_bool | default(false) | ternary(',spines', '') }}" + vars: + _dash_six: "{{ '' if (ip_version | default(4) | int) == 4 else '-6' }}" + _proto: "{{ 'dhcp' if (ip_version | default(4) | int) == 4 else 'ra' }}" tasks: - - name: Obtain the device with the DHCP default route - ansible.builtin.shell: - cmd: > - ip r show default | - grep "proto dhcp" | - grep -o "dev \w*" | - cut -d" " -f 2 - ignore_errors: true - register: dhcp_default_route_device - changed_when: false - - - name: Remove DHCP default route if it exists - when: - - dhcp_default_route_device.rc == 0 - - dhcp_default_route_device.stdout | trim | length > 0 - vars: - default_device: "{{ dhcp_default_route_device.stdout | trim }}" - block: - - name: Obtain the connection for the DHCP default route device - ansible.builtin.command: - cmd: > - nmcli -g GENERAL.CONNECTION device show {{ default_device }} - register: default_connection - changed_when: false - - - name: Ignore dhcp default route from ocpbm interfaces - become: true - community.general.nmcli: - conn_name: "{{ default_connection.stdout | trim }}" - gw4_ignore_auto: true - 
gw6_ignore_auto: true - never_default4: true - state: present - - - name: Remove default route obtained via DHCP from leaves in order to apply BGP - become: true - ansible.builtin.shell: + - name: Check default route corresponds with BGP + ansible.builtin.command: cmd: > - set -o pipefail && ip route show default | - grep "proto dhcp" | - xargs -r ip route del + ip {{ _dash_six }} route show default + register: _initial_default_ip_route_result changed_when: false - - name: Restart NetworkManager - become: true - ansible.builtin.systemd: - name: NetworkManager.service - state: restarted + - name: Early end if default route is already based on BGP + ansible.builtin.meta: end_play + when: "'proto bgp' in _initial_default_ip_route_result.stdout" - - name: Check new default route corresponds with BGP - ansible.builtin.command: - cmd: "ip route show default" - register: default_ip_route_result - retries: 10 - delay: 1 - until: "'proto bgp' in default_ip_route_result.stdout" - changed_when: false + - name: Apply the BGP default routes + ansible.builtin.include_tasks: tasks/apply_bgp_default_routes.yaml diff --git a/playbooks/bgp/tasks/apply_bgp_default_routes.yaml b/playbooks/bgp/tasks/apply_bgp_default_routes.yaml new file mode 100644 index 0000000000..fc3ecc9699 --- /dev/null +++ b/playbooks/bgp/tasks/apply_bgp_default_routes.yaml @@ -0,0 +1,84 @@ +--- +- name: Set the retry count + ansible.builtin.set_fact: + retry_count: "{{ 0 if retry_count is undefined else retry_count|int + 1 }}" + +- name: Obtain the device with the DHCP default route + ansible.builtin.shell: + cmd: > + ip {{ _dash_six }} route show default | + grep "proto {{ _proto }}" | + grep -o "dev \w*" | + cut -d" " -f 2 + ignore_errors: true + register: dhcp_default_route_device + changed_when: false + +- name: Remove DHCP/RA default route if it exists + when: + - dhcp_default_route_device.rc == 0 + - dhcp_default_route_device.stdout | trim | length > 0 + block: + - name: Obtain the connection for the DHCP 
default route device + ansible.builtin.command: + cmd: > + nmcli -g GENERAL.CONNECTION device show {{ item | trim }} + register: default_connections + changed_when: false + loop: "{{ dhcp_default_route_device.stdout_lines }}" + + - name: Ignore dhcp default route from ocpbm interfaces + become: true + community.general.nmcli: + conn_name: "{{ item.stdout | trim }}" + gw4_ignore_auto: true + gw6_ignore_auto: true + never_default4: true + state: present + loop: "{{ default_connections.results }}" + + # community.general.nmcli does not support never_default6, so a command is needed + - name: Set ipv6.never-default to yes for relevant connections + become: true + ansible.builtin.command: + cmd: > + nmcli con mod "{{ item.stdout | trim }}" ipv6.never-default yes + changed_when: false + loop: "{{ default_connections.results }}" + + - name: Restart NetworkManager + become: true + ansible.builtin.systemd: + name: NetworkManager.service + state: restarted + +- name: Remove default route obtained via DHCP/RA from leafs in order to apply BGP + become: true + ansible.builtin.shell: + cmd: > + set -o pipefail && + ip {{ _dash_six }} route show default | + (grep "proto {{ _proto }}" || true) | + while read route; do + ip {{ _dash_six }} route del $route; done + changed_when: false + +- name: Block to check BGP default route or rescue + block: + - name: Check new default route corresponds with BGP + ansible.builtin.command: + cmd: > + ip {{ _dash_six }} route show default + register: default_ip_route_result + changed_when: false + until: "'proto bgp' in default_ip_route_result.stdout" + retries: 10 + delay: 1 + rescue: + - name: Fail after 5 retries + ansible.builtin.fail: + msg: "Failed to apply BGP default routes after 5 retries" + when: retry_count|int == 5 + + - name: Apply the BGP default routes again + ansible.builtin.include_tasks: apply_bgp_default_routes.yaml diff --git a/playbooks/bgp/templates/leaf-frr.conf.j2 b/playbooks/bgp/templates/leaf-frr.conf.j2 index 
3b432027b4..d4405b0c4d 100644 --- a/playbooks/bgp/templates/leaf-frr.conf.j2 +++ b/playbooks/bgp/templates/leaf-frr.conf.j2 @@ -14,6 +14,9 @@ debug bgp updates debug bgp update-groups router bgp 64999 +{% if _router_id %} + bgp router-id {{_router_id}} +{% endif %} bgp log-neighbor-changes bgp graceful-shutdown @@ -32,9 +35,12 @@ router bgp 64999 neighbor downlink password f00barZ {% endif %} ! neighbor downlink capability extended-nexthop -{% for iface in downlink_interfaces %} +{% for iface in downlink_interfaces | default([]) %} neighbor {{iface}} interface peer-group downlink {% endfor %} +{% for peer_ip6 in leaf_ds_ip6_peer_list | default([]) %} + neighbor {{peer_ip6}} peer-group downlink +{% endfor %} neighbor uplink peer-group neighbor uplink remote-as external diff --git a/playbooks/bgp/templates/router-frr.conf.j2 b/playbooks/bgp/templates/router-frr.conf.j2 index d3308bd36b..9676765e99 100644 --- a/playbooks/bgp/templates/router-frr.conf.j2 +++ b/playbooks/bgp/templates/router-frr.conf.j2 @@ -14,6 +14,9 @@ debug bgp updates debug bgp update-groups router bgp 65000 +{% if _router_id %} + bgp router-id {{_router_id}} +{% endif %} bgp log-neighbor-changes bgp graceful-shutdown diff --git a/playbooks/bgp/templates/spine-frr.conf.j2 b/playbooks/bgp/templates/spine-frr.conf.j2 index c0f03b67ed..7a100f9365 100644 --- a/playbooks/bgp/templates/spine-frr.conf.j2 +++ b/playbooks/bgp/templates/spine-frr.conf.j2 @@ -14,6 +14,9 @@ debug bgp updates debug bgp update-groups router bgp 65000 +{% if _router_id %} + bgp router-id {{_router_id}} +{% endif %} bgp log-neighbor-changes bgp graceful-shutdown From 603c4e1ef01de6bb35c647b40a59ca0a53bca616 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Mon, 8 Sep 2025 12:26:15 +0200 Subject: [PATCH 326/480] [ci_gen_kustomize_values] Add bgp_dt04_ipv6 templates OSPRH-9552 --- .../bgp_dt01/network-values/values.yaml.j2 | 2 +- .../common-bgp-edpm-values.yaml.j2 | 74 +++++++ .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + 
.../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../values.yaml.j2 | 5 + .../network-values/values.yaml.j2 | 192 ++++++++++++++++++ 9 files changed, 297 insertions(+), 1 deletion(-) create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-compute-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-networker-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 index 37efadd534..9cdbcaaaa2 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 @@ -118,7 +118,7 @@ data: {% endif %} {% else %} {% if ns.interfaces[network.network_name] is defined %} - iface: {{ ns.interfaces[network.network_name] }} + iface: {{ network.network_name }} {% endif %} {% endif %} {% if network.tools.multus is defined %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 
b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 new file mode 100644 index 0000000000..b6d94a31a4 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 @@ -0,0 +1,74 @@ +# source: bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2 +{% set instances_names = [] %} +{% set rack = 'r' ~ rack_number %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith('-'.join([rack, node_type])) %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_sshd_allowed_ranges: +{% set sshd_allowed_range = cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) %} +{% for rack in ['r0', 'r1', 'r2', ''] %} +{% set _ = sshd_allowed_range.append(cifmw_networking_env_definition.networks['ctlplane' + rack].network_v6) %} +{% endfor %} +{% for range in sshd_allowed_range %} + - "{{ range }}" +{% endfor %} + nodes: +{% for instance in instances_names %} + {{ instance }}: + ansible: +{% set ctlplane_rack = 'ctlplane' + rack %} + host: {{ cifmw_networking_env_definition.instances[instance].networks[ctlplane_rack].ip_v6 }} +{% if original_content.data.nodeset.nodes['edpm-' ~ 
instance].ansible.ansibleVars is defined %} + ansibleVars: {{ original_content.data.nodeset.nodes['edpm-' ~ instance].ansible.ansibleVars }} +{% endif %} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} +{% if 'storagemgmt' not in net %} + - name: {{ net if net != ctlplane_rack else 'ctlplane' }} + subnetName: {{ 'subnet1' if net != ctlplane_rack else 'subnet' ~ rack_number }} +{% if 'ctlplane' in net %} + defaultRoute: true + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[ctlplane_rack].ip_v6 }} +{% endif %} +{% endif %} +{% endfor %} +{% set peer_suffix = 1 if 'compute' in instance else 5 %} + - name: BgpNet0 + subnetName: subnet{{ rack_number }} + fixedIP: 2620:cf::100:64:{{ rack_number }}:{{ peer_suffix + 1 }} + - name: BgpNet1 + subnetName: subnet{{ rack_number }} + fixedIP: 2620:cf::100:65:{{ rack_number }}:{{ peer_suffix + 1 }} + - name: BgpNet0ipv4 + subnetName: subnet{{ rack_number }} + fixedIP: 100.64.{{ rack_number }}.{{ peer_suffix + 1 }} + - name: BgpNet1ipv4 + subnetName: subnet{{ rack_number }} + fixedIP: 100.65.{{ rack_number }}.{{ peer_suffix + 1 }} + - name: BgpMainNet + subnetName: subnet{{ rack_number }} + fixedIP: 99.99.{{ rack_number }}.{{ peer_suffix + 1 }} + - name: BgpMainNetV6 + subnetName: subnet{{ rack_number }} + fixedIP: f00d:f00d:f00d:f00d:99:99:{{ rack_number }}:{{ peer_suffix + 1 }} +{% endfor %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..a65cc87b6b --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r0-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 0 %} +{% 
include 'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..df35bb2d87 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r0-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r0-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 0 %} +{% include 'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..cdb8e284b8 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r1-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 1 %} +{% include 'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..2847a482e6 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r1-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r1-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 1 %} +{% include 
'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-compute-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-compute-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..6c1340f8ca --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-compute-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r2-compute-nodeset-values/values.yaml.j2 +{% set node_type = "compute" %} +{% set rack_number = 2 %} +{% include 'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-networker-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-networker-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..85619d2d5f --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/edpm-r2-networker-nodeset-values/values.yaml.j2 @@ -0,0 +1,5 @@ +--- +# source: bgp_dt04_ipv6/edpm-r2-networker-nodeset-values/values.yaml.j2 +{% set node_type = "networker" %} +{% set rack_number = 2 %} +{% include 'templates/bgp_dt04_ipv6/edpm-common-nodeset-values/common-bgp-edpm-values.yaml.j2' %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 new file mode 100644 index 0000000000..f9622a1ce8 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 @@ -0,0 +1,192 @@ +--- +# source: bgp_dt04_ipv6/network-values/values.yaml.j2 +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{# FIXEME: (hjensas/eolivare): We need to ensure the OCP 
cluster_name and base_domain is available here #} +{# Because devscripts use fqdn for node names when ipv6 #} +{% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: + name: {{ hostname }}.ocp.openstack.lab +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network.ip_v6 }} +{% if 'ctlplane' == network.network_name %} + base_if: {{ network.interface_name }} +{% endif %} +{% endfor %} +{% set node_bgp_orig_content = original_content.data.bgp.bgpdefs['node' ~ ns.ocp_index] %} +{% set node_bgp_net0 = node_bgp_orig_content.bgpnet0 %} +{% set node_bgp_net1 = node_bgp_orig_content.bgpnet1 %} + bgp_peers: + - {{ node_bgp_net0.bgp_peer }} + - {{ node_bgp_net1.bgp_peer }} + bgp_ip: + - {{ node_bgp_net0.bgp_ip }} + - {{ node_bgp_net1.bgp_ip }} +{% set subnet_index = (hostname | split('-'))[-1] | int %} +{% set ip_index = 1 if ('master-' in hostname or 'worker-3' == hostname) else 2 %} +{% set loopback_ip = original_content.data.bgp.subnets.bgpmainnet[subnet_index].allocationRanges[0].start | + ansible.utils.ipmath(ip_index) %} +{% set loopback_ipv6 = original_content.data.bgp.subnets.bgpmainnetv6[subnet_index].allocationRanges[0].start | + ansible.utils.ipmath(ip_index) %} + loopback_ip: {{ loopback_ip }} + loopback_ipv6: {{ loopback_ipv6 }} +{% if node_bgp_orig_content.routes | default(false) %} + routes: {{ node_bgp_orig_content.routes }} +{% endif %} +{% set ns.ocp_index = ns.ocp_index+1 %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% if network.network_name != 'ctlplane_ocp_nad' %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% 
if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} +{% if network.network_name != 'ctlplane' %} + - allocationRanges: +{% for range in network.tools.netconfig.ipv6_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network.network_v6 }} +{% if network.gw_v6 is defined %} + gateway: {{ network.gw_v6 }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% else %} +{% for rack in ['r0', 'r1', 'r2'] %} +{% set rack_subnet = cifmw_networking_env_definition.networks[network.network_name + rack] %} + - allocationRanges: +{% for range in rack_subnet.tools.netconfig.ipv6_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ rack_subnet.network_v6 }} +{% if rack_subnet.gw_v6 is defined %} + gateway: {{ rack_subnet.gw_v6 }} +{% endif %} + name: {{ 'subnet' ~ loop.index0 }} +{% if rack_subnet.vlan_id is defined %} + vlan: {{ rack_subnet.vlan_id }} +{% endif %} +{% endfor %} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% set lb_range_network = network if network.network_name != "ctlplane" else cifmw_networking_env_definition.networks.ctlplane_ocp_nad %} +{% for lb_range in lb_range_network.tools[tool].ipv6_ranges %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network.network_v6 | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if 
network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% elif network.network_name == "ctlplane" %} + iface: {{ ns.interfaces[network.network_name] }} +{% elif ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "bridge", + "isDefaultGateway": true, + "isGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, +{% if network.network_name == "octavia" %} + "bridge": "octbr", +{% elif network.network_name == "ctlplane" %} + "bridge": "ospbr", +{% else %} + "bridge": "{{ network.network_name }}", +{% endif %} + "ipam": { + "type": "whereabouts", +{% if network.network_name == "octavia" and network.tools.multus.ipv6_routes | default([]) | length > 0 %} + "routes": [ +{% for route in network.tools.multus.ipv6_routes %} + { + "dst": "{{ route.destination }}", + "gw": "{{ route.gateway }}" + }{% if not loop.last %},{% endif %} +{% endfor %} + ], +{% endif %} +{% set range_network = network if network.network_name != "ctlplane" else cifmw_networking_env_definition.networks.ctlplane_ocp_nad %} + "range": "{{ range_network.network_v6 }}", + "range_start": "{{ range_network.tools.multus.ipv6_ranges.0.start }}", + "range_end": "{{ range_network.tools.multus.ipv6_ranges.0.end }}", + "gateway": "{{ range_network.network_v6 |ansible.utils.nthhost(1) }}" + } + } +{% endif %} +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane.gw_v6 }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane.gw_v6 }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + 
values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v6 | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v6 | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: local-storage From 5215b4f90745af7e739034d099c91a2fbed3098f Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 10 Sep 2025 14:56:11 +0530 Subject: [PATCH 327/480] Set fail-fast to true in upstream zuul project fail-fast allows us to fail the entire pipeline the moment any job from the pipeline fails. Currently, we have to wait for all jobs to return their status for the pipeline to get complete. It wastes compute resoures as well as time. 
--- ci/templates/projects.yaml | 1 + zuul.d/projects.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/ci/templates/projects.yaml b/ci/templates/projects.yaml index 857c9e85a7..dd0d7fd622 100644 --- a/ci/templates/projects.yaml +++ b/ci/templates/projects.yaml @@ -9,6 +9,7 @@ - podified-multinode-edpm-ci-framework-pipeline - data-plane-adoption-ci-framework-pipeline github-check: + fail-fast: true jobs: - noop - cifmw-pod-ansible-test diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index e97f560f1e..36a6512e9d 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -1,5 +1,6 @@ - project: github-check: + fail-fast: true jobs: - noop - cifmw-pod-ansible-test From eddd0d59988db76bb0f604e3e93392a0d3d8310c Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 10 Sep 2025 12:38:13 +0530 Subject: [PATCH 328/480] Include read_global_vars.yml in pre-run: zuul.d/pods.yaml The reason to make the change one file per commit is our flakey jobs. It is pain to get all jobs pass at once --- zuul.d/pods.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zuul.d/pods.yaml b/zuul.d/pods.yaml index c65aa6de7a..8955abeb0e 100644 --- a/zuul.d/pods.yaml +++ b/zuul.d/pods.yaml @@ -9,6 +9,8 @@ Run lightweight jobs in pods required-projects: - openstack-k8s-operators/ci-framework + pre-run: + - ci/playbooks/read_global_vars.yml run: ci/playbooks/pod-jobs.yml - job: @@ -62,6 +64,7 @@ parent: build-push-container-base nodeset: centos-stream-9 pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/molecule-prepare.yml - ci/playbooks/dump_zuul_data.yml run: ci/playbooks/build_push_container_runner.yml From acae411996e935667fc7abf0e14ee37857060405 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 10 Sep 2025 13:01:46 +0530 Subject: [PATCH 329/480] Include read_global_vars.yml in pre-run: zuul.d/adoption.yaml The reason to make the change one file per commit is our flakey jobs. 
It is pain to get all jobs pass at once --- zuul.d/adoption.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index cdce1c15bf..f879f0ea52 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -11,6 +11,7 @@ roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml @@ -208,6 +209,7 @@ roles: &multinode-roles - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: &multinode-prerun + - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml From ff3ad9464cb2bc418c7d4bdea6728a3351febf55 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 10 Sep 2025 13:06:33 +0530 Subject: [PATCH 330/480] Include read_global_vars.yml in pre-run: zuul.d/edpm_build_images.yaml & edpm_build_images_content_provider.yaml The reason to make the change one file per commit is our flaky jobs. It is pain to get all jobs pass at once. 
--- zuul.d/edpm_build_images.yaml | 1 + zuul.d/edpm_build_images_content_provider.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/zuul.d/edpm_build_images.yaml b/zuul.d/edpm_build_images.yaml index e790dad22e..71caac4311 100644 --- a/zuul.d/edpm_build_images.yaml +++ b/zuul.d/edpm_build_images.yaml @@ -8,6 +8,7 @@ required-projects: - github.com/openstack-k8s-operators/edpm-image-builder pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/molecule-prepare.yml run: - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/edpm_build_images_content_provider.yaml b/zuul.d/edpm_build_images_content_provider.yaml index da36923a60..9c28efd63f 100644 --- a/zuul.d/edpm_build_images_content_provider.yaml +++ b/zuul.d/edpm_build_images_content_provider.yaml @@ -9,6 +9,7 @@ - github.com/openstack-k8s-operators/edpm-image-builder - github.com/openstack-k8s-operators/ci-framework pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/content_provider/pre.yml run: - ci/playbooks/e2e-prepare.yml From 1f06dd5fcf57496abec43535a41409d565ac5c97 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 10 Sep 2025 12:35:53 +0530 Subject: [PATCH 331/480] Include read_global_vars.yml in pre-run: zuul.d/molecule-base.yaml The reason to make the change one file per commit is our flakey jobs. 
It is pain to get all jobs pass at once --- zuul.d/molecule-base.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 01c41e6aa2..58a620efee 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -7,6 +7,7 @@ provides: - cifmw-molecule pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml @@ -25,6 +26,7 @@ provides: - cifmw-molecule pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml From 5c1a18382aa7d627b55dc2bf6589240279d352c0 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Wed, 10 Sep 2025 16:12:16 -0400 Subject: [PATCH 332/480] kustomize_deploy: Add retry logic for transient auth failures Add configurable retry logic to kubernetes.core.k8s_info tasks that lack resilience against transient OpenShift API authentication failures. When OpenShift is under load, API authentication can temporarily fail with HTTP 401 errors, causing the kustomize_deploy role to abort entire deployments. This change adds retry logic to the 3 vulnerable tasks: - Wait until OpenStack operators are deployed and ready (new install paradigm) - Wait until OpenStack operators are deployed and ready (old install paradigm) - Check if the OpenStack initialization CRD exists Changes: - Add cifmw_kustomize_deploy_k8s_retries (default: 5) and cifmw_kustomize_deploy_k8s_delay (default: 30s) configuration - Apply consistent retry pattern using retries/delay/until logic - Aligns with existing retry patterns used by other tasks in same file This prevents costly deployment restarts when experiencing temporary OpenShift API authentication issues. 
Jira: https://issues.redhat.com/browse/OSPRH-19853 Co-Authored-By: Claude Signed-off-by: John Fulton --- roles/kustomize_deploy/defaults/main.yml | 6 +++++- roles/kustomize_deploy/tasks/install_operators.yml | 11 +++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/roles/kustomize_deploy/defaults/main.yml b/roles/kustomize_deploy/defaults/main.yml index eefd888a7f..8b42ff26d0 100644 --- a/roles/kustomize_deploy/defaults/main.yml +++ b/roles/kustomize_deploy/defaults/main.yml @@ -219,7 +219,11 @@ cifmw_kustomize_deploy_dp_dest_file: >- ] | path_join }} -# timeouts +# timeouts and retry configuration cifmw_kustomize_deploy_delay: 10 cifmw_kustomize_deploy_retries_install_plan: 60 + +# Default retry settings for k8s_info operations to handle transient auth failures +cifmw_kustomize_deploy_k8s_retries: 5 +cifmw_kustomize_deploy_k8s_delay: 30 diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index b2545d23df..0d7da877ed 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -395,7 +395,10 @@ context: "{{ cifmw_openshift_context | default(omit) }}" kind: CustomResourceDefinition name: openstacks.operator.openstack.org + retries: "{{ cifmw_kustomize_deploy_k8s_retries }}" + delay: "{{ cifmw_kustomize_deploy_k8s_delay }}" register: _cifmw_kustomize_deploy_olm_osp_operator_openstack_crd_out + until: _cifmw_kustomize_deploy_olm_osp_operator_openstack_crd_out is success - name: Handle OpenStack initialization, if necessary when: (_cifmw_kustomize_deploy_olm_osp_operator_openstack_crd_out.resources | length) > 0 @@ -447,6 +450,10 @@ cifmw_kustomize_deploy_check_mode | default(false, true) }} + retries: "{{ cifmw_kustomize_deploy_k8s_retries }}" + delay: "{{ cifmw_kustomize_deploy_k8s_delay }}" + register: _openstack_operators_ready + until: _openstack_operators_ready is success - name: Wait until OpenStack operators are 
deployed and ready (old install paradigm) when: @@ -468,6 +475,10 @@ cifmw_kustomize_deploy_check_mode | default(false, true) }} + retries: "{{ cifmw_kustomize_deploy_k8s_retries }}" + delay: "{{ cifmw_kustomize_deploy_k8s_delay }}" + register: _openstack_operators_old_ready + until: _openstack_operators_old_ready is success with_items: - openstack.org/operator-name # The RabbitMQ operator does not share our openstack.org/operator-name label From 9f13957074fedb6a3c1129cb8cfe54c217beacc0 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 11 Sep 2025 17:23:24 +0530 Subject: [PATCH 333/480] Revert "Set fail-fast to true in upstream zuul project" This reverts commit 5215b4f90745af7e739034d099c91a2fbed3098f. --- ci/templates/projects.yaml | 1 - zuul.d/projects.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/ci/templates/projects.yaml b/ci/templates/projects.yaml index dd0d7fd622..857c9e85a7 100644 --- a/ci/templates/projects.yaml +++ b/ci/templates/projects.yaml @@ -9,7 +9,6 @@ - podified-multinode-edpm-ci-framework-pipeline - data-plane-adoption-ci-framework-pipeline github-check: - fail-fast: true jobs: - noop - cifmw-pod-ansible-test diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 36a6512e9d..e97f560f1e 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -1,6 +1,5 @@ - project: github-check: - fail-fast: true jobs: - noop - cifmw-pod-ansible-test From b2d24f2cda0aedd6b4354b3b3637906791f3c612 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 28 Aug 2025 21:24:29 +0200 Subject: [PATCH 334/480] Add cifmw_helpers role The new role would provide tasks that would be helpful to drop nested Ansible execution in the project. 
Signed-off-by: Daniel Pawlik --- playbooks/cifmw_collection_zuul_executor.yml | 16 +++ roles/cifmw_helpers/README.md | 133 ++++++++++++++++++ roles/cifmw_helpers/defaults/main.yml | 4 + roles/cifmw_helpers/tasks/set_dir_facts.yml | 8 ++ .../tasks/symlink_cifmw_collection.yml | 29 ++++ roles/cifmw_helpers/tasks/var_dir.yml | 29 ++++ roles/cifmw_helpers/tasks/var_file.yml | 20 +++ zuul.d/molecule.yaml | 9 ++ zuul.d/projects.yaml | 1 + 9 files changed, 249 insertions(+) create mode 100644 playbooks/cifmw_collection_zuul_executor.yml create mode 100644 roles/cifmw_helpers/README.md create mode 100644 roles/cifmw_helpers/defaults/main.yml create mode 100644 roles/cifmw_helpers/tasks/set_dir_facts.yml create mode 100644 roles/cifmw_helpers/tasks/symlink_cifmw_collection.yml create mode 100644 roles/cifmw_helpers/tasks/var_dir.yml create mode 100644 roles/cifmw_helpers/tasks/var_file.yml diff --git a/playbooks/cifmw_collection_zuul_executor.yml b/playbooks/cifmw_collection_zuul_executor.yml new file mode 100644 index 0000000000..95c4e995bb --- /dev/null +++ b/playbooks/cifmw_collection_zuul_executor.yml @@ -0,0 +1,16 @@ +--- +# NOTE: This is a required workaround, that would help us to drop +# nested Ansible execution. The Zuul executor does not have the +# cimfw collection installed, so on directly calling the roles, that +# are using cifmw.general.ci_script or cifmw.general.discover_latest_image +# or other, it will fail with an error: +# couldn't resolve module/action +# This playbook would make a symlink to .ansible collection dir, so in the +# next playbook execution, module should be available. 
+- name: Make symlink of cifmw general collection to Zuul Ansible workdir + hosts: localhost + tasks: + - name: Make a symlink to local .ansible collection dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: symlink_cifmw_collection.yml diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md new file mode 100644 index 0000000000..3bac8935d2 --- /dev/null +++ b/roles/cifmw_helpers/README.md @@ -0,0 +1,133 @@ +# cifmw_helpers + +That role was created to replace nested Ansible (Ansible that execute +ansible or ansible-playbook binary using command/shell module) execution in +this project. + +## Helper for Zuul executor cifmw general collection + +The Zuul executor does not have `ci-framework` collection installed. +It means, that when we want to drop nested Ansible execution, it would raise +an errors (example): + + ERROR! couldn't resolve module/action 'cifmw.general.discover_latest_image' + +To avoid such error, we will be using basic Ansible behaviour which is create +a symbolic link to our modules to Ansible workspace before edited playbook is +executed. + +Example, how to apply the workaround in Zuul CI job definition. + +Before applying fix: + +```yaml +# .zuul.yml + +- job: + name: cifmw-adoption-base + (...) + roles: + - zuul: github.com/openstack-k8s-operators/ci-framework + pre-run: + - ci/playbooks/multinode-customizations.yml + - ci/playbooks/e2e-prepare.yml + - ci/playbooks/dump_zuul_data.yml + post-run: + - ci/playbooks/e2e-collect-logs.yml + - ci/playbooks/collect-logs.yml + - ci/playbooks/multinode-autohold.yml + (...) +``` + +After: + +```yaml +- job: + name: cifmw-adoption-base + (...) 
+ roles: + - zuul: github.com/openstack-k8s-operators/ci-framework + pre-run: + - playbooks/cifmw_collection_zuul_executor.yml # here we added our play + - ci/playbooks/multinode-customizations.yml + - ci/playbooks/e2e-prepare.yml + - ci/playbooks/dump_zuul_data.yml + post-run: + - ci/playbooks/e2e-collect-logs.yml + - ci/playbooks/collect-logs.yml + - ci/playbooks/multinode-autohold.yml + (...) +``` + +## Helper for calling nested Ansible + +In many places in the project, there is nested Ansible execution done. +It means, that the Ansible is running `ansible` or `ansible-playbook` +inside the `shell` or `command` module. Sometimes, nested Ansible execution +is done 5 times (Ansible calls Ansible calls Ansible etc.) +That is later difficult to debug. More, logs are not printed directly, but they +are going to special dir, where after job finish, we can read. That's not +what we should have in the CI or during local tests. + +### Example nested Ansible replacement + +Example code, with nested Ansible execution: + +```yaml +- name: Run log collection + ansible.builtin.command: + chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + cmd: >- + ansible-playbook playbooks/99-logs.yml + -e @scenarios/centos-9/base.yml +``` + +Or another example, which does not execute `ansible-playbook`, but `ansible` +and directly call the role: + +```yaml +- name: Run run_logs tasks from cifmw_setup + ansible.builtin.command: > + ansible localhost + -m include_role + -a "name=cifmw_setup tasks_from=run_logs.yml" + -e "@scenarios/centos-9/base.yml" + args: + chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" +``` + +That code, can be replaced by: + +```yaml +- name: Read base centos-9 scenarios + vars: + provided_file: > + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ + ci-framework/scenarios/centos-9/base.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_file.yml + +- name: Run 
log collection + ansible.builtin.include_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs +``` + +Of course, before Zuul execute the playbook, it is mandatory to call `playbooks/cifmw_collection_zuul_executor.yml`. + +For setting all files in the directory as fact, use `var_dir.yml` tasks. +Example: + +```yaml +- name: Read all centos-9 scenarios dir files and set as fact + vars: + provided_dir: > + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ + ci-framework/scenarios/centos-9/ + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_dir.yml +``` diff --git a/roles/cifmw_helpers/defaults/main.yml b/roles/cifmw_helpers/defaults/main.yml new file mode 100644 index 0000000000..cb7f32acbf --- /dev/null +++ b/roles/cifmw_helpers/defaults/main.yml @@ -0,0 +1,4 @@ +--- +cifmw_helpers_project_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}" +cifmw_helpers_ansible_collection_dir: "{{ ansible_user_dir }}/.ansible/collections/ansible_collections" +cifmw_helpers_no_log: true diff --git a/roles/cifmw_helpers/tasks/set_dir_facts.yml b/roles/cifmw_helpers/tasks/set_dir_facts.yml new file mode 100644 index 0000000000..91f6b17bf1 --- /dev/null +++ b/roles/cifmw_helpers/tasks/set_dir_facts.yml @@ -0,0 +1,8 @@ +--- +- name: Set files as fact + when: "'content' in item" + ansible.builtin.set_fact: + "{{ item.key }}": "{{ item.value }}" + cacheable: true + loop: "{{ item['content'] | b64decode | from_yaml | dict2items }}" + no_log: "{{ cifmw_helpers_no_log }}" diff --git a/roles/cifmw_helpers/tasks/symlink_cifmw_collection.yml b/roles/cifmw_helpers/tasks/symlink_cifmw_collection.yml new file mode 100644 index 0000000000..df0cd10cf4 --- /dev/null +++ b/roles/cifmw_helpers/tasks/symlink_cifmw_collection.yml @@ -0,0 +1,29 @@ +--- +- name: Check if the ci-framework exists + ansible.builtin.stat: + path: "{{ cifmw_helpers_project_dir }}" + register: 
_cifmw_helpers_project_dir_stat + +- name: Make symlink to local Ansible collection dir + when: _cifmw_helpers_project_dir_stat.stat.exists + block: + - name: Check if cifmw general collection exists + ansible.builtin.stat: + path: "{{ cifmw_helpers_ansible_collection_dir }}/cifmw/general/plugins" + register: _cifmw_gen_collection + + - name: Check if cifmw general collection exists + when: not _cifmw_gen_collection.stat.exists + block: + - name: Workaround for earlier nested ansible execution + ansible.builtin.file: + path: "{{ cifmw_helpers_ansible_collection_dir }}/cifmw/general/" + state: directory + mode: "0755" + + - name: Create symlink to the local .ansible collection dir + ansible.builtin.file: + src: "{{ cifmw_helpers_project_dir }}/plugins" + dest: "{{ cifmw_helpers_ansible_collection_dir }}/cifmw/general/plugins" + state: link + force: true diff --git a/roles/cifmw_helpers/tasks/var_dir.yml b/roles/cifmw_helpers/tasks/var_dir.yml new file mode 100644 index 0000000000..c2ee42bd8a --- /dev/null +++ b/roles/cifmw_helpers/tasks/var_dir.yml @@ -0,0 +1,29 @@ +--- +# NOTE: include_vars only reads file where ansible-playbook was executed. +# In some plays, we are starting to drop nested ansible execution. +# In that case, include_vars would not work. 
+- name: Check directory is available + ansible.builtin.stat: + path: "{{ provided_dir }}" + register: param_dir + +- name: List files available in dir and parse + when: param_dir.stat.exists + block: + - name: List available files + ansible.builtin.command: | + ls {{ provided_dir }} + register: _param_dir + + - name: Read vars + ansible.builtin.slurp: + src: "{{ provided_dir }}/{{ item }}" + register: _parsed_vars + loop: "{{ _param_dir.stdout_lines }}" + no_log: "{{ cifmw_helpers_no_log }}" + + - name: Call task to parse all files as fact + ansible.builtin.include_tasks: + file: set_dir_facts.yml + loop: '{{ _parsed_vars["results"] }}' + no_log: "{{ cifmw_helpers_no_log }}" diff --git a/roles/cifmw_helpers/tasks/var_file.yml b/roles/cifmw_helpers/tasks/var_file.yml new file mode 100644 index 0000000000..a0b5513ca2 --- /dev/null +++ b/roles/cifmw_helpers/tasks/var_file.yml @@ -0,0 +1,20 @@ +--- +- name: Check if file is available + ansible.builtin.stat: + path: "{{ provided_file }}" + register: _param_file + +- name: Read vars + when: _param_file.stat.exists + ansible.builtin.slurp: + src: "{{ provided_file }}" + register: _parsed_vars + no_log: "{{ cifmw_helpers_no_log }}" + +- name: Set vars as fact + when: "'content' in _parsed_vars" + ansible.builtin.set_fact: + "{{ item.key }}": "{{ item.value }}" + cacheable: true + loop: "{{ _parsed_vars['content'] | b64decode | from_yaml | dict2items }}" + no_log: "{{ cifmw_helpers_no_log }}" diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 4c6b6f05a3..3856712de2 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -886,6 +886,15 @@ - ^.config/molecule/.* name: cifmw-molecule-cifmw_external_dns parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_helpers/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_helpers + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git 
a/zuul.d/projects.yaml b/zuul.d/projects.yaml index e97f560f1e..bbc342067a 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -33,6 +33,7 @@ - cifmw-molecule-cifmw_cephadm - cifmw-molecule-cifmw_create_admin - cifmw-molecule-cifmw_external_dns + - cifmw-molecule-cifmw_helpers - cifmw-molecule-cifmw_nfs - cifmw-molecule-cifmw_ntp - cifmw-molecule-cifmw_setup From 00df4068ee3979bcd0b610de98d15fb5c2c5f1ba Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 12 Sep 2025 09:25:58 +0200 Subject: [PATCH 335/480] Replace read_global_vars role with cifmw_helpers var_file The files are similar and there is no sense to keep one role just to parse the file. Signed-off-by: Daniel Pawlik --- ci/playbooks/read_global_vars.yml | 9 +++++++-- roles/read_global_vars/tasks/main.yml | 12 ------------ zuul.d/molecule.yaml | 9 --------- zuul.d/projects.yaml | 1 - 4 files changed, 7 insertions(+), 24 deletions(-) delete mode 100644 roles/read_global_vars/tasks/main.yml diff --git a/ci/playbooks/read_global_vars.yml b/ci/playbooks/read_global_vars.yml index 0439a2b3fc..c4dc24b7a9 100644 --- a/ci/playbooks/read_global_vars.yml +++ b/ci/playbooks/read_global_vars.yml @@ -1,5 +1,10 @@ --- - name: Load global variables hosts: all - roles: - - read_global_vars + tasks: + - name: Read group_vars all file + vars: + provided_file: "{{ playbook_dir }}/group_vars/all.yml" + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_file.yml diff --git a/roles/read_global_vars/tasks/main.yml b/roles/read_global_vars/tasks/main.yml deleted file mode 100644 index d4d629ecf7..0000000000 --- a/roles/read_global_vars/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Include group vars - ansible.builtin.include_vars: - file: "{{ playbook_dir }}/group_vars/all.yml" - name: group_vars_global_vars - -- name: Set global variables as cachable fact - when: group_vars_global_vars | default(false) - ansible.builtin.set_fact: - "{{ item.key }}": "{{ item.value }}" - cacheable: 
true - loop: "{{ group_vars_global_vars | dict2items }}" diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 3856712de2..2ef8d22291 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -985,15 +985,6 @@ - ^.config/molecule/.* name: cifmw-molecule-polarion parent: cifmw-molecule-noop -- job: - files: - - ^common-requirements.txt - - ^test-requirements.txt - - ^roles/read_global_vars/.* - - ^ci/playbooks/molecule.* - - ^.config/molecule/.* - name: cifmw-molecule-read_global_vars - parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index bbc342067a..7e94482b1b 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -84,7 +84,6 @@ - cifmw-molecule-pkg_build - cifmw-molecule-podman - cifmw-molecule-polarion - - cifmw-molecule-read_global_vars - cifmw-molecule-recognize_ssh_keypair - cifmw-molecule-registry_deploy - cifmw-molecule-repo_setup From 0efd6d4362dddb30e09d05610b0dc7d1bb7c797a Mon Sep 17 00:00:00 2001 From: jgilaber Date: Mon, 8 Sep 2025 10:53:38 +0200 Subject: [PATCH 336/480] Run nova-manage discover_hosts in ci_script Run the discover_hosts hosts command in a ci_script instead of a plain ansible command to enable debugging the output from the logs. 
--- roles/edpm_deploy/tasks/main.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/roles/edpm_deploy/tasks/main.yml b/roles/edpm_deploy/tasks/main.yml index 61f164126a..04ca19c73e 100644 --- a/roles/edpm_deploy/tasks/main.yml +++ b/roles/edpm_deploy/tasks/main.yml @@ -173,11 +173,12 @@ environment: PATH: "{{ cifmw_path }}" KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - ansible.builtin.command: - cmd: >- - oc rsh - --namespace={{ cifmw_install_yamls_defaults['NAMESPACE'] }} - nova-cell0-conductor-0 nova-manage cell_v2 discover_hosts --verbose + cifmw.general.ci_script: + output_dir: "{{ cifmw_basedir }}/artifacts" + executable: "/bin/bash" + script: | + set -xe + oc rsh --namespace={{ cifmw_install_yamls_defaults['NAMESPACE'] }} nova-cell0-conductor-0 nova-manage cell_v2 discover_hosts --verbose - name: Validate EDPM when: cifmw_edpm_deploy_run_validation | bool From 1e3d6491cf0abc52820ec8ad554a2e501801d522 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Sat, 6 Sep 2025 15:43:11 +0200 Subject: [PATCH 337/480] update_containers: allow to set full URIs for volume containers Sometimes the container used for cinder-volume needs to be fetched from a 3rd-party container registry. Thanks to cifmw_update_containers_cindervolumes_extra it is now possible to configure such containers. --- roles/update_containers/README.md | 1 + roles/update_containers/defaults/main.yml | 1 + .../update_containers/templates/update_containers.j2 | 11 ++++++++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/roles/update_containers/README.md b/roles/update_containers/README.md index 99634e8124..20d175e9da 100644 --- a/roles/update_containers/README.md +++ b/roles/update_containers/README.md @@ -17,6 +17,7 @@ If apply, please explain the privilege escalation done in this role. * `cifmw_update_containers_org`: The container registry namespace to pull container from. 
Default to `podified-antelope-centos9` * `cifmw_update_containers_tag`: The container tag. Default to "current-podified". * `cifmw_update_containers_cindervolumes`: The names of the cinder volumes prefix. Default to `[]`. +* `cifmw_update_containers_cindervolumes_extra`: Additional cinder volumes containers, meaning names and container URIs. Default to `{}`. * `cifmw_update_containers_manilashares`: The names of the manila shares prefix. Default to `[]`. * `cifmw_update_containers_agentimage`: Full Agent Image url for updating Agent Image. * `cifmw_update_containers_ceilometersgcoreImage`: Full Ceilometersgcore Image url for updating Ceilometersgcore Image. diff --git a/roles/update_containers/defaults/main.yml b/roles/update_containers/defaults/main.yml index 90aa83d19b..da7c0e65b6 100644 --- a/roles/update_containers/defaults/main.yml +++ b/roles/update_containers/defaults/main.yml @@ -42,6 +42,7 @@ cifmw_update_containers_openstack: false cifmw_update_containers_rollback: false cifmw_update_containers_cindervolumes: - default +cifmw_update_containers_cindervolumes_extra: {} cifmw_update_containers_manilashares: - default cifmw_update_containers_watcher: false diff --git a/roles/update_containers/templates/update_containers.j2 b/roles/update_containers/templates/update_containers.j2 index 6d0fb6a099..8ca97c31fb 100644 --- a/roles/update_containers/templates/update_containers.j2 +++ b/roles/update_containers/templates/update_containers.j2 @@ -77,12 +77,21 @@ spec: swiftObjectImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-object:{{ cifmw_update_containers_tag }} swiftProxyImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-swift-proxy-server:{{ cifmw_update_containers_tag }} testTempestImage: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix 
}}-tempest-all:{{ cifmw_update_containers_tag }} -{% if cifmw_update_containers_cindervolumes | length > 0 %} +{% if (cifmw_update_containers_cindervolumes | length > 0 or + (cifmw_update_containers_cindervolumes_extra is defined and cifmw_update_containers_cindervolumes_extra is mapping)) %} cinderVolumeImages: +{% endif %} +{% if cifmw_update_containers_cindervolumes | length > 0 %} {% for vol in cifmw_update_containers_cindervolumes %} {{ vol }}: {{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/{{ cifmw_update_containers_name_prefix }}-cinder-volume:{{ cifmw_update_containers_tag }} {% endfor %} {% endif %} +{% if (cifmw_update_containers_cindervolumes_extra is defined and + cifmw_update_containers_cindervolumes_extra is mapping) %} +{% for container_name, container_uri in cifmw_update_containers_cindervolumes_extra.items() %} + {{ container_name }}: {{ container_uri }} +{% endfor %} +{% endif %} {% if cifmw_update_containers_manilashares | length > 0 %} manilaShareImages: {% for shares in cifmw_update_containers_manilashares %} From 7bb835a9f79129c9fd820cab436e18b03d594eeb Mon Sep 17 00:00:00 2001 From: jgilaber Date: Mon, 8 Sep 2025 10:50:58 +0200 Subject: [PATCH 338/480] Add cifmw_hci_prepare_extra_services parameter Add cifmw_hci_prepare_extra_services parameter to hci_prepare role. This parameter allows the user to specify extra services in the dataplane when deploying using hci, for example, adding the telemetry service. By default, the parameter is empty and will do nothing. --- roles/hci_prepare/README.md | 1 + roles/hci_prepare/defaults/main.yml | 1 + roles/hci_prepare/tasks/phase2.yml | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/roles/hci_prepare/README.md b/roles/hci_prepare/README.md index 6d0a56a523..6489272d6d 100644 --- a/roles/hci_prepare/README.md +++ b/roles/hci_prepare/README.md @@ -15,6 +15,7 @@ None. * `cifmw_hci_prepare_storage_mgmt_mtu`: (Int) Storage-Management network MTU. Defaults to `1500`. 
* `cifmw_hci_prepare_storage_mgmt_vlan`: (Int) Storage-Management network VLAn. Defaults to `23`. * `cifmw_hci_prepare_namespace`: (String) Namespace to use to apply resources if install-yamls is not used. Defaults to `openstack`. +* `cifmw_hci_prepare_extra_services`: (List) List of additional services to add to the OpenStackDataPlaneNodeSet `services` list during HCI deployment. This allows you to customize which extra services are enabled on the EDPM nodes beyond the default set. Defaults to an empty list. ## Examples ### 1 - How to deploy HCI using hci_prepare and edpm_deploy diff --git a/roles/hci_prepare/defaults/main.yml b/roles/hci_prepare/defaults/main.yml index c2d9451637..7549fb849e 100644 --- a/roles/hci_prepare/defaults/main.yml +++ b/roles/hci_prepare/defaults/main.yml @@ -24,3 +24,4 @@ cifmw_hci_prepare_enable_repo_setup_service: true cifmw_hci_prepare_storage_mgmt_mtu: 1500 cifmw_hci_prepare_storage_mgmt_vlan: 23 cifmw_hci_prepare_namespace: openstack +cifmw_hci_prepare_extra_services: [] diff --git a/roles/hci_prepare/tasks/phase2.yml b/roles/hci_prepare/tasks/phase2.yml index 94c2fac861..6b5d0a792c 100644 --- a/roles/hci_prepare/tasks/phase2.yml +++ b/roles/hci_prepare/tasks/phase2.yml @@ -116,6 +116,12 @@ - neutron-metadata - libvirt - nova-custom-ceph + {% if cifmw_hci_prepare_extra_services | length > 0 %} + {% for svc in cifmw_hci_prepare_extra_services %} + - {{ svc }} + {% endfor %} + {% endif %} + - name: Enabled nova discover_hosts after deployment ansible.builtin.set_fact: From 8989b3a3637dbbc5b1cb037f44cb01c54a5d0a1f Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Wed, 3 Sep 2025 10:45:06 +0200 Subject: [PATCH 339/480] Dump openstack databases when must-gather is collected When we collect the must-gather info in CI, it would be useful to dump the openstack databases for troubleshooting purposes. In addition, this makes sure we properly test the "gather_db" code from must-gather. 
Jira: https://issues.redhat.com/browse/OSPRH-19553 Signed-off-by: Francesco Pantano --- roles/os_must_gather/defaults/main.yml | 1 + roles/os_must_gather/tasks/main.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/roles/os_must_gather/defaults/main.yml b/roles/os_must_gather/defaults/main.yml index c0eac6da65..4005722552 100644 --- a/roles/os_must_gather/defaults/main.yml +++ b/roles/os_must_gather/defaults/main.yml @@ -35,3 +35,4 @@ cifmw_os_must_gather_namespaces: - metallb-system - crc-storage cifmw_os_must_gather_host_network: false +cifmw_os_must_gather_dump_db: "ALL" diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index a7157a2311..bb899ccedd 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -58,6 +58,7 @@ PATH: "{{ cifmw_path }}" SOS_EDPM: "all" SOS_DECOMPRESS: "0" + OPENSTACK_DATABASES: "{{ cifmw_os_must_gather_dump_db }}" cifmw.general.ci_script: output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" script: >- @@ -66,6 +67,7 @@ --host-network={{ cifmw_os_must_gather_host_network }} --dest-dir {{ cifmw_os_must_gather_output_dir }}/logs -- ADDITIONAL_NAMESPACES={{ cifmw_os_must_gather_additional_namespaces }} + OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM SOS_DECOMPRESS=$SOS_DECOMPRESS gather &> {{ cifmw_os_must_gather_output_dir }}/logs/os_must_gather.log From 67824926ab0ebb66b0967be7032be69a2256a0be Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Tue, 2 Sep 2025 04:20:31 -0400 Subject: [PATCH 340/480] Expose new parameter TimingDataUrl This change is needed for the introduction of a new parameter timingDataUrl to the test-operator. This parameter will enable specifying the URL for saved stestr timing data, which will be used to optimize the test order and reduce Tempest execution time. 
--- roles/test_operator/README.md | 1 + roles/test_operator/defaults/main.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index 1bf111c750..fda3bb6138 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -90,6 +90,7 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_debug`: (Bool) Run Tempest in debug mode, it keeps the operator pod sleeping infinity (it must only set to `true`only for debugging purposes). Default value: `false` * `cifmw_test_operator_tempest_rerun_failed_tests`: (Bool) Activate tempest re-run feature. When activated, tempest will perform another run of the tests that failed during the first execution. Default value: `false` * `cifmw_test_operator_tempest_rerun_override_status`: (Bool) Allow override of exit status with the tempest re-run feature. When activated, the original return value of the tempest run will be overridden with a result of the tempest run on the set of failed tests. Default value: `false` +* `cifmw_test_operator_tempest_timing_data_url`: (String) An URL pointing to an archive that contains the saved timing data. This data is used to optimize the test order and reduce Tempest execution time. Default value: `''` * `cifmw_test_operator_tempest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When untouched it clears the default values set on the test-operator side. This means that the tempest test pods run with unspecified resource limits. Default value: `{requests: {}, limits: {}}` * `cifmw_tempest_tempestconf_config`: Deprecated, please use `cifmw_test_operator_tempest_tempestconf_config` instead * `cifmw_test_operator_tempest_tempestconf_config`: (Dict) This parameter can be used to customize the execution of the `discover-tempest-config` run. Please consult the test-operator documentation. 
For example, to pass a custom configuration for `tempest.conf`, use the `overrides` section: diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 51b74a88b2..ac626afbca 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -167,6 +167,7 @@ cifmw_test_operator_tempest_config: cleanup: "{{ stage_vars_dict.cifmw_test_operator_tempest_cleanup | bool }}" rerunFailedTests: "{{ stage_vars_dict.cifmw_test_operator_tempest_rerun_failed_tests | bool }}" rerunOverrideStatus: "{{ stage_vars_dict.cifmw_test_operator_tempest_rerun_override_status | bool }}" + timingDataUrl: "{{ stage_vars_dict.cifmw_test_operator_tempest_timing_data_url | default(omit) }}" workflow: "{{ stage_vars_dict.cifmw_test_operator_tempest_workflow }}" debug: "{{ stage_vars_dict.cifmw_test_operator_tempest_debug }}" From 492cd56292fce4222e6311be2b97794b9b601841 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Mon, 8 Sep 2025 13:24:02 +0200 Subject: [PATCH 341/480] libvirt_manager: support SCSI alongside virtio Add optional SCSI bus support for the root and extra disks in libvirt manager role. For OCP VMs, scsi could provide better performance, especially in SNO layout. 
Signed-off-by: Bohdan Dobrelia --- roles/devscripts/README.md | 2 ++ roles/libvirt_manager/README.md | 2 ++ roles/libvirt_manager/tasks/create_vms.yml | 1 + roles/libvirt_manager/templates/attach-volume.xml.j2 | 7 ++++++- roles/libvirt_manager/templates/domain.xml.j2 | 8 ++++++++ 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/roles/devscripts/README.md b/roles/devscripts/README.md index 8f068e4eae..79eac7f133 100644 --- a/roles/devscripts/README.md +++ b/roles/devscripts/README.md @@ -183,10 +183,12 @@ Allowed values can be found [here](https://mirror.openshift.com/pub/openshift-v4 image_local_dir: "{{ cifmw_basedir }}/images/" disk_file_name: "ocp_master" disksize: "100" + disk_bus: virtio cpus: 16 memory: 32 extra_disks_num: 3 extra_disks_size: 50G + extra_disks_bus: scsi nets: - ocppr - ocpbm diff --git a/roles/libvirt_manager/README.md b/roles/libvirt_manager/README.md index daa0b2fc40..f9c79514a8 100644 --- a/roles/libvirt_manager/README.md +++ b/roles/libvirt_manager/README.md @@ -83,11 +83,13 @@ cifmw_libvirt_manager_configuration: image_local_dir: (string, image destination for download. Optional if disk_file_name is set to "blank") disk_file_name: (string, target image name. If set to "blank", will create a blank image) disksize: (integer, disk size for the VM type. Optional, defaults to 40G) + disk_bus: (string, optional. Bus type for / disk. It can be virtio or scsi. Defaults to `virtio`) memory: (integer, RAM amount in GB. Optional, defaults to 2) cpus: (integer, amount of CPU. Optional, defaults to 2) nets: (ordered list of networks to connect to) extra_disks_num: (integer, optional. Number of extra disks to be configured.) extra_disks_size: (string, optional. Storage capacity to be allocated. Example 1G, 512M) + extra_disks_bus: (string, optional. Bus type for extra disks. It can be virtio or scsi. Defaults to `virtio`) user: (string, optional. Username to create on the vm which can becomes root. 
Defaults to `zuul`) password: (string, optional, defaults to fooBar. Root password for console access) target: (Hypervisor hostname you want to deploy the family on. Optional) diff --git a/roles/libvirt_manager/tasks/create_vms.yml b/roles/libvirt_manager/tasks/create_vms.yml index 0e586abe8d..3705e08af1 100644 --- a/roles/libvirt_manager/tasks/create_vms.yml +++ b/roles/libvirt_manager/tasks/create_vms.yml @@ -105,6 +105,7 @@ vars: vol_num: "{{ vm_data.extra_disks_num }}" vol_size: "{{ vm_data.extra_disks_size }}" + vol_bus: "{{ vm_data.extra_disks_bus | default('virtio') }}" ansible.builtin.include_tasks: volumes.yml - name: "Find volume attachments for VM {{ vm }}" diff --git a/roles/libvirt_manager/templates/attach-volume.xml.j2 b/roles/libvirt_manager/templates/attach-volume.xml.j2 index 8ec73722fb..9da62ac176 100644 --- a/roles/libvirt_manager/templates/attach-volume.xml.j2 +++ b/roles/libvirt_manager/templates/attach-volume.xml.j2 @@ -1,5 +1,10 @@ - + {% if vol_bus == 'scsi' %} + +
+ {% else %} + + {% endif %} diff --git a/roles/libvirt_manager/templates/domain.xml.j2 b/roles/libvirt_manager/templates/domain.xml.j2 index 544276b1c4..c62a8b6dbc 100644 --- a/roles/libvirt_manager/templates/domain.xml.j2 +++ b/roles/libvirt_manager/templates/domain.xml.j2 @@ -30,12 +30,20 @@ + {% set disk_bus = vm_data.disk_bus | default('scsi') %} + {% if disk_bus == 'scsi' %} + {% else %} + + {% endif %} + {% set extra_disks_bus = vm_data.extra_disks_bus | default('virtio') %} + {% if disk_bus == 'scsi' or extra_disks_bus == 'scsi' %}
+ {% endif %}
From 866d65ebeb8585e8b74295290a31dc0d7f8f5035 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 12 Sep 2025 18:46:19 +0200 Subject: [PATCH 342/480] Improve getting CRC pods logs There is an issue, that the ci_script on executing command, that is copying files from CRC host to local dir it fails with error as a root user: debug1: No more authentication methods to try. root@api.crc.testing: Permission denied (publickey,gssapi-keyex,gssapi-with-mic). Connection closed Let's copy the files on the server to another place, set permissions to the user and then pull them to the local server. Signed-off-by: Daniel Pawlik --- roles/artifacts/tasks/crc.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/artifacts/tasks/crc.yml b/roles/artifacts/tasks/crc.yml index 4e30b9d9b7..2971eaec2d 100644 --- a/roles/artifacts/tasks/crc.yml +++ b/roles/artifacts/tasks/crc.yml @@ -12,6 +12,7 @@ ansible.builtin.shell: cmd: >- ssh-keyscan {{ cifmw_artifacts_crc_host }} >> ~/.ssh/known_hosts + - name: Get CRC things only if we know it when: - crc_host_key is defined @@ -40,12 +41,16 @@ sudo systemctl restart sshd; sudo cp -r .ssh /root/; sudo chown -R root: /root/.ssh; + mkdir -p /tmp/crc-logs-artifacts; + sudo cp -av /ostree/deploy/rhcos/var/log/pods /tmp/crc-logs-artifacts/; + sudo chown -R core:core /tmp/crc-logs-artifacts; EOF + - name: Copy logs from CRC VM ignore_errors: true # noqa: ignore-errors cifmw.general.ci_script: output_dir: "{{ cifmw_artifacts_basedir }}/artifacts" script: >- scp -v -r -i {{ new_keypair_path | default(cifmw_artifacts_crc_sshkey) }} - root@{{ cifmw_artifacts_crc_host }}:/ostree/deploy/rhcos/var/log/pods + core@{{ cifmw_artifacts_crc_host }}:/tmp/crc-logs-artifacts {{ cifmw_artifacts_basedir }}/logs/crc/ From 978e2546a7596d527ee88473f6e4a84aa1d73a89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 28 Aug 2025 12:27:21 +0200 Subject: [PATCH 343/480] Un-pin the nat64 CS 9-stream image Image was pinned 
for OSPCIX-1020, should be unpinned once CentOS 9-Stream images are good again. When the nat64 molecule job passes, this should be good to merge. --- roles/nat64_appliance/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/nat64_appliance/tasks/main.yml b/roles/nat64_appliance/tasks/main.yml index 71e151b291..2a9aa941ce 100644 --- a/roles/nat64_appliance/tasks/main.yml +++ b/roles/nat64_appliance/tasks/main.yml @@ -72,7 +72,6 @@ ELEMENTS_PATH: "{{ cifmw_nat64_appliance_workdir }}/elements:{{ cifmw_nat64_appliance_workdir }}/edpm-image-builder/dib/" DIB_IMAGE_CACHE: "{{ cifmw_nat64_appliance_workdir }}/cache" DIB_DEBUG_TRACE: '1' - BASE_IMAGE_FILE: CentOS-Stream-GenericCloud-x86_64-9-20250812.1.x86_64.qcow2 cifmw.general.ci_script: chdir: "{{ cifmw_nat64_appliance_workdir }}" output_dir: "{{ cifmw_nat64_appliance_basedir }}/artifacts" From da3f9638285d6d7928de9c5cf19de47723b522bf Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 16 Sep 2025 13:44:31 +0200 Subject: [PATCH 344/480] Add example for cifmw_helpers for symlink cifmw modules The example how to solve an error: ERROR! couldn't resolve module/action 'cifmw.general.' might be helpful for others. Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md index 3bac8935d2..ea42af052c 100644 --- a/roles/cifmw_helpers/README.md +++ b/roles/cifmw_helpers/README.md @@ -59,6 +59,22 @@ After: (...) ``` +The example playbook - `playbooks/cifmw_collection_zuul_executor.yml` can look like: + +```yaml +--- +- name: Make cifmw modules to be available + hosts: all + tasks: + - name: Make a symlink to local .ansible collection dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: symlink_cifmw_collection.yml +``` + +After doing a symbolic link of modules dir to Ansible working dir in `$HOME` dir, +we should not have `ERROR! 
couldn't resolve module/action` error anymore. + ## Helper for calling nested Ansible In many places in the project, there is nested Ansible execution done. From 00cc2824f012d24ac4741c18bf923523b6187be8 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 28 Aug 2025 08:40:02 +0200 Subject: [PATCH 345/480] Drop ceph playbook symlink After merging change [1], the places where Ceph playbook was used, have been moved to use hooks. Let's drop the symlink and verify in all used job, that the playbook path has been updated correctly. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/3154 Signed-off-by: Daniel Pawlik --- playbooks/ceph.yml | 1 - 1 file changed, 1 deletion(-) delete mode 120000 playbooks/ceph.yml diff --git a/playbooks/ceph.yml b/playbooks/ceph.yml deleted file mode 120000 index 13df2bff76..0000000000 --- a/playbooks/ceph.yml +++ /dev/null @@ -1 +0,0 @@ -../hooks/playbooks/ceph.yml \ No newline at end of file From 423b9b6c98abbe0761ae5c70aa9e18467f5850ea Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 5 Sep 2025 13:02:53 +0530 Subject: [PATCH 346/480] Add read_global_vars.yml in pre-run stage In order to ensure global vars are loaded correctly, this commit adds read_global_vars.yml playbook in pre-run stage of required jobs. It will ensure that future commits can replace common used vars without worrying about jobs. --- zuul.d/base.yaml | 4 ++++ zuul.d/end-to-end.yaml | 1 + zuul.d/kuttl.yaml | 1 + zuul.d/kuttl_multinode.yaml | 2 ++ zuul.d/tcib.yaml | 1 + zuul.d/test-job.yaml | 1 + 6 files changed, 10 insertions(+) diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index a41ad6c290..5832869ca8 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -13,6 +13,7 @@ and prepare the environment for running ci-framework playbooks. Once the job finishes, it will collect necessary logs. 
pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: @@ -139,6 +140,7 @@ roles: &multinode_edpm_roles - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: &multinode_edpm_pre_run + - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml @@ -281,6 +283,7 @@ roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: @@ -305,6 +308,7 @@ CRC environment and before running ci-boostrap roles to configure networking between nodes. pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/bootstrap-networking-mapper.yml diff --git a/zuul.d/end-to-end.yaml b/zuul.d/end-to-end.yaml index 5c0f1babdf..eb0ac9dd8a 100644 --- a/zuul.d/end-to-end.yaml +++ b/zuul.d/end-to-end.yaml @@ -9,6 +9,7 @@ zuul_log_collection: true registry_login_enabled: false pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: diff --git a/zuul.d/kuttl.yaml b/zuul.d/kuttl.yaml index 883e53cd3c..6ec48dd4ca 100644 --- a/zuul.d/kuttl.yaml +++ b/zuul.d/kuttl.yaml @@ -9,6 +9,7 @@ zuul_log_collection: true parent: base-simple-crc pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml run: - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index f0b38929b4..bd15970b38 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -41,6 +41,8 @@ ip: 172.18.0.5 tenant: ip: 172.19.0.5 + pre-run: + - ci/playbooks/read_global_vars.yml run: - ci/playbooks/dump_zuul_data.yml - ci/playbooks/kuttl/run.yml diff --git a/zuul.d/tcib.yaml b/zuul.d/tcib.yaml index f90fc3d9a3..642b8fd106 100644 --- 
a/zuul.d/tcib.yaml +++ b/zuul.d/tcib.yaml @@ -11,6 +11,7 @@ - github.com/openstack-k8s-operators/tcib - github.com/openstack-k8s-operators/install_yamls pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/content_provider/pre.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/test-job.yaml b/zuul.d/test-job.yaml index 9e7e1cd15c..30c0d35a20 100644 --- a/zuul.d/test-job.yaml +++ b/zuul.d/test-job.yaml @@ -5,6 +5,7 @@ nodeset: centos-stream-9 abstract: true pre-run: + - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml run: From 72d5b66610285777977fffffca958c7de0961b5a Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 11 Sep 2025 17:30:21 +0200 Subject: [PATCH 347/480] Add hook_retry parameter for run_hook role Sometimes, some hooks fails like on downloading tools. Let's add a new parameter: hook_retry that will make a retry on those hooks, where the parameter `hook_retry` is set to true. Signed-off-by: Daniel Pawlik --- roles/run_hook/README.md | 2 ++ roles/run_hook/molecule/default/converge.yml | 23 +++++++++++++++ roles/run_hook/molecule/default/molecule.yml | 6 ++++ roles/run_hook/molecule/default/prepare.yml | 11 ++++++++ .../default/templates/dummy-retry.yml.j2 | 25 +++++++++++++++++ roles/run_hook/tasks/playbook.yml | 28 ++++++++++++++++++- 6 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 roles/run_hook/molecule/default/templates/dummy-retry.yml.j2 diff --git a/roles/run_hook/README.md b/roles/run_hook/README.md index 5667daca65..8f78044a07 100644 --- a/roles/run_hook/README.md +++ b/roles/run_hook/README.md @@ -37,6 +37,7 @@ name: * `source`: (String) Source of the playbook. If it's a filename, the playbook is expected in `hooks/playbooks`. It can be an absolute path. * `type`: (String) Type of the hook. In this case, set it to `playbook`. 
* `extra_vars`: (Dict) Structure listing the extra variables you would like to pass down ([extra_vars explained](#extra_vars-explained)) +* `hook_retry` (Boolean) Set true, if the hook execution should be retried on failure ##### About OpenShift namespaces and install_yamls @@ -55,6 +56,7 @@ Since `install_yamls` might not be initialized, the `run_hook` is exposing two n * `source`: (String) Source of the playbook. If it's a filename, the playbook is expected in `hooks/playbooks`. It can be an absolute path. * `type`: (String) Type of the hook. In this case, set it to `playbook`. * `extra_vars`: (Dict) Structure listing the extra variables you would like to pass down ([extra_vars explained](#extra_vars-explained)) +* `hook_retry` (Boolean) Set true, if the hook execution should be retried on failure #### Hook callback diff --git a/roles/run_hook/molecule/default/converge.yml b/roles/run_hook/molecule/default/converge.yml index ba0d62910e..cf21d387cf 100644 --- a/roles/run_hook/molecule/default/converge.yml +++ b/roles/run_hook/molecule/default/converge.yml @@ -103,3 +103,26 @@ that: - test_list is defined - test_list | length == 2 + + - name: Hooks with retry + block: + - name: Run hook with retry + vars: + step: retry_hook + ansible.builtin.include_role: + name: run_hook + + - name: Check if fake file exists for retry playbook + ansible.builtin.stat: + path: /tmp/molecule-retry-fake-file + register: _molecule_fake_file + + - name: Ensure file exists and was created on retry + ansible.builtin.assert: + that: + - _molecule_fake_file.stat.exists + always: + - name: Remove generated file + ansible.builtin.file: + path: /tmp/molecule-retry-fake-file + state: absent diff --git a/roles/run_hook/molecule/default/molecule.yml b/roles/run_hook/molecule/default/molecule.yml index 4468af65e7..186751b6a4 100644 --- a/roles/run_hook/molecule/default/molecule.yml +++ b/roles/run_hook/molecule/default/molecule.yml @@ -50,3 +50,9 @@ provisioner: extra_vars: foo: bar file: 
"/tmp/dummy-env.yml" + + retry_hook: + - name: Run hook with retry + source: "/tmp/dummy-retry.yml" + type: playbook + retry_hook: true diff --git a/roles/run_hook/molecule/default/prepare.yml b/roles/run_hook/molecule/default/prepare.yml index 77ae5a826f..bad6d093d5 100644 --- a/roles/run_hook/molecule/default/prepare.yml +++ b/roles/run_hook/molecule/default/prepare.yml @@ -42,3 +42,14 @@ - dummy-4.yml - dummy-5.yml - dummy-6.yml + + - name: Remove dummy file for retry playbook test + ansible.builtin.file: + path: /tmp/molecule-retry-fake-file + state: absent + + - name: Create dummy retry playbook + ansible.builtin.template: + dest: "/tmp/dummy-retry.yml" + src: "dummy-retry.yml.j2" + mode: "0644" diff --git a/roles/run_hook/molecule/default/templates/dummy-retry.yml.j2 b/roles/run_hook/molecule/default/templates/dummy-retry.yml.j2 new file mode 100644 index 0000000000..0c4c6da3fc --- /dev/null +++ b/roles/run_hook/molecule/default/templates/dummy-retry.yml.j2 @@ -0,0 +1,25 @@ +--- +- hosts: localhost + gather_facts: true + tasks: +{% raw %} + - name: Check if fake file exists + ansible.builtin.stat: + path: /tmp/molecule-retry-fake-file + register: _molecule_fake_file + + - name: Create a file, if it does not exists + when: not _molecule_fake_file.stat.exists + ansible.builtin.file: + path: /tmp/molecule-retry-fake-file + state: touch + + - name: Finish if file does not exists + when: not _molecule_fake_file.stat.exists + ansible.builtin.meta: end_play + + - name: Print Hello world if file exists + when: _molecule_fake_file.stat.exists + ansible.builtin.debug: + msg: 'Hello retry world' +{% endraw %} diff --git a/roles/run_hook/tasks/playbook.yml b/roles/run_hook/tasks/playbook.yml index 3f3155460a..4cb6a1004c 100644 --- a/roles/run_hook/tasks/playbook.yml +++ b/roles/run_hook/tasks/playbook.yml @@ -89,7 +89,8 @@ # even less from a task. So the way to run a playbook from within a playbook # is to call a command. 
Though we may lose some of the data passed to the # "main" play. -- name: "Run {{ hook.name }}" +- name: "Run hook without retry - {{ hook.name }}" + when: not hook.hook_retry | default(false) no_log: "{{ cifmw_nolog | default(true) | bool }}" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir }}/artifacts" @@ -109,6 +110,31 @@ -e "playbook_dir={{ playbook_path | dirname }}" {{ playbook_path }} +- name: "Run hook with retry - {{ hook.name }}" + when: hook.hook_retry | default(false) + no_log: "{{ cifmw_nolog | default(true) | bool }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_basedir }}/artifacts" + extra_args: + ANSIBLE_CONFIG: "{{ hook.config_file | default(ansible_config_file) }}" + ANSIBLE_LOG_PATH: "{{ log_path }}" + creates: "{{ hook.creates | default(omit) }}" + script: >- + ansible-playbook -i {{ hook.inventory | default(inventory_file) }} + {% if hook.connection is defined -%} + -c {{ hook.connection }} + {% endif -%} + {{ extra_vars }} + -e "cifmw_basedir={{ cifmw_basedir }}" + -e "step={{ step }}" + -e "hook_name={{ hook_name }}" + -e "playbook_dir={{ playbook_path | dirname }}" + {{ playbook_path }} + register: hook_result + retries: 3 + delay: 10 + until: hook_result is not failed + - name: Load generated content if any block: - name: Check if we have a file From 1e0bd01fb82f5afff0999726dc7ef214f19f0f10 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Tue, 16 Sep 2025 15:52:47 +0200 Subject: [PATCH 348/480] [BGP] Remove hooks duplicating nova_wait_for_compute_service.yml The nova_wait_for_compute_service.yml hook/playbook already covers what BGP jobs did with bgp-l3-computes-ready.yml and discover-hosts-loop.yaml OSPRH-19510 --- .github/CODEOWNERS | 1 - playbooks/bgp-l3-computes-ready.yml | 19 ------------------- playbooks/bgp/discover-hosts-loop.yaml | 24 ------------------------ 3 files changed, 44 deletions(-) delete mode 100644 playbooks/bgp-l3-computes-ready.yml delete mode 100644 playbooks/bgp/discover-hosts-loop.yaml diff --git 
a/.github/CODEOWNERS b/.github/CODEOWNERS index d74c2cd320..69528e8532 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,6 @@ roles/adoption_osp_deploy @openstack-k8s-operators/adoption-core-reviewers # BGP roles/ci_gen_kustomize_values/templates/bgp_dt01 @openstack-k8s-operators/bgp roles/ci_gen_kustomize_values/templates/bgp-l3-xl @openstack-k8s-operators/bgp -playbooks/bgp-l3-computes-ready.yml @openstack-k8s-operators/bgp playbooks/bgp @openstack-k8s-operators/bgp scenarios/reproducers/bgp-l3-xl.yml @openstack-k8s-operators/bgp diff --git a/playbooks/bgp-l3-computes-ready.yml b/playbooks/bgp-l3-computes-ready.yml deleted file mode 100644 index 7f9610e432..0000000000 --- a/playbooks/bgp-l3-computes-ready.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Wait until computes are ready - hosts: "{{ cifmw_target_host | default('localhost') }}" - tasks: - - name: Wait until number of computes is the expected one - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" - ansible.builtin.command: >- - oc rsh - -n openstack - openstackclient - openstack compute service list -f value --service nova-compute - register: nova_compute_service_list - retries: 30 - delay: 4 - until: - - nova_compute_service_list.rc == 0 - - nova_compute_service_list.stdout | regex_findall('enabled up') | length == num_computes | int diff --git a/playbooks/bgp/discover-hosts-loop.yaml b/playbooks/bgp/discover-hosts-loop.yaml deleted file mode 100644 index 7069a398b1..0000000000 --- a/playbooks/bgp/discover-hosts-loop.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: BGP discover hosts - hosts: controller-0 - gather_facts: true - tasks: - - name: Wait for expected number of discovered hypervisor - vars: - num_computes: >- - {{ - groups['r0-computes'] | length + - groups['r1-computes'] | length + - groups['r2-computes'] | length - }} - ansible.builtin.shell: - cmd: > - oc rsh -n openstack nova-cell0-conductor-0 nova-manage cell_v2 discover_hosts 
> /dev/null && - oc -n openstack rsh openstackclient openstack hypervisor list -f value -c State - register: hv_result - changed_when: false - retries: 50 - delay: 2 - until: - - hv_result.rc == 0 - - hv_result.stdout_lines == ["up"] * (num_computes | int) From 98f252cd954555c13536a0617e4c7532ca5cb2a0 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 18 Sep 2025 15:25:30 +0530 Subject: [PATCH 349/480] Docs: Minor updates in multiple doc file. This commit updates the team and component that should be chosen while filling an issue in Jira. It also corrects a typo in another doc file. --- docs/dictionary/en-custom.txt | 1 + docs/source/index.rst | 2 +- docs/source/reproducers/03-zuul.md | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 1856049772..70e842a220 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -20,6 +20,7 @@ aqc args arx arxcruz +AssignedTeam auth authfile autohold diff --git a/docs/source/index.rst b/docs/source/index.rst index 5df69d35f1..19bf82e7f4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -24,7 +24,7 @@ The project is under constant development, bugs happen. If you have such a bad encounter, please fill an `issue in Jira`_. -Chose **OSPRH** project, add **cifmw** label, and set the Workstream to **CI Framework** and the Team to **OSP CI Framework**. +Choose **OSPRH** project, add **cifmw** label, and set the Component to **ci-framework** and the AssignedTeam to **rhos-dfg-tooling**. Please provide the following information: diff --git a/docs/source/reproducers/03-zuul.md b/docs/source/reproducers/03-zuul.md index acda886c63..4701f52f19 100644 --- a/docs/source/reproducers/03-zuul.md +++ b/docs/source/reproducers/03-zuul.md @@ -33,7 +33,7 @@ will be accessible from the private network interface. 
[laptop]$ make setup_molecule ``` ### Create an inventory file in order to consume your hypervisor -You can create a file in `custom/inventor.yml` for instance (ensure you ignore +You can create a file in `custom/inventory.yml` for instance (ensure you ignore that path from git tree in order to NOT inject that custom inventory). The file should look like this: From 530ca37656ac992240733731e2429f72a60cec62 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 18 Sep 2025 16:26:17 +0530 Subject: [PATCH 350/480] .githooks/pre-push: Use /usr/bin/env sh instead of /usr/bin/sh While /usr/bin/sh runs fine on linux, it is better to use env to lookup sh from path, as macOS did not have sh in /usr/bin (It was in /bin). --- .githooks/pre-push | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.githooks/pre-push b/.githooks/pre-push index bed7770d0b..64f081c9e7 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -1,4 +1,4 @@ -#!/usr/bin/sh +#!/usr/bin/env sh set -e From 2437f9e5d3f4d46af4293b795cc013589cea2f63 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 18 Sep 2025 17:15:13 +0200 Subject: [PATCH 351/480] Add various_vars cifmw_helpers tasks In some places, we used to have variables like: cifmw_extras which can have files or dictionaries. Let's parse these variables and set as fact correctly. 
Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/README.md | 43 ++++++++++++++++++++++ roles/cifmw_helpers/tasks/various_vars.yml | 13 +++++++ 2 files changed, 56 insertions(+) create mode 100644 roles/cifmw_helpers/tasks/various_vars.yml diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md index ea42af052c..0eef1cef7e 100644 --- a/roles/cifmw_helpers/README.md +++ b/roles/cifmw_helpers/README.md @@ -132,8 +132,25 @@ That code, can be replaced by: - logs ``` +#### Read var file and set as fact + +Example task execution: + +```yaml +- name: Read base centos-9 scenarios + vars: + provided_file: > + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ + ci-framework/scenarios/centos-9/base.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_file.yml +``` + Of course, before Zuul execute the playbook, it is mandatory to call `playbooks/cifmw_collection_zuul_executor.yml`. +#### Read directory and parse all files and then set as fact + For setting all files in the directory as fact, use `var_dir.yml` tasks. Example: @@ -147,3 +164,29 @@ Example: name: cifmw_helpers tasks_from: var_dir.yml ``` + +#### Set as fact various variables + +In some places in our workflow, we can have a list that contains +various variables like files: "@some_file.yml" or dictionaries like "some: var". +To parse them and set as a fact, use `various_vars.yml` task file. 
+ +```yaml +- name: Example + hosts: localhost + tasks: + - name: Test various vars + vars: + various_vars: + - "@scenarios/centos-9/base.yml" + - test: ok + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: various_vars.yml + + - name: Print parsed variables + ansible.builtin.debug: + msg: | + "Value for file is: {{ cifmw_repo_setup_os_release }}" + "Value for dict is: {{ test }}" +``` diff --git a/roles/cifmw_helpers/tasks/various_vars.yml b/roles/cifmw_helpers/tasks/various_vars.yml new file mode 100644 index 0000000000..0bc17536fd --- /dev/null +++ b/roles/cifmw_helpers/tasks/various_vars.yml @@ -0,0 +1,13 @@ +--- +# various_vars +- name: Filter Ansible variable files and set as fact + vars: + provided_file: "{{ item | replace('@','') }}" + ansible.builtin.include_tasks: var_file.yml + loop: "{{ various_vars | select('match', '^@.*\\.(yml|yaml)$') | list }}" + +- name: Filter just dict and set as fact + ansible.builtin.set_fact: + "{{ item.key }}": "{{ item.value }}" + cacheable: true + loop: "{{ (various_vars | select('mapping') | list) | map('dict2items') | flatten }}" From 008f26f500f7c2e6a6142c7325ae0dc849d20795 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 16 Sep 2025 15:57:08 +0200 Subject: [PATCH 352/480] libvirt_manager: allow users to specify per-VM devices Add a new parameter 'devices' to the definition of virtual machine types in cifmw_libvirt_manager_configuration. The parameter is optional. If set, it contains a dictionary where the keys are the VMs of that type that needs devices to be attached, and the values are lists of strings, where each string must contain a valid libvirt XML element that will be passed to `virsh attach-device`. This is useful to expose for example passthrough devices to the virtual machines. 
--- roles/libvirt_manager/README.md | 8 +++++++ .../libvirt_manager/tasks/attach_devices.yml | 19 ++++++++++++++++ roles/libvirt_manager/tasks/create_vms.yml | 22 +++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 roles/libvirt_manager/tasks/attach_devices.yml diff --git a/roles/libvirt_manager/README.md b/roles/libvirt_manager/README.md index f9c79514a8..a8beb7c61f 100644 --- a/roles/libvirt_manager/README.md +++ b/roles/libvirt_manager/README.md @@ -96,6 +96,7 @@ cifmw_libvirt_manager_configuration: uefi: (boolean, toggle UEFI boot. Optional, defaults to false) bootmenu_enable: (string, toggle bootmenu. Optional, defaults to "no") networkconfig: (dict or list[dict], [network-config](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html#network-config-v2) v2 config, needed if a static ip address should be defined at boot time in absence of a dhcp server in special scenarios. Optional) + devices: (dict, optional, defaults to {}. The keys are the VMs of that type that needs devices to be attached, and the values are lists of strings, where each string must contain a valid libvirt XML element that will be passed to virsh attach-device) networks: net_name: ``` @@ -140,6 +141,13 @@ cifmw_libvirt_manager_configuration: - osp_trunk extra_disks_num: 5 extra_disks_size: '1G' + devices: + "0": >- + + +
+ + controller: image_url: "{{ cifmw_discovered_image_url }}" sha256_image_name: "{{ cifmw_discovered_hash }}" diff --git a/roles/libvirt_manager/tasks/attach_devices.yml b/roles/libvirt_manager/tasks/attach_devices.yml new file mode 100644 index 0000000000..40c424b8ac --- /dev/null +++ b/roles/libvirt_manager/tasks/attach_devices.yml @@ -0,0 +1,19 @@ +--- +- name: Create a temporary file to hold the device configuration + ansible.builtin.tempfile: + state: file + prefix: "{{ _vm_name }}_device_" + register: vm_devices_file + +- name: Copy the device configuration requested for the VM to a temporary file + ansible.builtin.copy: + content: "{{ _vm_device }}" + dest: "{{ vm_devices_file.path }}" + mode: '0660' + +- name: Attach the device configuration to the VM + ansible.builtin.shell: + cmd: >- + set -o pipefail; + virsh -c qemu:///system attach-device {{ _vm_name }} {{ vm_devices_file.path }} + --persistent; diff --git a/roles/libvirt_manager/tasks/create_vms.yml b/roles/libvirt_manager/tasks/create_vms.yml index 3705e08af1..a05520e6b3 100644 --- a/roles/libvirt_manager/tasks/create_vms.yml +++ b/roles/libvirt_manager/tasks/create_vms.yml @@ -180,3 +180,25 @@ --type cdrom --mode readonly --persistent + +- name: "Attach additional devices if specified" + when: + - vm_data.devices is defined + - vm_data.devices[_vm_specific_index] is defined + vars: + _vm_name: "cifmw-{{ vm }}" + _vm_all_devices: "{{ vm_data.devices[_vm_specific_index] }}" + # This is the index of the VM for its type. 
+ # For example '1' if the VM is 'compute-1', or '2' if it is 'ocp-master-2' + _vm_specific_index: "{{ vm | regex_search('^.+-([0-9]+)','\\1') | first | default('0') | string }}" + # Make sure the value is always a list + _vm_devices_content: >- + {{ + _vm_all_devices + if (_vm_all_devices | type_debug == "list") + else [_vm_all_devices] + }} + ansible.builtin.include_tasks: attach_devices.yml + loop: "{{ _vm_devices_content }}" + loop_control: + loop_var: _vm_device From c799f413b31ab3477396b6fabe21ad0221f9b1ae Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 18 Sep 2025 22:37:22 +0200 Subject: [PATCH 353/480] Add parsing external inventory file and add host Sometimes, the VMs on which action would be done are not available when the main Ansible playbook is executed. In that case, to parse the new inventory file use `inventory_file.yml` task, then you would be able to use delegation to execute tasks on new host. Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/README.md | 19 +++++++++++++++++++ roles/cifmw_helpers/tasks/inventory_file.yml | 16 ++++++++++++++++ roles/cifmw_helpers/tasks/parse_inventory.yml | 14 ++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 roles/cifmw_helpers/tasks/inventory_file.yml create mode 100644 roles/cifmw_helpers/tasks/parse_inventory.yml diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md index 0eef1cef7e..0b084df452 100644 --- a/roles/cifmw_helpers/README.md +++ b/roles/cifmw_helpers/README.md @@ -190,3 +190,22 @@ To parse them and set as a fact, use `various_vars.yml` task file. "Value for file is: {{ cifmw_repo_setup_os_release }}" "Value for dict is: {{ test }}" ``` + +#### Parse inventory file and add it to inventory + +Sometimes, the VMs on which action would be done are not available when the +main Ansible playbook is executed. In that case, to parse the new inventory file +use `inventory_file.yml` task, then you would be able to use delegation to +execute tasks on new host. 
+ +```yaml +- name: Test parsing additional inventory file + hosts: localhost + tasks: + - name: Read inventory file and add it using add_host module + vars: + include_inventory_file: vms-inventory.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: inventory_file.yml +``` diff --git a/roles/cifmw_helpers/tasks/inventory_file.yml b/roles/cifmw_helpers/tasks/inventory_file.yml new file mode 100644 index 0000000000..db8329a784 --- /dev/null +++ b/roles/cifmw_helpers/tasks/inventory_file.yml @@ -0,0 +1,16 @@ +--- +- name: Read inventory file + ansible.builtin.slurp: + src: "{{ include_inventory_file }}" + register: _inventory_file + +- name: Parse inventory file content + ansible.builtin.set_fact: + inventory_data: "{{ _inventory_file.content | b64decode | from_yaml }}" + +- name: Process each group with hosts + ansible.builtin.include_tasks: + file: parse_inventory.yml + loop: "{{ inventory_data | dict2items | selectattr('value.hosts', 'defined') | list }}" + loop_control: + loop_var: group_item diff --git a/roles/cifmw_helpers/tasks/parse_inventory.yml b/roles/cifmw_helpers/tasks/parse_inventory.yml new file mode 100644 index 0000000000..4f36be9933 --- /dev/null +++ b/roles/cifmw_helpers/tasks/parse_inventory.yml @@ -0,0 +1,14 @@ +--- +- name: "Add hosts for group {{ group_item.key }}" + ansible.builtin.add_host: + name: "{{ host_item.key }}" + groups: "{{ group_item.key }}" + ansible_host: "{{ host_item.value.ansible_host | default(omit) }}" + ansible_ssh_common_args: "{{ host_item.value.ansible_ssh_common_args | default(omit) }}" + ansible_ssh_private_key_file: "{{ host_item.value.ansible_ssh_private_key_file | default(omit) }}" + ansible_user: "{{ host_item.value.ansible_user | default(omit) }}" + ansible_connection: "{{ host_item.value.ansible_connection | default(omit) }}" + cifmw_hypervisor_host: "{{ host_item.value.cifmw_hypervisor_host | default(omit) }}" + loop: "{{ group_item.value.hosts | dict2items }}" + loop_control: + loop_var: 
host_item From a44a124cc7d606d930d0ae8123a60abe62da7e17 Mon Sep 17 00:00:00 2001 From: Adrian Fusco Arnejo Date: Mon, 22 Sep 2025 12:55:26 +0200 Subject: [PATCH 354/480] Remove KRB5_TRACE to avoid trace logging output We can remove the trace since OSPCIX-936 has been resolved --- roles/dlrn_report/tasks/dlrn_report_results.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/dlrn_report/tasks/dlrn_report_results.yml b/roles/dlrn_report/tasks/dlrn_report_results.yml index 0d17970d01..7e474c1c29 100644 --- a/roles/dlrn_report/tasks/dlrn_report_results.yml +++ b/roles/dlrn_report/tasks/dlrn_report_results.yml @@ -20,8 +20,6 @@ kinit {{ cifmw_dlrn_report_krb_user_realm }} -k -t {{ cifmw_dlrn_report_keytab }} - environment: - KRB5_TRACE: /dev/stdout retries: 5 delay: 60 register: _kinit_status From 52d01a3ae3e9f753b90a7a877a2e5284bbb1ef88 Mon Sep 17 00:00:00 2001 From: bshewale Date: Mon, 15 Sep 2025 18:15:48 +0530 Subject: [PATCH 355/480] Cleanup NetworkManager Dummy Interface Reproducer cleanup fails with permission denied error when trying to remove NetworkManager dummy interface connection files created by libvirt_manager role. The framework creates NetworkManager dummy interface connection files (e.g., dummy-fssqrsss.nmconnection) during libvirt network setup but fails to properly clean them up during reproducer cleanup, resulting in permission denied errors. This PR will solve this issue by adding tasks to find and remove those dummy interface in the clean_layout.yml playbook. 
testresult https://softwarefactory-project.io/zuul/t/rdoproject.org/build/c48f86a9b817405dbd058fa87ca9135e/log/job-output.txt#2160-2169 --- roles/libvirt_manager/tasks/clean_layout.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/roles/libvirt_manager/tasks/clean_layout.yml b/roles/libvirt_manager/tasks/clean_layout.yml index 28d3fbaaba..11d22bceff 100644 --- a/roles/libvirt_manager/tasks/clean_layout.yml +++ b/roles/libvirt_manager/tasks/clean_layout.yml @@ -166,6 +166,21 @@ state: absent loop: "{{ cleanup_nets }}" + - name: Find dummy interface connection files + ansible.builtin.find: + paths: /etc/NetworkManager/system-connections/ + patterns: "dummy*" + file_type: file + register: dummy_connections + + - name: Remove dummy interface connections + become: true + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ dummy_connections.files }}" + when: dummy_connections.matched > 0 + - name: Clean firewalld libvirt zone become: true ansible.posix.firewalld: From deb0713d6c941fcd9134b8d2ebd46aaebdac7bee Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Tue, 16 Sep 2025 08:17:48 -0400 Subject: [PATCH 356/480] Add warning for ExtraConfigmapsMounts deprecation Now that test-operator supports ExtraMounts parameter, there is no need for a parameter adding extra configmap mounts. It can be directly done through ExtraMounts, so I think of it as a duplicate code. But it is not a good practice to remove parameters right away, so I am first adding warning for users who might be using extraConfigmapsMounts parameters about its deprecation. Let's remove it after this warning is old enough. 
--- roles/test_operator/README.md | 4 ++-- roles/test_operator/defaults/main.yml | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index fda3bb6138..e798998b44 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -85,7 +85,7 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_extra_images`: (List) A list of images that should be uploaded to OpenStack before the tests are executed. The value is passed to extraImages parameter in the [Tempest CR](https://openstack-k8s-operators.github.io/test-operator/crds.html#tempest-custom-resource). Default value: `[]` * `cifmw_test_operator_tempest_network_attachments`: (List) List of network attachment definitions to attach to the tempest pods spawned by test-operator. Default value: `[]`. * `cifmw_test_operator_tempest_extra_rpms`: (List) . A list of URLs that point to RPMs that should be installed before the execution of tempest. Note that this parameter has no effect when `cifmw_test_operator_tempest_external_plugin` is used. Default value: `[]` -* `cifmw_test_operator_tempest_extra_configmaps_mounts`: (List) A list of configmaps that should be mounted into the tempest test pods. Default value: `[]` +* `cifmw_test_operator_tempest_extra_configmaps_mounts`: WARNING: This parameter will be deprecated! Please use `cifmw_test_operator_tempest_extra_mounts` parameter instead. (List) A list of configmaps that should be mounted into the tempest test pods. Default value: `[]` * `cifmw_test_operator_tempest_extra_mounts`: (List) A list of additional volume mounts for the tempest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_tempest_debug`: (Bool) Run Tempest in debug mode, it keeps the operator pod sleeping infinity (it must only set to `true`only for debugging purposes). 
Default value: `false` * `cifmw_test_operator_tempest_rerun_failed_tests`: (Bool) Activate tempest re-run feature. When activated, tempest will perform another run of the tests that failed during the first execution. Default value: `false` @@ -196,7 +196,7 @@ Default value: {} * `cifmw_test_operator_ansibletest_openstack_config_secret`: (String) The name of the Secret containing the secure.yaml. Default value: "openstack-config-secret" * `cifmw_test_operator_ansibletest_debug`: (Bool) Run ansible playbook with -vvvv. Default value: `false` * `cifmw_test_operator_ansibletest_workflow`: (List) A parameter that contains a workflow definition. Default value: `[]` -* `cifmw_test_operator_ansibletest_extra_configmaps_mounts`: (List) Extra configmaps for mounting in the pod. Default value: `[]` +* `cifmw_test_operator_ansibletest_extra_configmaps_mounts`: WARNING: This parameter will be deprecated! Please use `cifmw_test_operator_ansibletest_extra_mounts` parameter instead. (List) Extra configmaps for mounting in the pod. Default value: `[]` * `cifmw_test_operator_ansibletest_extra_mounts`: (List) A list of additional volume mounts for the ansibletest test pods. Each item specifies a volume name, mount path, and other mount properties. Default value: `[]` * `cifmw_test_operator_ansibletest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When kept untouched it defaults to the resource limits specified on the test-operator side. Default value: `{}` * `cifmw_test_operator_ansibletest_config`: Definition of AnsibleTest CRD instance that is passed to the test-operator (see [the test-operator documentation](https://openstack-k8s-operators.github.io/test-operator/crds.html)). 
Default value: diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index ac626afbca..31f3d6cab5 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -149,6 +149,7 @@ cifmw_test_operator_tempest_config: networkAttachments: "{{ stage_vars_dict.cifmw_test_operator_tempest_network_attachments }}" tolerations: "{{ cifmw_test_operator_tolerations | default(omit) }}" nodeSelector: "{{ cifmw_test_operator_node_selector | default(omit) }}" + # Note: This parameter will be deprecated! Please use cifmw_test_operator_tempest_extra_mounts parameter instead extraConfigmapsMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_configmaps_mounts | default(omit) }}" extraMounts: "{{ stage_vars_dict.cifmw_test_operator_tempest_extra_mounts | default(omit) }}" resources: "{{ stage_vars_dict.cifmw_test_operator_tempest_resources | default(omit) }}" @@ -251,6 +252,7 @@ cifmw_test_operator_ansibletest_config: spec: SELinuxLevel: "{{ cifmw_test_operator_selinux_level }}" containerImage: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_image }}:{{ stage_vars_dict.cifmw_test_operator_ansibletest_image_tag }}" + # Note: This parameter will be deprecated! Please use cifmw_test_operator_ansibletest_extra_mounts parameter instead extraConfigmapsMounts: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_extra_configmaps_mounts }}" extraMounts: "{{ stage_vars_dict.cifmw_test_operator_ansibletest_extra_mounts | default(omit) }}" storageClass: "{{ cifmw_test_operator_storage_class }}" From a54ba1a8d2ef78b7bb3cb794bbdcf037c59c933f Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 22 Sep 2025 13:08:37 +0200 Subject: [PATCH 357/480] Compress all log and artifact files that are in zuul-output dir Sometimes the logs might be huge and we are out of the storage on the log server. Let's try to compress all files before upload. 
According to the current Apache vhost config [1], all files with .gz extension should be tread as a "text/html". [1] https://github.com/softwarefactory-project/sf-config/blob/master/ansible/roles/sf-logserver/templates/logserver.conf.j2 Signed-off-by: Daniel Pawlik --- ci/playbooks/collect-logs.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ci/playbooks/collect-logs.yml b/ci/playbooks/collect-logs.yml index 46f265607a..1447508f9d 100644 --- a/ci/playbooks/collect-logs.yml +++ b/ci/playbooks/collect-logs.yml @@ -139,6 +139,16 @@ dest: "{{ ansible_user_dir }}/zuul-output/logs/docs_build" always: + - name: Compress logs bigger than 2MB + when: cifmw_compress_all_logs | default(true) + ansible.builtin.shell: > + find "{{ ansible_user_dir }}/zuul-output/" + -type f + ! -name "*.gz" + ! -name "*.xz" + -size +2M + -exec gzip "{}" + + - name: Copy files from workspace on node vars: work_dir: "{{ ansible_user_dir }}/workspace" From a8b9387269de3f425510b2aabebb5daef2ca7aea Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 23 Sep 2025 15:42:47 +0200 Subject: [PATCH 358/480] Use gzip with best parameter to make logs size smaller With the best parameter, the logs should be even smaller than before. Signed-off-by: Daniel Pawlik --- ci/playbooks/collect-logs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/playbooks/collect-logs.yml b/ci/playbooks/collect-logs.yml index 1447508f9d..62d0a8f7ad 100644 --- a/ci/playbooks/collect-logs.yml +++ b/ci/playbooks/collect-logs.yml @@ -147,7 +147,7 @@ ! -name "*.gz" ! 
-name "*.xz" -size +2M - -exec gzip "{}" + + -exec gzip --best "{}" + - name: Copy files from workspace on node vars: From e670a395c7f060e4a6746bb8bc3e6241505fa994 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Fri, 19 Sep 2025 07:36:22 -0400 Subject: [PATCH 359/480] nova_wait_for_compute_service: Add retry logic for transient auth failures Add configurable retry logic to handle transient OpenShift API authentication failures in the nova_wait_for_compute_service hook playbook. When OpenShift is under load, API authentication can temporarily fail with HTTP 401 "Unauthorized" errors, causing the hook to abort with "NoneType: None" exceptions. This change adds retry logic around 'oc project' commands to handle these transient authentication failures. The retry mechanism uses OC_RETRIES (5 attempts, 30s delay) specifically for OpenShift authentication failures before executing the main business logic. This ensures we can reliably connect to the cluster while allowing the existing RETRIES logic to handle legitimate OpenStack service startup delays. Changes: - Add _oc_retries and _oc_delay variables for authentication retry configuration - Add retry loops around 'oc project' commands in both script blocks - Provide clear logging for authentication retry attempts This prevents costly deployment failures when experiencing temporary OpenShift API authentication issues while preserving appropriate timeouts for service readiness checks. 
Co-Authored-By: Claude --- .../nova_wait_for_compute_service.yml | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/hooks/playbooks/nova_wait_for_compute_service.yml b/hooks/playbooks/nova_wait_for_compute_service.yml index 435c2be8a8..feaae4f709 100644 --- a/hooks/playbooks/nova_wait_for_compute_service.yml +++ b/hooks/playbooks/nova_wait_for_compute_service.yml @@ -16,6 +16,9 @@ _number_of_computes: 0 _retries: 25 _cell_conductor: null + # Retry settings for oc commands to handle transient auth failures + _oc_retries: 5 + _oc_delay: 30 environment: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" PATH: "{{ cifmw_path }}" @@ -29,7 +32,21 @@ COMPUTES={{ _number_of_computes }} RETRIES={{ _retries }} COUNTER=0 - oc project {{ namespace }} + OC_RETRIES={{ _oc_retries }} + OC_DELAY={{ _oc_delay }} + + # Retry oc project command to handle transient auth failures + oc_retry_counter=0 + until oc project {{ namespace }}; do + if [[ "$oc_retry_counter" -ge "$OC_RETRIES" ]]; then + echo "Failed to authenticate with OpenShift after $OC_RETRIES attempts" + exit 1 + fi + oc_retry_counter=$[$oc_retry_counter +1] + echo "OpenShift auth failed, retrying in ${OC_DELAY}s (attempt $oc_retry_counter/$OC_RETRIES)" + sleep $OC_DELAY + done + until [ $(oc rsh openstackclient openstack compute service list --service nova-compute -f value | wc -l) -eq "$COMPUTES" ]; do if [[ "$COUNTER" -ge "$RETRIES" ]]; then exit 1 @@ -37,6 +54,7 @@ COUNTER=$[$COUNTER +1] sleep 10 done + - name: Run nova-manage discover_hosts and wait for host records cifmw.general.ci_script: output_dir: "{{ cifmw_basedir }}/artifacts" @@ -46,7 +64,21 @@ COMPUTES={{ _number_of_computes | int + 4 }} RETRIES={{ _retries }} COUNTER=0 - oc project {{ namespace }} + OC_RETRIES={{ _oc_retries }} + OC_DELAY={{ _oc_delay }} + + # Retry oc project command to handle transient auth failures + oc_retry_counter=0 + until oc project {{ namespace }}; do + if [[ "$oc_retry_counter" -ge "$OC_RETRIES" ]]; 
then + echo "Failed to authenticate with OpenShift after $OC_RETRIES attempts" + exit 1 + fi + oc_retry_counter=$[$oc_retry_counter +1] + echo "OpenShift auth failed, retrying in ${OC_DELAY}s (attempt $oc_retry_counter/$OC_RETRIES)" + sleep $OC_DELAY + done + until [ $(oc rsh {{ _cell_conductor }} nova-manage cell_v2 list_hosts | wc -l) -eq "$COMPUTES" ]; do if [[ "$COUNTER" -ge "$RETRIES" ]]; then exit 1 From c1f373a7927c873ab3f68f45135f425ed17773c1 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Thu, 18 Sep 2025 14:07:57 -0400 Subject: [PATCH 360/480] Remove scenarios/reproducers/dz-storage.yml When the dz-storage scenario (uni10kappa) is run by ci-framwork using our downstream environment, it needs other variable files found in ci-framework-jobs. Thus, there is little value in keeping this scenario file upstream in ci-framework, which is a tooling repo. This file will be moved to ci-framework-jobs too. This is not a revert of PR 3217 because we still need roles/ci_gen_kustomize_values/templates/dz-storage (see also eb940a1e78b5601eccaa450c9be91519d21ac6d7) Signed-off-by: John Fulton --- scenarios/reproducers/dz-storage.yml | 1151 -------------------------- 1 file changed, 1151 deletions(-) delete mode 100644 scenarios/reproducers/dz-storage.yml diff --git a/scenarios/reproducers/dz-storage.yml b/scenarios/reproducers/dz-storage.yml deleted file mode 100644 index fea2ff330e..0000000000 --- a/scenarios/reproducers/dz-storage.yml +++ /dev/null @@ -1,1151 +0,0 @@ ---- -# Storage-specific settings for dz-storage -cifmw_devscripts_enable_iscsi_on_ocp_nodes: true -cifmw_devscripts_enable_multipath_on_ocp_nodes: true - -cifmw_architecture_scenario: dz-storage -cifmw_arch_automation_file: "dz-storage.yaml" -# Everything below is directly from bgp-l3-xl.yml but the vars -# cifmw_architecture_scenario and cifmw_arch_automation_file -# were removed and set by the two lines above. 
- -cifmw_os_net_setup_config: - - name: public - external: true - is_default: true - provider_network_type: flat - provider_physical_network: datacentre - shared: true - subnets: - - name: public_subnet - cidr: 192.168.133.0/24 - allocation_pool_start: 192.168.133.190 - allocation_pool_end: 192.168.133.250 - gateway_ip: 192.168.133.1 - enable_dhcp: true - - -cifmw_run_id: '' -cifmw_use_devscripts: true -cifmw_use_libvirt: true -cifmw_virtualbmc_daemon_port: 50881 -cifmw_use_uefi: >- - {{ (cifmw_repo_setup_os_release is defined - and cifmw_repo_setup_os_release == 'rhel') | bool }} -num_racks: 3 -cifmw_libvirt_manager_compute_amount: "{{ num_racks }}" -cifmw_libvirt_manager_networker_amount: 3 -cifmw_libvirt_manager_pub_net: ocpbm -cifmw_libvirt_manager_spineleaf_setup: true -cifmw_libvirt_manager_network_interface_types: - rtr-ocp: network - s0-rtr: network - s1-rtr: network - l00-s0: network - l01-s0: network - l00-s1: network - l01-s1: network - l10-s0: network - l11-s0: network - l10-s1: network - l11-s1: network - l20-s0: network - l21-s0: network - l20-s1: network - l21-s1: network - l00-node0: network - l00-node1: network - l00-node2: network - l00-ocp0: network - l00-ocp1: network - l00-ocp2: network - l00-ocp3: network - l01-node0: network - l01-node1: network - l01-node2: network - l01-ocp0: network - l01-ocp1: network - l01-ocp2: network - l01-ocp3: network - l10-node0: network - l10-node1: network - l10-node2: network - l10-ocp0: network - l10-ocp1: network - l10-ocp2: network - l10-ocp3: network - l11-node0: network - l11-node1: network - l11-node2: network - l11-ocp0: network - l11-ocp1: network - l11-ocp2: network - l11-ocp3: network - l20-node0: network - l20-node1: network - l20-node2: network - l20-ocp0: network - l20-ocp1: network - l20-ocp2: network - l20-ocp3: network - l21-node0: network - l21-node1: network - l21-node2: network - l21-ocp0: network - l21-ocp1: network - l21-ocp2: network - l21-ocp3: network - 
-cifmw_libvirt_manager_configuration: - networks: - osp_trunk: | - - osp_trunk - - - - - - # router to ocp network - rtr-ocp: | - - rtr-ocp - - - # spines to router networks - s0-rtr: | - - s0-rtr - - - s1-rtr: | - - s1-rtr - - - # leafs to spines networks - ## rack0 - l00-s0: | - - l00-s0 - - - l00-s1: | - - l00-s1 - - - l01-s0: | - - l01-s0 - - - l01-s1: | - - l01-s1 - - - ## rack1 - l10-s0: | - - l10-s0 - - - l10-s1: | - - l10-s1 - - - l11-s0: | - - l11-s0 - - - l11-s1: | - - l11-s1 - - - ## rack2 - l20-s0: | - - l20-s0 - - - l20-s1: | - - l20-s1 - - - l21-s0: | - - l21-s0 - - - l21-s1: | - - l21-s1 - - - # leafs to nodes and ocps - ## rack0 - l00-node0: | - - l00-node0 - - - l00-node1: | - - l00-node1 - - - l00-node2: | - - l00-node2 - - - l00-ocp0: | - - l00-ocp0 - - - l00-ocp1: | - - l00-ocp1 - - - l00-ocp2: | - - l00-ocp2 - - - l00-ocp3: | - - l00-ocp3 - - - l01-node0: | - - l01-node0 - - - l01-node1: | - - l01-node1 - - - l01-node2: | - - l01-node2 - - - l01-ocp0: | - - l01-ocp0 - - - l01-ocp1: | - - l01-ocp1 - - - l01-ocp2: | - - l01-ocp2 - - - l01-ocp3: | - - l01-ocp3 - - - ## rack1 - l10-node0: | - - l10-node0 - - - l10-node1: | - - l10-node1 - - - l10-node2: | - - l10-node2 - - - l10-ocp0: | - - l10-ocp0 - - - l10-ocp1: | - - l10-ocp1 - - - l10-ocp2: | - - l10-ocp2 - - - l10-ocp3: | - - l10-ocp3 - - - l11-node0: | - - l11-node0 - - - l11-node1: | - - l11-node1 - - - l11-node2: | - - l11-node2 - - - l11-ocp0: | - - l11-ocp0 - - - l11-ocp1: | - - l11-ocp1 - - - l11-ocp2: | - - l11-ocp2 - - - l11-ocp3: | - - l11-ocp3 - - - ## rack2 - l20-node0: | - - l20-node0 - - - l20-node1: | - - l20-node1 - - - l20-node2: | - - l20-node2 - - - l20-ocp0: | - - l20-ocp0 - - - l20-ocp1: | - - l20-ocp1 - - - l20-ocp2: | - - l20-ocp2 - - - l20-ocp3: | - - l20-ocp3 - - - l21-node0: | - - l21-node0 - - - l21-node1: | - - l21-node1 - - - l21-node2: | - - l21-node2 - - - l21-ocp0: | - - l21-ocp0 - - - l21-ocp1: | - - l21-ocp1 - - - l21-ocp2: | - - l21-ocp2 - - - l21-ocp3: | - - 
l21-ocp3 - - - ocpbm: | - - ocpbm - - - - - - - ocppr: | - - ocppr - - - - r0_tr: | - - r0_tr - - - - - - r1_tr: | - - r1_tr - - - - - - r2_tr: | - - r2_tr - - - - - - - vms: - controller: - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 50 - memory: 8 - cpus: 4 - nets: - - ocpbm - - osp_trunk - r0-compute: &r0_compute_def - amount: 2 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 50 - memory: 8 - cpus: 4 - nets: - - "ocpbm" - - "r0_tr" - spineleafnets: - - # rack0 - compute0 - - "l00-node0" - - "l01-node0" - - # rack0 - compute0 - - "l00-node1" - - "l01-node1" - r1-compute: - amount: 2 - root_part_id: "{{ cifmw_root_partition_id }}" - uefi: "{{ cifmw_use_uefi }}" - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "centos-stream-9.qcow2" - disksize: 50 - memory: 8 - cpus: 4 - nets: - - ocpbm - - r1_tr - spineleafnets: - - # rack1 - compute0 - - "l10-node0" - - "l11-node0" - - # rack1 - compute1 - - "l10-node1" - - "l11-node1" - r2-compute: - amount: 2 - root_part_id: "{{ cifmw_root_partition_id }}" - uefi: "{{ cifmw_use_uefi }}" - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "centos-stream-9.qcow2" - disksize: 50 - memory: 8 - cpus: 4 - nets: - - ocpbm - - r2_tr - spineleafnets: - - # rack2 
- compute0 - - "l20-node0" - - "l21-node0" - - # rack2 - compute1 - - "l20-node1" - - "l21-node1" - - r0-networker: - amount: 1 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 40 - memory: 8 - cpus: 4 - # ansible_group: networker - nets: - - "ocpbm" - - "r0_tr" - spineleafnets: - - # rack0 - networker0 - - "l00-node2" - - "l01-node2" - r1-networker: - amount: 1 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 40 - memory: 8 - cpus: 4 - # ansible_group: networker - nets: - - "ocpbm" - - "r1_tr" - spineleafnets: - - # rack1 - networker0 - - "l10-node2" - - "l11-node2" - r2-networker: - amount: 1 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 40 - memory: 8 - cpus: 4 - # ansible_group: networker - nets: - - "ocpbm" - - "r2_tr" - spineleafnets: - - # rack2 - networker0 - - "l20-node2" - - "l21-node2" - ocp: - amount: 3 - uefi: true - root_part_id: 4 - admin_user: core - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "ocp_master" - disksize: "105" - memory: 16 - cpus: 10 - extra_disks_num: 1 - extra_disks_size: "20G" - nets: # nets common to all the ocp nodes - - "ocppr" - - "ocpbm" - - "osp_trunk" - spineleafnets: - - # rack0 - ocp 
master 0 - - "l00-ocp0" - - "l01-ocp0" - - # rack1 - ocp master 1 - - "l10-ocp0" - - "l11-ocp0" - - # rack2 - ocp master 2 - - "l20-ocp0" - - "l21-ocp0" - ocp_worker: - amount: 10 - uefi: true - root_part_id: 4 - admin_user: core - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "ocp_worker" - disksize: "105" - memory: 16 - cpus: 10 - extra_disks_num: 1 - extra_disks_size: "20G" - nets: # nets common to all the ocp_worker nodes - - "ocppr" - - "ocpbm" - - "osp_trunk" - spineleafnets: - - # rack0 - ocp worker 0 - - "l00-ocp1" - - "l01-ocp1" - - # rack0 - ocp worker 1 - - "l00-ocp2" - - "l01-ocp2" - - # rack0 - ocp worker 2 - - "l00-ocp3" - - "l01-ocp3" - - # rack1 - ocp worker 3 - - "l10-ocp1" - - "l11-ocp1" - - # rack1 - ocp worker 4 - - "l10-ocp2" - - "l11-ocp2" - - # rack1 - ocp worker 5 - - "l10-ocp3" - - "l11-ocp3" - - # rack2 - ocp worker 6 - - "l20-ocp1" - - "l21-ocp1" - - # rack2 - ocp worker 7 - - "l20-ocp2" - - "l21-ocp2" - - # rack2 - ocp worker 8 - - "l20-ocp3" - - "l21-ocp3" - - # router - ocp_tester (worker 9) - - "rtr-ocp" - router: - amount: 1 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 25 - memory: 4 - cpus: 2 - nets: # nets common to all the router nodes - - "ocpbm" - spineleafnets: - - # router - ocp_tester - - "s0-rtr" - - "s1-rtr" - - "rtr-ocp" - spine: - amount: 2 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 25 - memory: 4 - cpus: 2 - nets: # nets common to all the spine 
nodes - - "ocpbm" - spineleafnets: - - # spine0 - - "l00-s0" - - "l01-s0" - - "l10-s0" - - "l11-s0" - - "l20-s0" - - "l21-s0" - - "s0-rtr" - - # spine1 - - "l00-s1" - - "l01-s1" - - "l10-s1" - - "l11-s1" - - "l20-s1" - - "l21-s1" - - "s1-rtr" - leaf: - amount: 6 - root_part_id: >- - {{ - (cifmw_repo_setup_os_release is defined and cifmw_repo_setup_os_release == 'rhel') | - ternary(4, 1) - }} - image_url: "{{ cifmw_discovered_image_url }}" - sha256_image_name: "{{ cifmw_discovered_hash }}" - image_local_dir: "{{ cifmw_basedir }}/images/" - disk_file_name: "base-os.qcow2" - disksize: 25 - memory: 4 - cpus: 2 - nets: # nets common to all the leaf nodes - - "ocpbm" - spineleafnets: - - # rack0 - leaf00 - - "l00-s0" - - "l00-s1" - - "l00-node0" - - "l00-node1" - - "l00-node2" - - "l00-ocp0" - - "l00-ocp1" - - "l00-ocp2" - - "l00-ocp3" - - # rack0 - leaf01 - - "l01-s0" - - "l01-s1" - - "l01-node0" - - "l01-node1" - - "l01-node2" - - "l01-ocp0" - - "l01-ocp1" - - "l01-ocp2" - - "l01-ocp3" - - # rack1 - leaf10 - - "l10-s0" - - "l10-s1" - - "l10-node0" - - "l10-node1" - - "l10-node2" - - "l10-ocp0" - - "l10-ocp1" - - "l10-ocp2" - - "l10-ocp3" - - # rack1 - leaf11 - - "l11-s0" - - "l11-s1" - - "l11-node0" - - "l11-node1" - - "l11-node2" - - "l11-ocp0" - - "l11-ocp1" - - "l11-ocp2" - - "l11-ocp3" - - # rack2 - leaf20 - - "l20-s0" - - "l20-s1" - - "l20-node0" - - "l20-node1" - - "l20-node2" - - "l20-ocp0" - - "l20-ocp1" - - "l20-ocp2" - - "l20-ocp3" - - # rack2 - leaf21 - - "l21-s0" - - "l21-s1" - - "l21-node0" - - "l21-node1" - - "l21-node2" - - "l21-ocp0" - - "l21-ocp1" - - "l21-ocp2" - - "l21-ocp3" - -## devscript support for OCP deploy -cifmw_devscripts_config_overrides: - fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" - cluster_subnet_v4: "192.172.0.0/16" - network_config_folder: "/home/zuul/netconf" - -# Required for egress traffic from pods to the osp_trunk network -cifmw_devscripts_enable_ocp_nodes_host_routing: true - -# Automation section. 
Most of those parameters will be passed to the -# controller-0 as-is and be consumed by the `deploy-va.sh` script. -# Please note, all paths are on the controller-0, meaning managed by the -# Framework. Please do not edit them! -_arch_repo: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" -cifmw_kustomize_deploy_architecture_examples_path: "examples/dt/" -cifmw_architecture_automation_file: >- - {{ - (_arch_repo, - 'automation/vars', - cifmw_arch_automation_file) | - path_join - }} - -cifmw_kustomize_deploy_metallb_source_files: >- - {{ - (_arch_repo, - 'examples/dt/bgp-l3-xl/metallb') | - path_join - }} - -# bgp_spines_leaves_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. -# src_dir }}/playbooks/bgp/prepare-bgp-spines-leaves.yaml" -# bgp_computes_playbook: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/ci-framework']. -# src_dir }}/playbooks/bgp/prepare-bgp-computes.yaml" - - -pre_deploy: - - name: BGP spines and leaves configuration - type: playbook - source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/bgp/prepare-bgp-spines-leaves.yaml" - extra_vars: - num_racks: "{{ num_racks }}" - router_bool: true - edpm_nodes_per_rack: 3 - ocp_nodes_per_rack: 4 - router_uplink_ip: 100.64.10.1 - -# post_deploy: -# - name: BGP computes configuration -# type: playbook -# source: "{{ bgp_computes_playbook }}" -# extra_vars: -# #networkers_bool: true -# networkers_bool: false - -cifmw_libvirt_manager_default_gw_nets: - - ocpbm - - r0_tr - - r1_tr - - r2_tr -cifmw_networking_mapper_interfaces_info_translations: - osp_trunk: - - controlplane - - ctlplane - r0_tr: - - ctlplaner0 - r1_tr: - - ctlplaner1 - r2_tr: - - ctlplaner2 - - -cifmw_networking_definition: - networks: - ctlplane: - network: "192.168.125.0/24" - gateway: "192.168.125.1" - dns: - - "192.168.122.1" - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - metallb: - ranges: - - start: 80 - end: 90 - netconfig: - ranges: - - 
start: 100 - end: 120 - - start: 150 - end: 200 - - ctlplaner0: - network: "192.168.122.0/24" - gateway: "192.168.122.1" - dns: - - "192.168.122.1" - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - metallb: - ranges: - - start: 80 - end: 90 - netconfig: - ranges: - - start: 100 - end: 130 - - start: 150 - end: 200 - - ctlplaner1: - network: "192.168.123.0/24" - gateway: "192.168.123.1" - dns: - - "192.168.123.1" - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - netconfig: - ranges: - - start: 100 - end: 130 - - start: 150 - end: 170 - metallb: - ranges: - - start: 80 - end: 90 - ctlplaner2: - network: "192.168.124.0/24" - gateway: "192.168.124.1" - dns: - - "192.168.124.1" - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - netconfig: - ranges: - - start: 100 - end: 130 - - start: 150 - end: 170 - metallb: - ranges: - - start: 80 - end: 90 - - internalapi: - network: "172.17.0.0/24" - vlan: 20 - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - metallb: - ranges: - - start: 80 - end: 90 - netconfig: - ranges: - - start: 100 - end: 250 - - storage: - network: "172.18.0.0/24" - vlan: 21 - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - metallb: - ranges: - - start: 80 - end: 90 - netconfig: - ranges: - - start: 100 - end: 250 - - tenant: - network: "172.19.0.0/24" - vlan: 22 - mtu: 1500 - tools: - multus: - ranges: - - start: 30 - end: 70 - metallb: - ranges: - - start: 80 - end: 90 - netconfig: - ranges: - - start: 100 - end: 250 - - octavia: - vlan: 23 - mtu: 1500 - network: "172.23.0.0/24" - tools: - multus: - ranges: - - start: 30 - end: 70 - netconfig: - ranges: - - start: 100 - end: 250 - - # Not really used, but required by architecture - # https://github.com/openstack-k8s-operators/architecture/blob/main/lib/networking/netconfig/kustomization.yaml#L28-L36 - external: - network: "192.168.32.0/20" - vlan: 99 - mtu: 1500 - tools: - netconfig: - ranges: - - start: 130 - end: 250 - - 
group-templates: - r0-computes: - network-template: - range: - start: 100 - length: 5 - networks: - ctlplaner0: {} - internalapi: - trunk-parent: ctlplaner0 - tenant: - trunk-parent: ctlplaner0 - storage: - trunk-parent: ctlplaner0 - r1-computes: - network-template: - range: - start: 110 - length: 5 - networks: - ctlplaner1: {} - internalapi: - trunk-parent: ctlplaner1 - tenant: - trunk-parent: ctlplaner1 - storage: - trunk-parent: ctlplaner1 - r2-computes: - network-template: - range: - start: 120 - length: 5 - networks: - ctlplaner2: {} - internalapi: - trunk-parent: ctlplaner2 - tenant: - trunk-parent: ctlplaner2 - storage: - trunk-parent: ctlplaner2 - r0-networkers: - network-template: - range: - start: 200 - length: 5 - networks: - ctlplaner0: {} - internalapi: - trunk-parent: ctlplaner0 - tenant: - trunk-parent: ctlplaner0 - storage: - trunk-parent: ctlplaner0 - r1-networkers: - network-template: - range: - start: 210 - length: 5 - networks: - ctlplaner1: {} - internalapi: - trunk-parent: ctlplaner1 - tenant: - trunk-parent: ctlplaner1 - storage: - trunk-parent: ctlplaner1 - r2-networkers: - network-template: - range: - start: 220 - length: 5 - networks: - ctlplaner2: {} - internalapi: - trunk-parent: ctlplaner2 - tenant: - trunk-parent: ctlplaner2 - storage: - trunk-parent: ctlplaner2 - ocps: - network-template: - range: - start: 10 - length: 10 - networks: {} - ocp_workers: - network-template: - range: - start: 20 - length: 10 - networks: {} - - instances: - controller-0: - networks: - ctlplane: - ip: "192.168.125.9" From 4fe4e791c97fa7352dea5685617160a79e8459ae Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 23 Sep 2025 09:24:21 +0200 Subject: [PATCH 361/480] Add ansible_ssh_args and ansible_port into cifmw_helpers parse inventory The ansible_ssh_args and ansible_port argument might be required to establish connection to some host. 
Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/tasks/parse_inventory.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/cifmw_helpers/tasks/parse_inventory.yml b/roles/cifmw_helpers/tasks/parse_inventory.yml index 4f36be9933..4bb65128c6 100644 --- a/roles/cifmw_helpers/tasks/parse_inventory.yml +++ b/roles/cifmw_helpers/tasks/parse_inventory.yml @@ -4,7 +4,9 @@ name: "{{ host_item.key }}" groups: "{{ group_item.key }}" ansible_host: "{{ host_item.value.ansible_host | default(omit) }}" + ansible_port: "{{ host_item.value.ansible_port | default(omit) }}" ansible_ssh_common_args: "{{ host_item.value.ansible_ssh_common_args | default(omit) }}" + ansible_ssh_args: "{{ host_item.value.ansible_ssh_args | default(omit) }}" ansible_ssh_private_key_file: "{{ host_item.value.ansible_ssh_private_key_file | default(omit) }}" ansible_user: "{{ host_item.value.ansible_user | default(omit) }}" ansible_connection: "{{ host_item.value.ansible_connection | default(omit) }}" From 26d53bf5a58c152143116477b2a6e8e07dcd93f2 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Mon, 22 Sep 2025 10:33:06 +0200 Subject: [PATCH 362/480] Add empty adoption scenario uni09iota --- scenarios/adoption/uni09iota.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 scenarios/adoption/uni09iota.yml diff --git a/scenarios/adoption/uni09iota.yml b/scenarios/adoption/uni09iota.yml new file mode 100644 index 0000000000..4e9e5200e7 --- /dev/null +++ b/scenarios/adoption/uni09iota.yml @@ -0,0 +1,2 @@ +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From 73de44d5ac6041f5f101ae0f18044b70b66d52be Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Thu, 12 Dec 2024 16:13:49 +0100 Subject: [PATCH 363/480] Multi-cell adoption support for osp-deploy plugin Adapt templating vars for multi-cell layout Remove no longer used edpm_computes. Change data formats to become compliant with multi-cell topology. 
Signed-off-by: Bohdan Dobrelia --- .../adoption_multicell_post_stack.yml | 71 ++++++++++++++ .../adoption_multicell_post_stack_all.yml | 37 ++++++++ .../adoption_multicell_pre_stack.yml | 35 +++++++ .../tasks/generate_adoption_vars.yml | 10 +- .../tasks/prepare_overcloud.yml | 5 + .../templates/adoption_vars.yaml.j2 | 92 +++++++++++++------ .../templates/os_net_config_overcloud.yml.j2 | 2 +- .../tripleo-ansible-inventory.yaml.j2 | 11 ++- scenarios/adoption/uni05epsilon.yml | 2 + 9 files changed, 230 insertions(+), 35 deletions(-) create mode 100644 hooks/playbooks/adoption_multicell_post_stack.yml create mode 100644 hooks/playbooks/adoption_multicell_post_stack_all.yml create mode 100644 hooks/playbooks/adoption_multicell_pre_stack.yml create mode 100644 scenarios/adoption/uni05epsilon.yml diff --git a/hooks/playbooks/adoption_multicell_post_stack.yml b/hooks/playbooks/adoption_multicell_post_stack.yml new file mode 100644 index 0000000000..95f9fed367 --- /dev/null +++ b/hooks/playbooks/adoption_multicell_post_stack.yml @@ -0,0 +1,71 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: OSP 17 - Multi-stack multi-cell post overcloud + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + module_defaults: + ansible.builtin.shell: + executable: /bin/bash + vars: + _stack: "{{ stack | default('overcloud') }}" + _ansible_user_dir: "{{ ansible_user_dir | default('/home/zuul') }}" + tasks: + - name: Ensure merged inventory directory exists + delegate_to: osp-undercloud-0 + ansible.builtin.file: + state: directory + path: "{{ _ansible_user_dir }}/inventories" + mode: "0755" + + - name: Copy stack inventory file + delegate_to: osp-undercloud-0 + ansible.builtin.copy: + src: "{{ _ansible_user_dir }}/overcloud-deploy/{{ _stack }}/config-download/{{ _stack }}/tripleo-ansible-inventory.yaml" + dest: "{{ _ansible_user_dir }}/inventories/{{ _stack }}.yaml" + remote_src: true + mode: "0644" + + - name: Install crudini + delegate_to: osp-undercloud-0 + environment: + ANSIBLE_HOST_KEY_CHECKING: "False" + ANSIBLE_SSH_RETRIES: "3" + OS_CLOUD: overcloud + ansible.builtin.command: + cmd: >- + ansible -bi {{ _ansible_user_dir }}/inventories/{{ _stack }}.yaml + -m ansible.builtin.package -a "name=crudini" all + + - name: Manage cells + when: stack != "overcloud" + delegate_to: osp-undercloud-0 + environment: + ANSIBLE_HOST_KEY_CHECKING: "False" + ANSIBLE_SSH_RETRIES: "3" + OS_CLOUD: overcloud + ansible.builtin.shell: | + set -e -o pipefail + ansible-playbook -i {{ _ansible_user_dir }}/inventories \ + /usr/share/ansible/tripleo-playbooks/create-nova-cell-v2.yaml \ + -e tripleo_cellv2_cell_name={{ _stack }} \ + -e tripleo_cellv2_containercli=podman \ + -e tripleo_cellv2_cellcontroller_rolename=CellController + + openstack aggregate create {{ _stack }} --zone {{ _stack }} + for i in $(openstack hypervisor list -f value -c 'Hypervisor Hostname'| grep {{ _stack }}); do + openstack aggregate add host {{ _stack }} $i + done diff --git a/hooks/playbooks/adoption_multicell_post_stack_all.yml 
b/hooks/playbooks/adoption_multicell_post_stack_all.yml new file mode 100644 index 0000000000..e75eef9c40 --- /dev/null +++ b/hooks/playbooks/adoption_multicell_post_stack_all.yml @@ -0,0 +1,37 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: OSP 17 - Multi-stack post overcloud + hosts: "{{ cifmw_target_host | default('localhost') }}" + vars: + _ansible_user_dir: "{{ ansible_user_dir | default('/home/zuul') }}" + tasks: + - name: Manage cells + delegate_to: osp-undercloud-0 + environment: + ANSIBLE_HOST_KEY_CHECKING: "False" + ANSIBLE_SSH_RETRIES: "3" + ANSIBLE_REMOTE_USER: tripleo-admin + OS_CLOUD: overcloud + ansible.builtin.shell: | + set -eu + ansible allovercloud \ + -i {{ _ansible_user_dir }}/inventories -m include_role \ + -a name=tripleo_hosts_entries \ + -e tripleo_stack_name=all \ + -e role_networks='["InternalApi"]' \ + -e hostname_resolve_network=ctlplane -e plan=overcloud \ + -e @{{ _ansible_user_dir }}/overcloud-deploy/overcloud/config-download/overcloud/global_vars.yaml diff --git a/hooks/playbooks/adoption_multicell_pre_stack.yml b/hooks/playbooks/adoption_multicell_pre_stack.yml new file mode 100644 index 0000000000..4d742ece0c --- /dev/null +++ b/hooks/playbooks/adoption_multicell_pre_stack.yml @@ -0,0 +1,35 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: OSP 17 - Multi-stack pre overcloud + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: false + module_defaults: + ansible.builtin.shell: + executable: /bin/bash + vars: + _stack: "{{ stack | default('overcloud') }}" + _ansible_user_dir: "{{ ansible_user_dir | default('/home/zuul') }}" + tasks: + - name: Export the stack data from the overcloud stack + delegate_to: osp-undercloud-0 + environment: + OS_CLOUD: overcloud + ansible.builtin.command: + cmd: >- + openstack overcloud cell export --control-plane-stack overcloud -f + --output-file {{ _ansible_user_dir }}/{{ _stack }}-input.yaml + --working-dir {{ _ansible_user_dir }}/overcloud-deploy/overcloud/ diff --git a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml index a90a1a7b92..d984a71dc1 100644 --- a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml +++ b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml @@ -30,8 +30,14 @@ _controller_1_name: "{{ _vm_groups['osp-controllers'] | first }}" _controller_1_net: "{{ cifmw_networking_env_definition.instances[_controller_1_name] }}" _controller_1_internalapi_ip: "{{ _controller_1_net.networks.internalapi[ip_version|default('ip_v4')] }}" - _compute_1_name: "{{ _vm_groups['osp-computes'] | first }}" - _compute_1_net: "{{ cifmw_networking_env_definition.instances[_compute_1_name] }}" + _compute_1_name: >- + {%- 
if _vm_groups['osp-computes'] | default([]) | length > 0 -%} + {{ _vm_groups['osp-computes'] | first }} + {%- else -%} + standalone + {%- endif -%} + _compute_1_net: "{{ cifmw_networking_env_definition.instances[_compute_1_name] | default({'networks': {'ctlplane': {'ip_v4': '192.168.122.100'}}}) }}" + _stack_names: "{{ cifmw_adoption_osp_deploy_scenario.stacks | map(attribute='stackname') | list }}" _compute_1_ip: "{{ _compute_1_net.networks.ctlplane[ip_version|default('ip_v4')] }}" ansible.builtin.template: src: "adoption_vars.yaml.j2" diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index cbbdb2d56b..1af52e981a 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -33,6 +33,11 @@ cifmw_adoption_osp_deploy_scenario.hostname_groups_map | ansible.utils.keep_keys(target=_stack.stack_nodes) }} + _role_map_translation: >- + {{ + cifmw_adoption_osp_deploy_scenario.roles_groups_map | + ansible.utils.keep_keys(target=_stack.stack_nodes) + }} _network_data_file: >- {{ [cifmw_adoption_source_scenario_path, diff --git a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 index 699c262efd..40fd93f1f2 100644 --- a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 @@ -1,17 +1,23 @@ #jinja2: trim_blocks:True, lstrip_blocks:True +{%+ if multi_cell|default(false) +%} +source_mariadb_ip: + {% for stack in _stack_names %} + {% if stack == 'overcloud' %} + {% set cell = 'default' %} + {% set prefix = '' %} + {% else %} + {% set cell = stack %} + {% set prefix = stack ~ '-' %} + {% endif %} + {{ cell }}: {{ cifmw_networking_env_definition.instances[_vm_groups[prefix ~ 'osp-controllers'] | first].networks.internalapi[ip_version|default('ip_v4')] }} + {% endfor %} +{%+ else +%} source_mariadb_ip: {{ 
_controller_1_internalapi_ip }} +{%+ endif +%} + source_ovndb_ip: {{ _controller_1_internalapi_ip }} edpm_node_hostname: {{ _compute_1_name }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} edpm_node_ip: {{ _compute_1_ip }} - -{% if _vm_groups['osp-computes'] | default([]) | length > 0 %} -edpm_computes: | - {% for compute in _vm_groups['osp-computes'] %} - {% set node_nets = cifmw_networking_env_definition.instances[compute] %} - ["{{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }}" - {% endfor %} -{% endif %} - {% if _vm_groups['osp-networkers'] | default([]) | length > 0 %} edpm_networkers: | {% for networker in _vm_groups['osp-networkers'] | default([]) %} @@ -20,31 +26,59 @@ edpm_networkers: | {% endfor %} {% endif %} -source_galera_members: | +source_galera_members: +{%+ if multi_cell|default(false) +%} + {% for stack in _stack_names %} + {% if stack == 'overcloud' %} + {% set cell = 'default' %} + {% set prefix = '' %} + {% else %} + {% set cell = stack %} + {% set prefix = stack ~ '-' %} + {% endif %} + {{ cell }}: + {% for controller in _vm_groups[prefix ~ 'osp-controllers'] %} + {% set node_nets = cifmw_networking_env_definition.instances[controller] %} + - name: "{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}" + ip: "{{ node_nets.networks.internalapi[ip_version|default('ip_v4')] }}" + {% endfor %} + {% endfor %} +{%+ else +%} {% for controller in _vm_groups['osp-controllers'] %} - {% set node_nets = cifmw_networking_env_definition.instances[controller] %} + {% set node_nets = cifmw_networking_env_definition.instances[controller] %} ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.internalapi[ip_version|default('ip_v4')] }}" {% endfor %} +{%+ endif +%} -{% if _vm_groups['osp-computes'] | default([]) | length > 0 %} edpm_nodes: - {% for compute in _vm_groups['osp-computes'] %} - {% set node_nets 
= cifmw_networking_env_definition.instances[compute] %} - {{ compute }}: - hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} - ansible: - ansibleHost: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} - networks: - {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %} - - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }} - name: {{ net }} - subnetName: subnet1 -{% if net == 'ctlplane' %} - defaultRoute: true -{% endif %} +{% for stack in _stack_names %} + {% if stack == 'overcloud' %} + {% set cell = 'default' %} + {% set prefix = '' %} + {% else %} + {% set cell = stack %} + {% set prefix = stack ~ '-' %} + {% endif %} + {% if _vm_groups[prefix ~ 'osp-computes'] | default([]) | length > 0 %} + {{ cell }}: + {% for compute in _vm_groups[prefix ~ 'osp-computes'] %} + {% set node_nets = cifmw_networking_env_definition.instances[compute] %} + {{ compute }}: + hostName: {{ compute }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }} + ansible: + ansibleHost: {{ node_nets.networks.ctlplane[ip_version|default('ip_v4')] }} + networks: + {% for net in node_nets.networks.keys() if net not in cifmw_adoption_osp_deploy_adoption_vars_exclude_nets %} + - fixedIP: {{ node_nets.networks[net][ip_version|default('ip_v4')] }} + name: {{ net }} + subnetName: subnet1 + {% if net == 'ctlplane' %} + defaultRoute: true + {% endif %} + {% endfor %} {% endfor %} - {% endfor %} -{% endif %} + {%+ endif +%} +{% endfor %} edpm_nodes_networker: {% if _vm_groups['osp-networkers'] | default([]) | length > 0 %} @@ -84,5 +118,5 @@ edpm_nodes_networker: upstream_dns: {{ cifmw_networking_env_definition.networks.ctlplane[dns_version|default('dns_v4')] | first }} -os_cloud_name: {{ cifmw_adoption_osp_deploy_scenario.stacks[0].stackname }} +os_cloud_name: {{ _stack_names[0] }} standalone_ip: {{ _undercloud_ip }} diff --git 
a/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 b/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 index 5a22412a01..7cd90787a7 100644 --- a/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 +++ b/roles/adoption_osp_deploy/templates/os_net_config_overcloud.yml.j2 @@ -13,7 +13,7 @@ network_config: addresses: - ip_netmask: {{ _ctlplane_ip }}/{{ _ctlplane_cidr }} {% if _stack.routes is defined %} - {%- for route in stack.routes %} + {%- for route in _stack.routes %} routes: - ip_netmask: {{ route.ip_netmask }} next_hop: {{ route.next_hop }} diff --git a/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 index f12efe0ffa..9cf21bd78d 100644 --- a/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/tripleo-ansible-inventory.yaml.j2 @@ -28,15 +28,20 @@ Undercloud: ansible_host: localhost allovercloud: children: - {% for _, role in cifmw_adoption_osp_deploy_scenario.roles_groups_map.items() %} + {% for _, role in _role_map_translation.items() %} {{ role }}: {} {% endfor %} computes: children: - {{ cifmw_adoption_osp_deploy_scenario.roles_groups_map['osp-computes'] }}: {} +{% if _role_map_translation['osp-computes'] | default([]) | length > 0 %} + {{ _role_map_translation['osp-computes'] }}: {} +{% endif %} +{% if _role_map_translation[_overcloud_name ~ '-osp-computes'] | default([]) | length > 0 %} + {{ _role_map_translation[_overcloud_name ~ '-osp-computes'] }}: {} +{% endif %} {{ _overcloud_name }}: hosts: - {% for group in ['osp-controllers', 'osp-computes'] %} + {% for group in _role_map_translation.keys() %} {% for node in _vm_groups[group] %} {% set node_nets = cifmw_networking_env_definition.instances[node] %} {{ node }}: diff --git a/scenarios/adoption/uni05epsilon.yml b/scenarios/adoption/uni05epsilon.yml new file mode 100644 index 0000000000..4e9e5200e7 --- 
/dev/null +++ b/scenarios/adoption/uni05epsilon.yml @@ -0,0 +1,2 @@ +libvirt_manager_patch_layout: {} +networking_mapper_definition_patch: {} From d5a6419a867213947be28626e879a33b98126ba5 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Wed, 3 Sep 2025 13:34:14 +0200 Subject: [PATCH 364/480] Fix prepare overcloud for multiple stacks Aggregate nodes by a stack name instead of the common pool of all nodes Signed-off-by: Bohdan Dobrelia --- .../tasks/prepare_overcloud.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 1af52e981a..8d4ec9ad32 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -72,8 +72,8 @@ ansible.builtin.set_fact: _tripleo_nodes_stack: >- {{ - _tripleo_nodes_stack | default([]) + - group.value + (_tripleo_nodes_stack | default({})) | + combine({ _overcloud_name: (_tripleo_nodes_stack[_overcloud_name] | default([]) + group.value) }) }} loop: "{{ _vm_groups | dict2items }}" loop_control: @@ -110,7 +110,7 @@ args: apply: delegate_to: "{{ _vm }}" - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: _vm pause: 1 @@ -121,7 +121,7 @@ community.general.rhsm_repository: name: "{{ cifmw_adoption_osp_deploy_repos }}" state: enabled - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: _vm pause: 1 @@ -205,7 +205,7 @@ - os-net-config - openvswitch state: present - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: overcloud_vm @@ -216,7 +216,7 @@ path: "/etc/os-net-config" state: directory mode: '0755' - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: overcloud_vm @@ -235,7 +235,7 @@ 
src: "os_net_config_overcloud.yml.j2" dest: /etc/os-net-config/tripleo_config.yaml mode: "0644" - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: overcloud_vm @@ -244,7 +244,7 @@ delegate_to: "{{ overcloud_vm }}" ansible.builtin.command: cmd: "os-net-config -c /etc/os-net-config/tripleo_config.yaml" - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: overcloud_vm @@ -259,6 +259,6 @@ ansible.posix.authorized_key: user: "{{ ansible_user_id }}" key: "{{ undercloud_ssh_pub['content'] | b64decode | trim }}" - loop: "{{ _tripleo_nodes_stack }}" + loop: "{{ _tripleo_nodes_stack[_overcloud_name] }}" loop_control: loop_var: overcloud_vm From 8022f9ea3da2f917edb61cb384cc375e86c02c1a Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Wed, 3 Sep 2025 16:35:25 +0200 Subject: [PATCH 365/480] Add molecule test for multi-stack osp deploy Also make the vars as facts to keep the logic around it testable Signed-off-by: Bohdan Dobrelia --- .ansible-lint | 1 + .../molecule/default/containerfile | 12 ++++ .../molecule/default/converge.yml | 26 ++++++++ .../molecule/default/molecule.yml | 32 ++++++++++ .../molecule/default/vars.yaml | 62 ++++++++++++++++++ .../molecule/default/verify.yml | 23 +++++++ .../tasks/gather_stack_nodes.yml | 64 +++++++++++++++++++ .../tasks/generate_adoption_vars.yml | 1 + .../tasks/prepare_overcloud.yml | 58 +---------------- .../templates/adoption_vars.yaml.j2 | 5 +- 10 files changed, 226 insertions(+), 58 deletions(-) create mode 100644 roles/adoption_osp_deploy/molecule/default/containerfile create mode 100644 roles/adoption_osp_deploy/molecule/default/converge.yml create mode 100644 roles/adoption_osp_deploy/molecule/default/molecule.yml create mode 100644 roles/adoption_osp_deploy/molecule/default/vars.yaml create mode 100644 roles/adoption_osp_deploy/molecule/default/verify.yml create mode 100644 
roles/adoption_osp_deploy/tasks/gather_stack_nodes.yml diff --git a/.ansible-lint b/.ansible-lint index 7f7c4b5b64..4c12fb7024 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -12,6 +12,7 @@ exclude_paths: - zuul.d/projects.yaml # Generated, pyYAML is bad at indentation - zuul.d/molecule.yaml # Generated, pyYAML is bad at indentation - ci/ + - roles/adoption_osp_deploy/molecule/default/vars.yaml # vars_file - roles/ci_gen_kustomize_values/molecule/default/files/networking-environment-definition.yml # Generated - roles/ci_gen_kustomize_values/molecule/default/files/3-ocp-net-def.yml # Generated - roles/ci_gen_kustomize_values/molecule/default/converge.yml # invalid due to calls to "lookup('file')" diff --git a/roles/adoption_osp_deploy/molecule/default/containerfile b/roles/adoption_osp_deploy/molecule/default/containerfile new file mode 100644 index 0000000000..f9556ae5c4 --- /dev/null +++ b/roles/adoption_osp_deploy/molecule/default/containerfile @@ -0,0 +1,12 @@ +FROM registry.access.redhat.com/ubi9/ubi-init + +RUN curl -o /etc/yum.repos.d/delorean.repo https://trunk.rdoproject.org/centos9-master/current/delorean.repo && \ + dnf upgrade -y && \ + dnf -y install sudo python3 python3-libselinux selinux-policy git python3-pip && \ + git clone https://github.com/openstack-k8s-operators/repo-setup && \ + cd repo-setup && \ + pip install -r requirements.txt && \ + python3 setup.py install && \ + dnf clean all -y + +CMD [ '/sbin/init' ] diff --git a/roles/adoption_osp_deploy/molecule/default/converge.yml b/roles/adoption_osp_deploy/molecule/default/converge.yml new file mode 100644 index 0000000000..9e88ebe7d3 --- /dev/null +++ b/roles/adoption_osp_deploy/molecule/default/converge.yml @@ -0,0 +1,26 @@ +--- +- name: Converge + hosts: all + gather_facts: false + + vars_files: + - vars.yaml + vars: + # Vars required by prepare_overcloud.yml + cifmw_libvirt_manager_images_path: /tmp/images + cifmw_libvirt_manager_image_name: centos-stream-9.qcow2 + 
cifmw_adoption_osp_deploy_repos: [] + cifmw_adoption_source_scenario_path: "." + cifmw_basedir: "{{ playbook_dir }}" + + tasks: + - name: Gather stack nodes and facts + ansible.builtin.include_tasks: ../../tasks/gather_stack_nodes.yml + loop: "{{ stacks }}" + loop_control: + loop_var: _stack + + - name: Store result for verification as persistent fact + ansible.builtin.set_fact: + tripleo_nodes_stack: "{{ _tripleo_nodes_stack }}" + cacheable: true diff --git a/roles/adoption_osp_deploy/molecule/default/molecule.yml b/roles/adoption_osp_deploy/molecule/default/molecule.yml new file mode 100644 index 0000000000..37f95426b3 --- /dev/null +++ b/roles/adoption_osp_deploy/molecule/default/molecule.yml @@ -0,0 +1,32 @@ +--- +dependency: + name: galaxy +driver: + name: podman +platforms: + - name: instance + image: registry.access.redhat.com/ubi9/ubi-init + dockerfile: containerfile + command: /sbin/init + pre_build_image: true +provisioner: + inventory: + hosts: + all: + hosts: + instance: + ansible_python_interpreter: /usr/bin/python3 + name: ansible + log: true + env: + ANSIBLE_STDOUT_CALLBACK: yaml +verifier: + name: ansible +scenario: + test_sequence: + - destroy + - create + - prepare + - converge + - verify + - destroy diff --git a/roles/adoption_osp_deploy/molecule/default/vars.yaml b/roles/adoption_osp_deploy/molecule/default/vars.yaml new file mode 100644 index 0000000000..7498e130b9 --- /dev/null +++ b/roles/adoption_osp_deploy/molecule/default/vars.yaml @@ -0,0 +1,62 @@ +stacks: + - stackname: overcloud + network_data_file: "network_data.yaml.j2" + vips_data_file: "vips_data_overcloud.yaml" + stack_nodes: + - osp-controllers + - stackname: cell1 + network_data_file: "network_data.yaml.j2" + vips_data_file: "vips_data_cell1.yaml" + stack_nodes: + - cell1-osp-computes + - cell1-osp-controllers + - stackname: cell2 + network_data_file: "network_data.yaml.j2" + vips_data_file: "vips_data_cell2.yaml" + stack_nodes: + - cell2-osp-computes + - cell2-osp-controllers + 
+expected_nodes: + overcloud: + - osp-controller-uni05epsilon-0 + cell1: + - cell1-osp-compute-uni05epsilon-0 + - cell1-osp-controller-uni05epsilon-0 + cell2: + - cell2-osp-compute-uni05epsilon-0 + - cell2-osp-controller-uni05epsilon-0 + +cifmw_adoption_osp_deploy_scenario: + hostname_groups_map: + cell1-osp-computes: cell1-novacompute + cell1-osp-controllers: cell1-controller + cell2-osp-computes: cell2-novacompute + cell2-osp-controllers: cell2-controller + osp-controllers: overcloud-controller + roles_groups_map: + # map ansible groups to tripleo Role names + osp-controllers: Controller + cell1-osp-controllers: CellController + cell2-osp-controllers: CellController + +# Vars to simulate playbook execution context +_vm_groups: + cell1-osp-computes: + - cell1-osp-compute-uni05epsilon-0 + cell1-osp-controllers: + - cell1-osp-controller-uni05epsilon-0 + cell2-osp-computes: + - cell2-osp-compute-uni05epsilon-0 + cell2-osp-controllers: + - cell2-osp-controller-uni05epsilon-0 + controllers: + - controller-0 + ocps: + - ocp-master-0 + - ocp-master-1 + - ocp-master-2 + osp-controllers: + - osp-controller-uni05epsilon-0 + osp-underclouds: + - osp-undercloud-uni05epsilon-0 diff --git a/roles/adoption_osp_deploy/molecule/default/verify.yml b/roles/adoption_osp_deploy/molecule/default/verify.yml new file mode 100644 index 0000000000..23756be169 --- /dev/null +++ b/roles/adoption_osp_deploy/molecule/default/verify.yml @@ -0,0 +1,23 @@ +--- +- name: Verify + hosts: all + gather_facts: false + vars_files: + - vars.yaml + tasks: + - name: "Set _tripleo_nodes_stack from persistent fact" + ansible.builtin.set_fact: + tripleo_nodes_stack: "{{ hostvars[inventory_hostname]._tripleo_nodes_stack }}" + when: hostvars[inventory_hostname]._tripleo_nodes_stack is defined + + - name: "Assert gathered nodes for stacks" + ansible.builtin.assert: + that: + - "tripleo_nodes_stack[_stack.stackname] is defined" + - "tripleo_nodes_stack[_stack.stackname] | type_debug == 'list'" + - 
"tripleo_nodes_stack[_stack.stackname] | sort == expected_nodes[_stack.stackname] | sort" + fail_msg: "Verification failed for gathered nodes in stack {{ _stack.stackname }}" + success_msg: "Successfully verified gathered nodes for stack {{ _stack.stackname }}" + loop: "{{ stacks }}" + loop_control: + loop_var: _stack diff --git a/roles/adoption_osp_deploy/tasks/gather_stack_nodes.yml b/roles/adoption_osp_deploy/tasks/gather_stack_nodes.yml new file mode 100644 index 0000000000..4926349fed --- /dev/null +++ b/roles/adoption_osp_deploy/tasks/gather_stack_nodes.yml @@ -0,0 +1,64 @@ +--- +- name: Get main facts for the overcloud stack + ansible.builtin.set_fact: + _overcloud_name: >- + {{ + _stack.stackname | + default('overcloud') + }} + _network_data_file: >- + {{ + [cifmw_adoption_source_scenario_path, + _stack.network_data_file + ] | path_join + }} + +- name: Gather other facts for stack {{ _overcloud_name }}" + ansible.builtin.set_fact: + _hostname_map_translation: >- + {{ + cifmw_adoption_osp_deploy_scenario.hostname_groups_map | + ansible.utils.keep_keys(target=_stack.stack_nodes) + }} + _role_map_translation: >- + {{ + cifmw_adoption_osp_deploy_scenario.roles_groups_map | + ansible.utils.keep_keys(target=_stack.stack_nodes) + }} + _network_data_file_dest: >- + {{ + [ansible_user_dir, + 'network_data_' ~ _overcloud_name ~'.yaml' + ] | path_join + }} + + _network_data_extension: "{{ _network_data_file | splitext | last }}" + _vips_data_file: >- + {{ + [cifmw_adoption_source_scenario_path, + _stack.vips_data_file + ] | path_join + }} + _vips_data_file_dest: >- + {{ + [ansible_user_dir, + 'vips_data_' ~ _overcloud_name ~ '.yaml' + ] | path_join + }} + _source_cmd: "source {{ ansible_user_dir }}/stackrc" + _network_provision_output: "network_provision_{{ _overcloud_name }}_out.yaml" + _vips_provision_output: "vips_provision_{{ _overcloud_name }}_out.yaml" + +- name: "Gather nodes for stack {{ _overcloud_name }}" + when: group.key is in _hostname_map_translation + 
vars: + tripleo_nodes_stack: "{{ _tripleo_nodes_stack | default({}) }}" + ansible.builtin.set_fact: + _tripleo_nodes_stack: >- + {{ + tripleo_nodes_stack | combine({ _overcloud_name: (tripleo_nodes_stack.get(_overcloud_name, []) + group.value) }) + }} + loop: "{{ _vm_groups | dict2items }}" + loop_control: + loop_var: group + label: "{{ group.key }}" diff --git a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml index d984a71dc1..a6913bc271 100644 --- a/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml +++ b/roles/adoption_osp_deploy/tasks/generate_adoption_vars.yml @@ -38,6 +38,7 @@ {%- endif -%} _compute_1_net: "{{ cifmw_networking_env_definition.instances[_compute_1_name] | default({'networks': {'ctlplane': {'ip_v4': '192.168.122.100'}}}) }}" _stack_names: "{{ cifmw_adoption_osp_deploy_scenario.stacks | map(attribute='stackname') | list }}" + _default_cell_name: "{{ cifmw_adoption_osp_deploy_scenario.default_cell_name | default('cell1') }}" _compute_1_ip: "{{ _compute_1_net.networks.ctlplane[ip_version|default('ip_v4')] }}" ansible.builtin.template: src: "adoption_vars.yaml.j2" diff --git a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml index 8d4ec9ad32..d259b8c866 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_overcloud.yml @@ -22,63 +22,9 @@ - user_dir - name: Prepare enviornment for 17.1 overcloud deployment - vars: - _overcloud_name: >- - {{ - _stack.stackname | - default('overcloud') - }} - _hostname_map_translation: >- - {{ - cifmw_adoption_osp_deploy_scenario.hostname_groups_map | - ansible.utils.keep_keys(target=_stack.stack_nodes) - }} - _role_map_translation: >- - {{ - cifmw_adoption_osp_deploy_scenario.roles_groups_map | - ansible.utils.keep_keys(target=_stack.stack_nodes) - }} - _network_data_file: >- - {{ - [cifmw_adoption_source_scenario_path, 
- _stack.network_data_file - ] | path_join - }} - _network_data_file_dest: >- - {{ - [ansible_user_dir, - 'network_data_' ~ _overcloud_name ~'.yaml' - ] | path_join - }} - _network_data_extension: "{{ _network_data_file | splitext | last }}" - _vips_data_file: >- - {{ - [cifmw_adoption_source_scenario_path, - _stack.vips_data_file - ] | path_join - }} - _vips_data_file_dest: >- - {{ - [ansible_user_dir, - 'vips_data_' ~ _overcloud_name ~ '.yaml' - ] | path_join - }} - _source_cmd: "source {{ ansible_user_dir }}/stackrc" - _network_provision_output: "network_provision_{{ _overcloud_name }}_out.yaml" - _vips_provision_output: "vips_provision_{{ _overcloud_name }}_out.yaml" block: - - name: "Gather nodes for stack {{ _overcloud_name }}" - when: group.key is in _hostname_map_translation - ansible.builtin.set_fact: - _tripleo_nodes_stack: >- - {{ - (_tripleo_nodes_stack | default({})) | - combine({ _overcloud_name: (_tripleo_nodes_stack[_overcloud_name] | default([]) + group.value) }) - }} - loop: "{{ _vm_groups | dict2items }}" - loop_control: - loop_var: group - label: "{{ group.key }}" + - name: Gather stack nodes and facts + ansible.builtin.include_tasks: gather_stack_nodes.yml - name: Ensure overcloud vms are started vars: diff --git a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 index 40fd93f1f2..143104dff8 100644 --- a/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 +++ b/roles/adoption_osp_deploy/templates/adoption_vars.yaml.j2 @@ -26,8 +26,8 @@ edpm_networkers: | {% endfor %} {% endif %} -source_galera_members: {%+ if multi_cell|default(false) +%} +source_galera_members: {% for stack in _stack_names %} {% if stack == 'overcloud' %} {% set cell = 'default' %} @@ -44,6 +44,7 @@ source_galera_members: {% endfor %} {% endfor %} {%+ else +%} +source_galera_members: | {% for controller in _vm_groups['osp-controllers'] %} {% set node_nets = 
cifmw_networking_env_definition.instances[controller] %} ["{{ controller }}.{{ cifmw_adoption_osp_deploy_scenario.cloud_domain }}"]="{{ node_nets.networks.internalapi[ip_version|default('ip_v4')] }}" @@ -53,7 +54,7 @@ source_galera_members: edpm_nodes: {% for stack in _stack_names %} {% if stack == 'overcloud' %} - {% set cell = 'default' %} + {% set cell = _default_cell_name %} {% set prefix = '' %} {% else %} {% set cell = stack %} From 9e3bb08e57b448f219eb7ad09f65c3bcb91302ea Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Wed, 24 Sep 2025 13:05:24 +0200 Subject: [PATCH 366/480] [test_operator] Allow to read files from remote hosts Several tasks from the test_operator role only worked when the ansible playbook was run on localhost, because they used lookup file to read the file content. With this change, the file content is obtained with slurp, which allows to execute the playbook from a different host. OSPRH-20268 --- roles/test_operator/tasks/tempest-tests.yml | 12 ++++--- roles/test_operator/tasks/tobiko-tests.yml | 40 +++++++++++++++------ 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/roles/test_operator/tasks/tempest-tests.yml b/roles/test_operator/tasks/tempest-tests.yml index 70ea6c61d8..c1b4b7a6f8 100644 --- a/roles/test_operator/tasks/tempest-tests.yml +++ b/roles/test_operator/tasks/tempest-tests.yml @@ -92,6 +92,11 @@ - stage_vars_dict.cifmw_test_operator_tempest_ssh_key_secret_name is not defined - private_key_file.stat.exists block: + - name: Slurp cifmw private key file + ansible.builtin.slurp: + path: "{{ cifmw_test_operator_controller_priv_key_file_path }}" + register: private_key_file_content + - name: Ensure a secret for the cifmw private key file exists kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" @@ -107,11 +112,8 @@ name: "{{ cifmw_test_operator_controller_priv_key_secret_name }}" namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" data: - ssh-privatekey: >- - {{ - lookup('file', 
cifmw_test_operator_controller_priv_key_file_path, rstrip=False) | - b64encode - }} + # b64decode not needed because the text has to be encoded + ssh-privatekey: "{{ private_key_file_content.content }}" - name: Add SSHKeySecretName section to Tempest CR ansible.builtin.set_fact: diff --git a/roles/test_operator/tasks/tobiko-tests.yml b/roles/test_operator/tasks/tobiko-tests.yml index ea389404d8..eced2cf816 100644 --- a/roles/test_operator/tasks/tobiko-tests.yml +++ b/roles/test_operator/tasks/tobiko-tests.yml @@ -26,15 +26,19 @@ loop_control: loop_var: tobikoconf_section +- name: Slurp tobiko.conf + ansible.builtin.slurp: + path: "{{ cifmw_test_operator_artifacts_basedir }}/tobiko.conf" + register: tobikoconf_content + - name: Add config section to tobiko CR + vars: + tobikoconf_content_decoded: "{{ tobikoconf_content.content | b64decode }}" ansible.builtin.set_fact: test_operator_cr: >- {{ test_operator_cr | - combine({'spec': {'config': - lookup('file', - cifmw_test_operator_artifacts_basedir + '/tobiko.conf') - }}, recursive=true) + combine({'spec': {'config': tobikoconf_content_decoded}}, recursive=true) }} - name: Add ssh keys used for the VMs that tobiko creates to tobiko CR @@ -51,22 +55,30 @@ size: "{{ stage_vars_dict.cifmw_test_operator_tobiko_ssh_keysize }}" when: not check_ssh_key.stat.exists + - name: Slurp key files + vars: + keyfilename: "id_{{ stage_vars_dict.cifmw_test_operator_tobiko_ssh_keytype }}{{ '.pub' if item == 'public' else '' }}" + ansible.builtin.slurp: + path: "{{ cifmw_test_operator_artifacts_basedir }}/{{ keyfilename }}" + register: key_file_content + loop: + - private + - public + - name: Add private and public keys to tobiko CR vars: keyname: "{{ item }}Key" - keyfilename: "id_{{ stage_vars_dict.cifmw_test_operator_tobiko_ssh_keytype }}{{ '.pub' if item == 'public' else '' }}" ansible.builtin.set_fact: test_operator_cr: >- {{ test_operator_cr | - combine({'spec': {keyname: - lookup('file', - cifmw_test_operator_artifacts_basedir + 
'/' + keyfilename) - }}, recursive=true) + combine({'spec': {keyname: key_file_content.results[idx].content | b64decode}}, recursive=true) }} - with_items: + loop: - private - public + loop_control: + index_var: idx - name: Add preventCreate if it is defined ansible.builtin.set_fact: @@ -88,6 +100,11 @@ }} when: stage_vars_dict.cifmw_test_operator_tobiko_num_processes is not none +- name: Slurp kubeconfig file + ansible.builtin.slurp: + path: "{{ cifmw_openshift_kubeconfig }}" + register: kubeconfig_file_content + - name: Ensure a secret for the kubeconfig file exists kubernetes.core.k8s: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" @@ -103,5 +120,6 @@ name: "{{ stage_vars_dict.cifmw_test_operator_tobiko_kubeconfig_secret }}" namespace: "{{ stage_vars_dict.cifmw_test_operator_namespace }}" data: - config: "{{ lookup('file', cifmw_openshift_kubeconfig) | b64encode }}" + # b64decode not needed because the text has to be encoded + config: "{{ kubeconfig_file_content.content }}" when: not cifmw_test_operator_dry_run | bool From d2e1f181877f9a358e3594b577172e010d065780 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 2 Sep 2025 10:46:38 +0200 Subject: [PATCH 367/480] Drop nested ansible execution - e2e-collect-logs We want to drop nested Ansible execution where it is possible. It is wrong, to execute sometimes 4 or 5 time nested Ansible. It makes debugging difficult, raise more complication and sometimes variable overwrite is just problematic. 
Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/3235 Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/3287 More: OSPRH-20006 Signed-off-by: Daniel Pawlik --- ci/playbooks/e2e-collect-logs.yml | 29 ++++++++++++---------- ci/playbooks/edpm/run.yml | 7 ++++++ roles/cifmw_setup/tasks/run_logs.yml | 36 +++++++++++++++------------- 3 files changed, 42 insertions(+), 30 deletions(-) diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 3932feb899..1760900b4f 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -20,22 +20,25 @@ - not cifmw_status.stat.exists ansible.builtin.meta: end_host -- name: Run log collection when zuul_log_collection - hosts: "{{ cifmw_target_host | default(cifmw_zuul_target_host) | default('controller') }}" - gather_facts: true - tasks: - - name: Run run_logs tasks from cifmw_setup - ansible.builtin.command: > - ansible localhost - -m include_role - -a "name=cifmw_setup tasks_from=run_logs.yml" - -e "@scenarios/centos-9/base.yml" - args: - chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + - name: Read base centos-9 scenarios + vars: + provided_file: > + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ + ci-framework/scenarios/centos-9/base.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_file.yml + + - name: Run log collection + ansible.builtin.import_role: + name: cifmw_setup + tasks_from: run_logs.yml + tags: + - logs environment: ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ci-framework-data/logs/e2e-collect-logs-must-gather.log" -- name: "Run ci/playbooks/collect-logs.yml on CRC host" +- name: "Run ci/playbooks/e2e-collect-logs.yml on CRC host" hosts: crc gather_facts: false tasks: diff --git a/ci/playbooks/edpm/run.yml b/ci/playbooks/edpm/run.yml index 028bbd4755..e49364271d 100644 --- a/ci/playbooks/edpm/run.yml +++ b/ci/playbooks/edpm/run.yml @@ 
-15,6 +15,13 @@ path: "{{ ansible_user_dir }}/ci-framework-data/artifacts/edpm-ansible.yml" register: edpm_file + ### + # Make cifmw general plugins available when nested Ansible executed + - name: Make a symlink to local .ansible collection dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: symlink_cifmw_collection.yml + - name: Run Podified EDPM deployment ansible.builtin.command: chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index 8240ee7627..b8e5992112 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -6,23 +6,25 @@ - name: Try to load parameters files block: - - name: Check directory availability - register: param_dir - ansible.builtin.stat: - path: "{{ cifmw_basedir }}/artifacts/parameters" - - - name: Load parameters files - when: - - param_dir.stat.exists | bool - ansible.builtin.include_vars: - dir: "{{ cifmw_basedir }}/artifacts/parameters" - always: - - name: Set custom cifmw PATH reusable fact - when: - - cifmw_path is not defined - ansible.builtin.set_fact: - cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" - cacheable: true + - name: Try to load parameters files + block: + # NOTE: We should not check, if the parameters dir exists + # on remote host, due in later stage, we use "include_vars", + # which is reading variable ONLY on localhost. + # Ensure, that the directory exists on localhost before continue. 
+ - name: Read artifacts parameters dir and set as facts + vars: + provided_dir: "{{ cifmw_basedir }}/artifacts/parameters" + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_dir.yml + always: + - name: Set custom cifmw PATH reusable fact + when: + - cifmw_path is not defined + ansible.builtin.set_fact: + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + cacheable: true - name: Set destination folder for the logs ansible.builtin.set_fact: From a7af7d8b8b091b923196d15228745012357f4b6e Mon Sep 17 00:00:00 2001 From: Jenkins Date: Tue, 16 Sep 2025 17:13:17 +0200 Subject: [PATCH 368/480] Fix: search openstack-operator-index in all namespaces Currently it is looking for it in openstack-operator-index, but it is configured by default in openshift-marketplace. Task will look for it in any namespace --- roles/env_op_images/tasks/main.yml | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/roles/env_op_images/tasks/main.yml b/roles/env_op_images/tasks/main.yml index 39a6e55d55..f57acfb937 100644 --- a/roles/env_op_images/tasks/main.yml +++ b/roles/env_op_images/tasks/main.yml @@ -77,6 +77,24 @@ ansible.builtin.set_fact: cifmw_openstack_service_images_content: "{{ _sa_images_content.stdout | from_json }}" + - name: Get all pods from all namespaces to find openstack-operator-index + kubernetes.core.k8s_info: + kind: Pod + api_version: v1 + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit)}}" + context: "{{ cifmw_openshift_context | default(omit)}}" + field_selectors: + - status.phase=Running + register: all_pods_list + + - name: Retrieve openstack-operator-index pod + vars: + selected_pod: "{{ all_pods_list.resources | selectattr('metadata.generateName', 'defined') | selectattr('metadata.generateName', 'search', 'openstack-operator-index-') | list | first }}" + 
ansible.builtin.set_fact: + cifmw_install_yamls_vars_content: + OPENSTACK_IMG: "{{ selected_pod.status.containerStatuses[0].imageID }}" + - name: Get all the pods in openstack-operator namespace vars: csv_items: "{{ (_csvs_out.stdout | from_yaml)['items'] }}" @@ -94,13 +112,6 @@ - status.phase=Running register: pod_list - - name: Retrieve openstack-operator-index pod - vars: - selected_pod: "{{ pod_list.resources| selectattr('metadata.generateName', 'equalto', 'openstack-operator-index-') | list | first }}" - ansible.builtin.set_fact: - cifmw_install_yamls_vars_content: - OPENSTACK_IMG: "{{ selected_pod.status.containerStatuses[0].imageID }}" - - name: Get operator images and pods when: not cifmw_env_op_images_dryrun | bool vars: From 027944b083b346c8667b956a681d8721edf21623 Mon Sep 17 00:00:00 2001 From: Jeremy Agee Date: Mon, 22 Sep 2025 14:49:53 -0400 Subject: [PATCH 369/480] Add support for multiple IdP providers in Federation This patch will setup two realms in keycloak. Keystone will be configured to work with these two realms as different IdPs. Each realm will get its own mapping in openstack. It will also enable these two IdP choices to the horizon UI. 
Jira: https://issues.redhat.com/browse/OSPRH-14033 --- docs/dictionary/en-custom.txt | 3 + .../federation-controlplane-config.yml | 116 +------------ ...federation-horizon-controlplane-config.yml | 48 +----- ...eration-multirealm-controlplane-config.yml | 18 ++ hooks/playbooks/federation-post-deploy.yml | 21 +-- hooks/playbooks/federation-pre-deploy.yml | 21 +-- roles/federation/defaults/main.yml | 138 ++++++++++++++- .../tasks/hook_controlplane_config.yml | 85 ++++++++++ .../hook_horizon_controlplane_config.yml | 60 +++++++ .../hook_multirealm_controlplane_config.yml | 158 ++++++++++++++++++ roles/federation/tasks/hook_post_deploy.yml | 80 +++++++++ roles/federation/tasks/hook_pre_deploy.yml | 83 +++++++++ .../tasks/run_keycloak_realm_setup.yml | 9 +- .../tasks/run_openstack_auth_setup.yml | 77 +++++++++ .../tasks/run_openstack_auth_test.yml | 67 +------- .../federation/tasks/run_openstack_setup.yml | 12 +- .../templates/federation-multirealm.conf.j2 | 40 +++++ .../templates/federation-single.conf.j2 | 30 ++++ roles/federation/templates/kctestuser1.j2 | 6 +- roles/federation/templates/kctestuser2.j2 | 17 ++ roles/federation/templates/rules.json.j2 | 2 +- 21 files changed, 834 insertions(+), 257 deletions(-) create mode 100644 hooks/playbooks/federation-multirealm-controlplane-config.yml create mode 100644 roles/federation/tasks/hook_controlplane_config.yml create mode 100644 roles/federation/tasks/hook_horizon_controlplane_config.yml create mode 100644 roles/federation/tasks/hook_multirealm_controlplane_config.yml create mode 100644 roles/federation/tasks/hook_post_deploy.yml create mode 100644 roles/federation/tasks/hook_pre_deploy.yml create mode 100644 roles/federation/tasks/run_openstack_auth_setup.yml create mode 100644 roles/federation/templates/federation-multirealm.conf.j2 create mode 100644 roles/federation/templates/federation-single.conf.j2 create mode 100644 roles/federation/templates/kctestuser2.j2 diff --git a/docs/dictionary/en-custom.txt 
b/docs/dictionary/en-custom.txt index 70e842a220..547e6b3346 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -389,6 +389,7 @@ nwy nzgdh oauth observability +oidc oc ocp ocpbm @@ -399,6 +400,7 @@ ol olm oob opendev +openid openrc openscap openshift @@ -617,6 +619,7 @@ vvvv vxlan vynxgdagahaac vzcg +websso wget whitebox wljewmdozmzawlzasdje diff --git a/hooks/playbooks/federation-controlplane-config.yml b/hooks/playbooks/federation-controlplane-config.yml index afaad2c767..002c1b5087 100644 --- a/hooks/playbooks/federation-controlplane-config.yml +++ b/hooks/playbooks/federation-controlplane-config.yml @@ -2,117 +2,17 @@ - name: Create kustomization to update Keystone to use Federation hosts: "{{ cifmw_target_hook_host | default('localhost') }}" tasks: - - name: Set urls for install type uni + - name: Set uni domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + cifmw_federation_domain: "apps.ocp.openstack.lab" when: cifmw_federation_deploy_type == "uni" - - name: Set urls for install type crc + - name: Set crc domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + cifmw_federation_domain: "apps-crc.testing" when: cifmw_federation_deploy_type == "crc" - - name: Create file to customize keystone for Federation resources deployed in the control plane - ansible.builtin.copy: - dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_federation.yaml" - content: |- - apiVersion: 
kustomize.config.k8s.io/v1beta1 - kind: Kustomization - resources: - - namespace: {{ namespace }} - patches: - - target: - kind: OpenStackControlPlane - name: .* - patch: |- - - op: add - path: /spec/tls - value: {} - - op: add - path: /spec/tls/caBundleSecretName - value: keycloakca - - op: add - path: /spec/keystone/template/httpdCustomization - value: - customConfigSecret: keystone-httpd-override - - op: add - path: /spec/keystone/template/customServiceConfig - value: | - [DEFAULT] - insecure_debug=true - debug=true - [federation] - trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ - sso_callback_template=/etc/keystone/sso_callback_template.html - [openid] - remote_id_attribute=HTTP_OIDC_ISS - [auth] - methods = password,token,oauth1,mapped,application_credential,openid - mode: "0644" - - - name: Get ingress operator CA cert - ansible.builtin.slurp: - src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" - register: federation_sso_ca - - - name: Add Keycloak CA secret - kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - state: present - definition: - apiVersion: v1 - kind: Secret - type: Opaque - metadata: - name: keycloakca - namespace: "openstack" - data: - KeyCloakCA: "{{ federation_sso_ca.content }}" - - - name: Create Keystone httpd override secret for Federation - kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - state: present - definition: - apiVersion: v1 - kind: Secret - metadata: - name: keystone-httpd-override - namespace: openstack - type: Opaque - stringData: - federation.conf: | - OIDCClaimPrefix "{{ cifmw_keystone_OIDC_ClaimPrefix }}" - OIDCResponseType "{{ cifmw_keystone_OIDC_ResponseType }}" - OIDCScope "{{ cifmw_keystone_OIDC_Scope }}" - OIDCClaimDelimiter "{{ cifmw_keystone_OIDC_ClaimDelimiter }}" - OIDCPassUserInfoAs "{{ cifmw_keystone_OIDC_PassUserInfoAs }}" - OIDCPassClaimsAs "{{ cifmw_keystone_OIDC_PassClaimsAs }}" - 
OIDCProviderMetadataURL "{{ cifmw_keystone_OIDC_ProviderMetadataURL }}" - OIDCClientID "{{ cifmw_keystone_OIDC_ClientID }}" - OIDCClientSecret "{{ cifmw_keystone_OIDC_ClientSecret }}" - OIDCCryptoPassphrase "{{ cifmw_keystone_OIDC_CryptoPassphrase }}" - OIDCOAuthClientID "{{ cifmw_keystone_OIDC_OAuthClientID }}" - OIDCOAuthClientSecret "{{ cifmw_keystone_OIDC_OAuthClientSecret }}" - OIDCOAuthIntrospectionEndpoint "{{ cifmw_keystone_OIDC_OAuthIntrospectionEndpoint }}" - OIDCRedirectURI "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/{{ cifmw_keystone_OIDC_provider_name }}/protocols/openid/websso/" - LogLevel debug - - - AuthType "openid-connect" - Require valid-user - - - - AuthType oauth20 - Require valid-user - - - - AuthType "openid-connect" - Require valid-user - + - name: Run SSO controlplane setup + ansible.builtin.import_role: + name: federation + tasks_from: hook_controlplane_config.yml diff --git a/hooks/playbooks/federation-horizon-controlplane-config.yml b/hooks/playbooks/federation-horizon-controlplane-config.yml index f363fb21e2..0731e5e69d 100644 --- a/hooks/playbooks/federation-horizon-controlplane-config.yml +++ b/hooks/playbooks/federation-horizon-controlplane-config.yml @@ -2,49 +2,17 @@ - name: Create kustomization to update Horizon to use Federation hosts: "{{ cifmw_target_hook_host | default('localhost') }}" tasks: - - name: Set urls for install type uni + - name: Read uni vars from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + cifmw_federation_domain: "apps.ocp.openstack.lab" when: cifmw_federation_deploy_type == "uni" - - name: Set urls for install type crc + - name: Read crc vars from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 
'https://keycloak-openstack.apps-crc.testing' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + cifmw_federation_domain: "apps-crc.testing" when: cifmw_federation_deploy_type == "crc" - - name: Create file to customize horizon for Federation resources deployed in the control plane - ansible.builtin.copy: - dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/horizon_federation.yaml" - mode: preserve - content: |- - apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - resources: - - namespace: {{ namespace }} - patches: - - target: - kind: OpenStackControlPlane - name: .* - patch: |- - - op: add - path: /spec/horizon/enabled - value: true - - op: add - path: /spec/horizon/template/memcachedInstance - value: memcached - - op: add - path: /spec/horizon/template/customServiceConfig - value: | - OPENSTACK_KEYSTONE_URL = "{{ cifmw_federation_keystone_url }}/v3" - WEBSSO_ENABLED = True - WEBSSO_CHOICES = ( - ("credentials", _("Keystone Credentials")), - ("OIDC", _("OpenID Connect")), - ) - WEBSSO_IDP_MAPPING = { - "OIDC": ("{{ cifmw_keystone_OIDC_provider_name }}", "openid"), - } + - name: Run SSO MultiRealm controlplane setup + ansible.builtin.import_role: + name: federation + tasks_from: hook_horizon_controlplane_config.yml diff --git a/hooks/playbooks/federation-multirealm-controlplane-config.yml b/hooks/playbooks/federation-multirealm-controlplane-config.yml new file mode 100644 index 0000000000..42bd597893 --- /dev/null +++ b/hooks/playbooks/federation-multirealm-controlplane-config.yml @@ -0,0 +1,18 @@ +--- +- name: Create kustomization to update Keystone to use MultiRealm Federation + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Set uni domain name var from federation role + ansible.builtin.set_fact: + cifmw_federation_domain: "apps.ocp.openstack.lab" + when: 
cifmw_federation_deploy_type == "uni" + + - name: Set crc domain name var from federation role + ansible.builtin.set_fact: + cifmw_federation_domain: "apps-crc.testing" + when: cifmw_federation_deploy_type == "crc" + + - name: Run SSO MultiRealm controlplane setup + ansible.builtin.import_role: + name: federation + tasks_from: hook_multirealm_controlplane_config.yml diff --git a/hooks/playbooks/federation-post-deploy.yml b/hooks/playbooks/federation-post-deploy.yml index bcd45e7754..c56a0207e7 100644 --- a/hooks/playbooks/federation-post-deploy.yml +++ b/hooks/playbooks/federation-post-deploy.yml @@ -18,26 +18,17 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true tasks: - - name: Set urls for install type uni + - name: Set uni domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + cifmw_federation_domain: "apps.ocp.openstack.lab" when: cifmw_federation_deploy_type == "uni" - - name: Set urls for install type crc + - name: Set crc domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + cifmw_federation_domain: "apps-crc.testing" when: cifmw_federation_deploy_type == "crc" - - name: Run federation setup on OSP + - name: Run federation post hook setup on OSP ansible.builtin.import_role: name: federation - tasks_from: run_openstack_setup.yml - - - name: Run federation OSP User Auth test - ansible.builtin.import_role: - name: federation - tasks_from: run_openstack_auth_test.yml + tasks_from: hook_post_deploy.yml diff --git 
a/hooks/playbooks/federation-pre-deploy.yml b/hooks/playbooks/federation-pre-deploy.yml index 791c48624c..43693fcb62 100644 --- a/hooks/playbooks/federation-pre-deploy.yml +++ b/hooks/playbooks/federation-pre-deploy.yml @@ -18,26 +18,17 @@ hosts: "{{ cifmw_target_host | default('localhost') }}" gather_facts: true tasks: - - name: Set urls for install type uni + - name: Set uni domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps.ocp.openstack.lab' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps.ocp.openstack.lab' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps.ocp.openstack.lab' + cifmw_federation_domain: "apps.ocp.openstack.lab" when: cifmw_federation_deploy_type == "uni" - - name: Set urls for install type crc + - name: Set crc domain name var from federation role ansible.builtin.set_fact: - cifmw_federation_keycloak_url: 'https://keycloak-openstack.apps-crc.testing' - cifmw_federation_keystone_url: 'https://keystone-public-openstack.apps-crc.testing' - cifmw_federation_horizon_url: 'https://horizon-openstack.apps-crc.testing' + cifmw_federation_domain: "apps-crc.testing" when: cifmw_federation_deploy_type == "crc" - - name: Run SSO pod setup on Openshift + - name: Run SSO pre deploy setup ansible.builtin.import_role: name: federation - tasks_from: run_keycloak_setup.yml - - - name: Run SSO realm setup for OSP - ansible.builtin.import_role: - name: federation - tasks_from: run_keycloak_realm_setup.yml + tasks_from: hook_pre_deploy.yml diff --git a/roles/federation/defaults/main.yml b/roles/federation/defaults/main.yml index 44a835be2a..acab89258d 100644 --- a/roles/federation/defaults/main.yml +++ b/roles/federation/defaults/main.yml @@ -1,25 +1,149 @@ --- -# defaults file for federation +# ============================================================================= +# CI Framework - Federation Role Default Variables +# 
============================================================================= +# This file contains all default variables for the federation role, which +# configures OpenStack Keystone federation with Keycloak (Red Hat SSO). # + +# ============================================================================= +# INFRASTRUCTURE CONFIGURATION +# ============================================================================= +# Basic namespace and domain settings for the federation deployment + +# Kubernetes namespaces cifmw_federation_keycloak_namespace: openstack +cifmw_federation_run_osp_cmd_namespace: openstack + +# Service URLs - dynamically constructed based on domain +cifmw_federation_keycloak_url: 'https://keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}' +cifmw_federation_keystone_url: 'https://keystone-public-{{ cifmw_federation_run_osp_cmd_namespace }}.{{ cifmw_federation_domain }}' +cifmw_federation_horizon_url: 'https://horizon-{{ cifmw_federation_run_osp_cmd_namespace }}.{{ cifmw_federation_domain }}' + +# ============================================================================= +# KEYCLOAK REALM CONFIGURATION +# ============================================================================= +# Keycloak realm names and administrative credentials + +# Realm names cifmw_federation_keycloak_realm: openstack +cifmw_federation_keycloak_realm2: openstack2 + +# Keycloak admin credentials cifmw_federation_keycloak_admin_username: admin cifmw_federation_keycloak_admin_password: nomoresecrets + +# URL validation settings +cifmw_federation_keycloak_url_validate_certs: false + +# Deploy one realm by default. Add true to job vars for multirealm deploys. 
+cifmw_federation_deploy_multirealm: false + +# ============================================================================= +# KEYCLOAK TEST USERS AND GROUPS - REALM 1 +# ============================================================================= +# Test users and groups for the first Keycloak realm + cifmw_federation_keycloak_testuser1_username: kctestuser1 cifmw_federation_keycloak_testuser1_password: nomoresecrets1 cifmw_federation_keycloak_testuser2_username: kctestuser2 cifmw_federation_keycloak_testuser2_password: nomoresecrets2 cifmw_federation_keycloak_testgroup1_name: kctestgroup1 cifmw_federation_keycloak_testgroup2_name: kctestgroup2 -cifmw_federation_keycloak_client_id: rhoso -cifmw_federation_keycloak_client_secret: COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f -cifmw_federation_keycloak_url_validate_certs: false -cifmw_federation_run_osp_cmd_namespace: openstack -cifmw_federation_domain: SSO + +# ============================================================================= +# KEYCLOAK TEST USERS AND GROUPS - REALM 2 (MULTIREALM) +# ============================================================================= +# Test users and groups for the second Keycloak realm (multirealm deployments) + +cifmw_federation_keycloak_testuser3_username: kctestuser3 +cifmw_federation_keycloak_testuser3_password: nomoresecrets3 +cifmw_federation_keycloak_testuser4_username: kctestuser4 +cifmw_federation_keycloak_testuser4_password: nomoresecrets4 +cifmw_federation_keycloak_testgroup3_name: kctestgroup3 +cifmw_federation_keycloak_testgroup4_name: kctestgroup4 + +# ============================================================================= +# OPENSTACK KEYSTONE INTEGRATION - REALM 1 +# ============================================================================= +# Identity Provider and domain configuration for the first realm + +# Identity Provider settings cifmw_federation_IdpName: kcIDP +cifmw_federation_keystone_domain: SSO cifmw_federation_remote_id: '{{ 
cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm }}' + +# Keystone mapping and project configuration +cifmw_federation_mapping_name: SSOmap cifmw_federation_project_name: SSOproject cifmw_federation_group_name: SSOgroup -cifmw_federation_mapping_name: SSOmap cifmw_federation_rules_file: rules.json cifmw_federation_clame_id: OIDC-preferred_username + +# ============================================================================= +# OPENSTACK KEYSTONE INTEGRATION - REALM 2 (MULTIREALM) +# ============================================================================= +# Identity Provider and domain configuration for the second realm + +# Identity Provider settings +cifmw_federation_IdpName2: kcIDP2 +cifmw_federation_keystone_domain2: SSO2 +cifmw_federation_remote_id2: '{{ cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm2 }}' + +# Keystone mapping and project configuration +cifmw_federation_mapping_name2: SSOmap2 +cifmw_federation_project_name2: SSOproject2 +cifmw_federation_group_name2: SSOgroup2 + +# ============================================================================= +# OIDC CONFIGURATION FOR KEYSTONE +# ============================================================================= +# OpenID Connect settings for Apache mod_auth_openidc in Keystone + +# OIDC Protocol settings +cifmw_federation_keystone_OIDC_ClaimDelimiter: ";" +cifmw_federation_keystone_OIDC_ClaimPrefix: "OIDC-" +cifmw_federation_keystone_OIDC_PassClaimsAs: "both" +cifmw_federation_keystone_OIDC_PassUserInfoAs: "claims" +cifmw_federation_keystone_OIDC_ResponseType: "id_token" +cifmw_federation_keystone_OIDC_Scope: "openid email profile" +cifmw_federation_keystone_OIDC_CryptoPassphrase: "openstack" + +# OIDC Provider URLs +cifmw_federation_keystone_OIDC_ProviderMetadataURL: "{{ cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm }}/.well-known/openid-configuration" 
+cifmw_federation_keystone_OIDC_ProviderMetadataURL2: "{{ cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm2 }}/.well-known/openid-configuration" +cifmw_federation_keystone_OIDC_OAuthIntrospectionEndpoint: "{{ cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm }}/protocol/openid-connect/token/introspect" + +# ============================================================================= +# OIDC CLIENT CREDENTIALS - REALM 1 +# ============================================================================= +# OIDC client credentials for the first realm + +cifmw_federation_keystone_OIDC_ClientID: "rhoso" +cifmw_federation_keystone_OIDC_ClientSecret: "COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f" + +# ============================================================================= +# OIDC CLIENT CREDENTIALS - REALM 2 (MULTIREALM) +# ============================================================================= +# OIDC client credentials for the second realm + +cifmw_federation_keystone_OIDC_ClientID2: "rhoso2" +cifmw_federation_keystone_OIDC_ClientSecret2: "U0nM9j2qyDp1Qc3uytXleJrFI1SntJWF" + +# ============================================================================= +# KEYSTONE FEDERATION METADATA FILES - REALM 1 +# ============================================================================= +# File names for Keystone federation metadata configuration (URL encoded) + +cifmw_federation_keystone_idp1_conf_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ cifmw_federation_keycloak_realm }}.conf" +cifmw_federation_keystone_idp1_client_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ cifmw_federation_keycloak_realm }}.client" +cifmw_federation_keystone_idp1_provider_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ 
cifmw_federation_keycloak_realm }}.provider" + +# ============================================================================= +# KEYSTONE FEDERATION METADATA FILES - REALM 2 (MULTIREALM) +# ============================================================================= +# File names for Keystone federation metadata configuration for second realm + +cifmw_federation_keystone_idp2_conf_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ cifmw_federation_keycloak_realm2 }}.conf" +cifmw_federation_keystone_idp2_client_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ cifmw_federation_keycloak_realm2 }}.client" +cifmw_federation_keystone_idp2_provider_filename: "keycloak-{{ cifmw_federation_keycloak_namespace }}.{{ cifmw_federation_domain }}%2Fauth%2Frealms%2F{{ cifmw_federation_keycloak_realm2 }}.provider" diff --git a/roles/federation/tasks/hook_controlplane_config.yml b/roles/federation/tasks/hook_controlplane_config.yml new file mode 100644 index 0000000000..c1974c1e50 --- /dev/null +++ b/roles/federation/tasks/hook_controlplane_config.yml @@ -0,0 +1,85 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Create file to customize keystone for Federation resources deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_federation.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ cifmw_federation_run_osp_cmd_namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/tls + value: {} + - op: add + path: /spec/tls/caBundleSecretName + value: keycloakca + - op: add + path: /spec/keystone/template/httpdCustomization + value: + customConfigSecret: keystone-httpd-override + - op: add + path: /spec/keystone/template/customServiceConfig + value: | + [DEFAULT] + insecure_debug=true + debug=true + [federation] + trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ + [openid] + remote_id_attribute=HTTP_OIDC_ISS + [auth] + methods = password,token,oauth1,mapped,application_credential,openid + mode: "0644" + +- name: Get ingress operator CA cert + ansible.builtin.slurp: + src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + register: federation_sso_ca + +- name: Add Keycloak CA secret + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + type: Opaque + metadata: + name: keycloakca + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + data: + KeyCloakCA: "{{ federation_sso_ca.content }}" + +- name: Create Keystone httpd override secret for Federation + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: keystone-httpd-override + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + type: Opaque + stringData: + federation.conf: "{{ lookup('template', 'federation-single.conf.j2') }}" diff --git 
a/roles/federation/tasks/hook_horizon_controlplane_config.yml b/roles/federation/tasks/hook_horizon_controlplane_config.yml new file mode 100644 index 0000000000..43b42d3668 --- /dev/null +++ b/roles/federation/tasks/hook_horizon_controlplane_config.yml @@ -0,0 +1,60 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Set websso settings for single IdP + ansible.builtin.set_fact: + cifmw_federation_websso_choices: '("OIDC", _("OpenID Connect")),' + cifmw_federation_websso_idp_mapping: '"OIDC": ("{{ cifmw_federation_IdpName }}", "openid"),' + when: cifmw_federation_deploy_multirealm is false + +- name: Set websso settings for multiple IdP + ansible.builtin.set_fact: + cifmw_federation_websso_choices: '("OIDC1", _("OpenID Connect IdP1")),("OIDC2", _("OpenID Connect IdP2")),' + cifmw_federation_websso_idp_mapping: '"OIDC1": ("{{ cifmw_federation_IdpName }}", "openid"),"OIDC2": ("{{ cifmw_federation_IdpName2 }}", "openid"),' + when: cifmw_federation_deploy_multirealm is true + +- name: Create file to customize horizon for Federation resources deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/horizon_federation.yaml" + mode: preserve + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: 
|- + - op: add + path: /spec/horizon/enabled + value: true + - op: add + path: /spec/horizon/template/memcachedInstance + value: memcached + - op: add + path: /spec/horizon/template/customServiceConfig + value: | + OPENSTACK_KEYSTONE_URL = "{{ cifmw_federation_keystone_url }}/v3" + WEBSSO_ENABLED = True + WEBSSO_CHOICES = ( + ("credentials", _("Keystone Credentials")), + {{ cifmw_federation_websso_choices }} + ) + WEBSSO_IDP_MAPPING = { + {{ cifmw_federation_websso_idp_mapping }} + } diff --git a/roles/federation/tasks/hook_multirealm_controlplane_config.yml b/roles/federation/tasks/hook_multirealm_controlplane_config.yml new file mode 100644 index 0000000000..5e5ca60e7a --- /dev/null +++ b/roles/federation/tasks/hook_multirealm_controlplane_config.yml @@ -0,0 +1,158 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Create file to customize keystone for IPA deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_multirealm_federation.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ cifmw_federation_run_osp_cmd_namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/tls + value: {} + - op: add + path: /spec/tls/caBundleSecretName + value: keycloakca + - op: add + path: /spec/keystone/template/httpdCustomization + value: + customConfigSecret: keystone-httpd-override + - op: add + path: /spec/keystone/template/federatedRealmConfig + value: federation-realm-data + - op: add + path: /spec/keystone/template/customServiceConfig + value: | + [DEFAULT] + insecure_debug=true + debug=true + [federation] + trusted_dashboard={{ cifmw_federation_horizon_url }}/dashboard/auth/websso/ + [openid] + remote_id_attribute=HTTP_OIDC_ISS + [auth] + methods = password,token,oauth1,mapped,application_credential,openid + mode: "0644" + +- name: Get ingress operator CA cert + ansible.builtin.slurp: + src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + register: federation_sso_ca + +- name: Add Keycloak CA secret + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + type: Opaque + metadata: + name: keycloakca + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + data: + KeyCloakCA: "{{ federation_sso_ca.content }}" + +- name: Create Keystone httpd override secret for Federation + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: keystone-httpd-override + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + type: Opaque + stringData: + 
federation.conf: "{{ lookup('template', 'federation-multirealm.conf.j2') }}" + +- name: Download realm1 OpenID configuration + ansible.builtin.uri: + url: "{{ cifmw_federation_keystone_OIDC_ProviderMetadataURL }}" + method: GET + return_content: true + validate_certs: false + register: openid_wellknown_config1 + +- name: Download realm2 OpenID configuration + ansible.builtin.uri: + url: "{{ cifmw_federation_keystone_OIDC_ProviderMetadataURL2 }}" + method: GET + return_content: true + validate_certs: false + register: openid_wellknown_config2 + +- name: Set federation_config_items + ansible.builtin.set_fact: + federation_config_items: + - filename: "{{ cifmw_federation_keystone_idp1_conf_filename }}" + contents: | + { + "scope" : "openid email profile" + } + - filename: "{{ cifmw_federation_keystone_idp1_client_filename }}" + contents: "{{ {'client_id': cifmw_federation_keystone_OIDC_ClientID, 'client_secret': cifmw_federation_keystone_OIDC_ClientSecret } | to_json }}" + - filename: "{{ cifmw_federation_keystone_idp1_provider_filename }}" + contents: | + {{ openid_wellknown_config1.content }} + - filename: "{{ cifmw_federation_keystone_idp2_conf_filename }}" + contents: | + { + "scope" : "openid email profile" + } + - filename: "{{ cifmw_federation_keystone_idp2_client_filename }}" + contents: "{{ {'client_id': cifmw_federation_keystone_OIDC_ClientID2, 'client_secret': cifmw_federation_keystone_OIDC_ClientSecret2 } | to_json }}" + - filename: "{{ cifmw_federation_keystone_idp2_provider_filename }}" + contents: | + {{ openid_wellknown_config2.content }} + +- name: Generate the final federation_config.json string (as a dictionary) + ansible.builtin.set_fact: + _raw_federation_config_json_value: | + { + {% for item in federation_config_items %} + "{{ item.filename }}": {{ item.contents }}{% if not loop.last %},{% endif %} + {% endfor %} + } + +- name: Final JSON string for Secret stringData + ansible.builtin.set_fact: + federation_config_json_string: "{{ 
_raw_federation_config_json_value }}" + +- name: Print the generated JSON string for verification + ansible.builtin.debug: + var: federation_config_json_string + +- name: Create a Kubernetes Secret with federation metadata + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + type: Opaque + metadata: + name: federation-realm-data + namespace: openstack + stringData: + federation-config.json: "{{ federation_config_json_string }}" diff --git a/roles/federation/tasks/hook_post_deploy.yml b/roles/federation/tasks/hook_post_deploy.yml new file mode 100644 index 0000000000..7b49c46330 --- /dev/null +++ b/roles/federation/tasks/hook_post_deploy.yml @@ -0,0 +1,80 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Build realm configurations for single realm OpenStack setup + ansible.builtin.set_fact: + _federation_openstack_realms_to_process: + - realm_id: 1 + keystone_domain: "{{ cifmw_federation_keystone_domain }}" + remote_id: "{{ cifmw_federation_remote_id }}" + IdpName: "{{ cifmw_federation_IdpName }}" + mapping_name: "{{ cifmw_federation_mapping_name }}" + group_name: "{{ cifmw_federation_group_name }}" + project_name: "{{ cifmw_federation_project_name }}" + when: not cifmw_federation_deploy_multirealm|bool + +- name: Build realm configurations for multirealm OpenStack setup + ansible.builtin.set_fact: + _federation_openstack_realms_to_process: + - realm_id: 1 + keystone_domain: "{{ cifmw_federation_keystone_domain }}" + remote_id: "{{ cifmw_federation_remote_id }}" + IdpName: "{{ cifmw_federation_IdpName }}" + mapping_name: "{{ cifmw_federation_mapping_name }}" + group_name: "{{ cifmw_federation_group_name }}" + project_name: "{{ cifmw_federation_project_name }}" + - realm_id: 2 + keystone_domain: "{{ cifmw_federation_keystone_domain2 }}" + remote_id: "{{ cifmw_federation_remote_id2 }}" + IdpName: "{{ cifmw_federation_IdpName2 }}" + mapping_name: "{{ cifmw_federation_mapping_name2 }}" + group_name: "{{ cifmw_federation_group_name2 }}" + project_name: "{{ cifmw_federation_project_name2 }}" + when: cifmw_federation_deploy_multirealm|bool + +- name: Run federation setup on OSP for all realms + ansible.builtin.include_role: + name: federation + tasks_from: run_openstack_setup.yml + vars: + cifmw_federation_keystone_domain: "{{ realm.keystone_domain }}" + cifmw_federation_remote_id: "{{ realm.remote_id }}" + cifmw_federation_IdpName: "{{ realm.IdpName }}" + cifmw_federation_mapping_name: "{{ realm.mapping_name }}" + cifmw_federation_group_name: "{{ realm.group_name }}" + cifmw_federation_project_name: "{{ realm.project_name }}" + loop: "{{ _federation_openstack_realms_to_process }}" + loop_control: + loop_var: realm + label: "Realm {{ realm.realm_id }}: {{ 
realm.IdpName }}" + +- name: Run federation OSP User Auth setup + ansible.builtin.import_role: + name: federation + tasks_from: run_openstack_auth_setup.yml + +# MultiRole CLI testing is not available. It is only currently supported in Horizon. +# Auth tests only run in single realm mode - not supported in multirealm +- name: Run federation OSP User Auth test for first realm + ansible.builtin.include_role: + name: federation + tasks_from: run_openstack_auth_test.yml + vars: + cifmw_federation_keycloak_testuser_username: "{{ item }}" + loop: + - "{{ cifmw_federation_keycloak_testuser1_username }}" + - "{{ cifmw_federation_keycloak_testuser2_username }}" + when: not cifmw_federation_deploy_multirealm|bool diff --git a/roles/federation/tasks/hook_pre_deploy.yml b/roles/federation/tasks/hook_pre_deploy.yml new file mode 100644 index 0000000000..9e59cf390d --- /dev/null +++ b/roles/federation/tasks/hook_pre_deploy.yml @@ -0,0 +1,83 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Run SSO pod setup on Openshift + ansible.builtin.import_role: + name: federation + tasks_from: run_keycloak_setup.yml + +- name: Build realm configurations for single realm + ansible.builtin.set_fact: + _federation_realms_to_process: + - realm_id: 1 + keycloak_realm: "{{ cifmw_federation_keycloak_realm }}" + testuser1_username: "{{ cifmw_federation_keycloak_testuser1_username }}" + testuser1_password: "{{ cifmw_federation_keycloak_testuser1_password }}" + testuser2_username: "{{ cifmw_federation_keycloak_testuser2_username }}" + testuser2_password: "{{ cifmw_federation_keycloak_testuser2_password }}" + testgroup1_name: "{{ cifmw_federation_keycloak_testgroup1_name }}" + testgroup2_name: "{{ cifmw_federation_keycloak_testgroup2_name }}" + IdpName: "{{ cifmw_federation_IdpName }}" + keystone_client_id: "{{ cifmw_federation_keystone_OIDC_ClientID }}" + keystone_client_secret: "{{ cifmw_federation_keystone_OIDC_ClientSecret }}" + when: not cifmw_federation_deploy_multirealm|bool + +- name: Build realm configurations for multirealm + ansible.builtin.set_fact: + _federation_realms_to_process: + - realm_id: 1 + keycloak_realm: "{{ cifmw_federation_keycloak_realm }}" + testuser1_username: "{{ cifmw_federation_keycloak_testuser1_username }}" + testuser1_password: "{{ cifmw_federation_keycloak_testuser1_password }}" + testuser2_username: "{{ cifmw_federation_keycloak_testuser2_username }}" + testuser2_password: "{{ cifmw_federation_keycloak_testuser2_password }}" + testgroup1_name: "{{ cifmw_federation_keycloak_testgroup1_name }}" + testgroup2_name: "{{ cifmw_federation_keycloak_testgroup2_name }}" + IdpName: "{{ cifmw_federation_IdpName }}" + keystone_client_id: "{{ cifmw_federation_keystone_OIDC_ClientID }}" + keystone_client_secret: "{{ cifmw_federation_keystone_OIDC_ClientSecret }}" + - realm_id: 2 + keycloak_realm: "{{ cifmw_federation_keycloak_realm2 }}" + testuser1_username: "{{ cifmw_federation_keycloak_testuser3_username }}" + testuser1_password: "{{ 
cifmw_federation_keycloak_testuser3_password }}" + testuser2_username: "{{ cifmw_federation_keycloak_testuser4_username }}" + testuser2_password: "{{ cifmw_federation_keycloak_testuser4_password }}" + testgroup1_name: "{{ cifmw_federation_keycloak_testgroup3_name }}" + testgroup2_name: "{{ cifmw_federation_keycloak_testgroup4_name }}" + IdpName: "{{ cifmw_federation_IdpName2 }}" + keystone_client_id: "{{ cifmw_federation_keystone_OIDC_ClientID2 }}" + keystone_client_secret: "{{ cifmw_federation_keystone_OIDC_ClientSecret2 }}" + when: cifmw_federation_deploy_multirealm|bool + +- name: Run SSO realm setup for all configured realms + ansible.builtin.include_role: + name: federation + tasks_from: run_keycloak_realm_setup.yml + vars: + cifmw_federation_keycloak_realm: '{{ realm.keycloak_realm }}' + cifmw_federation_keycloak_testuser1_username: '{{ realm.testuser1_username }}' + cifmw_federation_keycloak_testuser1_password: '{{ realm.testuser1_password }}' + cifmw_federation_keycloak_testuser2_username: '{{ realm.testuser2_username }}' + cifmw_federation_keycloak_testuser2_password: '{{ realm.testuser2_password }}' + cifmw_federation_keycloak_testgroup1_name: '{{ realm.testgroup1_name }}' + cifmw_federation_keycloak_testgroup2_name: '{{ realm.testgroup2_name }}' + cifmw_federation_IdpName: '{{ realm.IdpName }}' + cifmw_federation_keystone_client_id: '{{ realm.keystone_client_id }}' + cifmw_federation_keystone_client_secret: '{{ realm.keystone_client_secret }}' + loop: "{{ _federation_realms_to_process }}" + loop_control: + loop_var: realm + label: "Setting up Keycloak realm {{ realm.realm_id }}: {{ realm.keycloak_realm }}" diff --git a/roles/federation/tasks/run_keycloak_realm_setup.yml b/roles/federation/tasks/run_keycloak_realm_setup.yml index b001e5ebff..bc04458b70 100644 --- a/roles/federation/tasks/run_keycloak_realm_setup.yml +++ b/roles/federation/tasks/run_keycloak_realm_setup.yml @@ -37,8 +37,7 @@ auth_password: "{{ cifmw_federation_keycloak_admin_password }}" 
state: present realm: "{{ cifmw_federation_keycloak_realm }}" - client_id: "{{ cifmw_federation_keycloak_client_id }}" - id: 3fb4f68d-ad2c-46e7-a579-ea418f5d150b + client_id: "{{ cifmw_federation_keystone_client_id }}" name: 'RHOSO Client' description: 'RHOSO client for keystone federation' root_url: "{{ cifmw_federation_keystone_url }}" @@ -46,10 +45,12 @@ base_url: '/dashboard/project' enabled: true client_authenticator_type: client-secret - secret: "{{ cifmw_federation_keycloak_client_secret }}" + secret: "{{ cifmw_federation_keystone_client_secret }}" redirect_uris: - - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/kcIDP/protocols/openid/websso/" + - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/{{ cifmw_federation_IdpName }}/protocols/openid/websso/" + - "{{ cifmw_federation_keystone_url }}/v3/OS-FEDERATION/identity_providers/{{ cifmw_federation_IdpName }}/protocols/openid/auth" - "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/websso/openid" + - "{{ cifmw_federation_keystone_url }}/v3/redirect_uri" - "{{ cifmw_federation_horizon_url }}/dashboard/auth/websso/" web_origins: - "{{ cifmw_federation_keystone_url }}" diff --git a/roles/federation/tasks/run_openstack_auth_setup.yml b/roles/federation/tasks/run_openstack_auth_setup.yml new file mode 100644 index 0000000000..55c2a30ce1 --- /dev/null +++ b/roles/federation/tasks/run_openstack_auth_setup.yml @@ -0,0 +1,77 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Render federation get token script + ansible.builtin.template: + src: get-token.sh.j2 + dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'get-token.sh' ] | path_join }}" + mode: '0755' + +- name: Copy federation get token script file into pod + kubernetes.core.k8s_cp: + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/get-token.sh" + local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'get-token.sh' ] | path_join }}" + +- name: Render federation test user1 cloudrc template + ansible.builtin.template: + src: kctestuser1.j2 + dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" + mode: "0644" + +- name: Render federation test user2 cloudrc template + ansible.builtin.template: + src: kctestuser2.j2 + dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser2_username ] | path_join }}" + mode: "0644" + +- name: Copy federation test user1 cloudrc file into pod + kubernetes.core.k8s_cp: + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/{{ cifmw_federation_keycloak_testuser1_username }}" + local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" + +- name: Copy federation test user2 cloudrc file into pod + kubernetes.core.k8s_cp: + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/{{ cifmw_federation_keycloak_testuser2_username }}" + local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser2_username ] | path_join }}" + +- name: Copy system CA bundle + ansible.builtin.copy: + src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" + 
dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" + mode: "0444" + +- name: Get ingress operator CA cert + ansible.builtin.slurp: + src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" + register: federation_sso_ca + +- name: Add ingress operator CA to bundle + ansible.builtin.blockinfile: + path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" + block: "{{ federation_sso_ca.content | b64decode }}" + +- name: Copy CA bundle to openstackclient pod + kubernetes.core.k8s_cp: + namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" + pod: openstackclient + remote_path: "/home/cloud-admin/full-ca-list.crt" + local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" diff --git a/roles/federation/tasks/run_openstack_auth_test.yml b/roles/federation/tasks/run_openstack_auth_test.yml index f87b2d9a53..0bd505ed93 100644 --- a/roles/federation/tasks/run_openstack_auth_test.yml +++ b/roles/federation/tasks/run_openstack_auth_test.yml @@ -14,71 +14,22 @@ # License for the specific language governing permissions and limitations # under the License. 
-- name: Read federation get token script - ansible.builtin.template: - src: get-token.sh.j2 - dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'get-token.sh' ] | path_join }}" - mode: '0755' - -- name: Copy federation get token script file into pod - kubernetes.core.k8s_cp: - namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" - pod: openstackclient - remote_path: "/home/cloud-admin/get-token.sh" - local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'get-token.sh' ] | path_join }}" - -- name: Read federation test user1 cloudrc template - ansible.builtin.template: - src: kctestuser1.j2 - dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" - mode: "0644" - -- name: Copy federation test user1 cloudrc file into pod - kubernetes.core.k8s_cp: - namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" - pod: openstackclient - remote_path: "/home/cloud-admin/{{ cifmw_federation_keycloak_testuser1_username }}" - local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', cifmw_federation_keycloak_testuser1_username ] | path_join }}" - -- name: Copy system CA bundle - ansible.builtin.copy: - src: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" - dest: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" - mode: "0444" - -- name: Get ingress operator CA cert - ansible.builtin.slurp: - src: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'ingress-operator-ca.crt'] | path_join }}" - register: federation_sso_ca - -- name: Add ingress operator CA to bundle - ansible.builtin.blockinfile: - path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" - block: "{{ federation_sso_ca.content | b64decode }}" - -- name: Copy CA bundle to openstackclient pod - kubernetes.core.k8s_cp: - namespace: "{{ cifmw_federation_run_osp_cmd_namespace }}" - pod: openstackclient - remote_path: 
"/home/cloud-admin/full-ca-list.crt" - local_path: "{{ [ ansible_user_dir, 'ci-framework-data', 'tmp', 'full-ca-list.crt' ] | path_join }}" - -- name: Get test user1 token +- name: Get test user token vars: - _osp_cmd: "/home/cloud-admin/get-token.sh {{ cifmw_federation_keycloak_testuser1_username }}" + _osp_cmd: "/home/cloud-admin/get-token.sh {{ cifmw_federation_keycloak_testuser_username }}" ansible.builtin.include_tasks: run_osp_cmd.yml -- name: Read test user1 token info +- name: Read test user token info ansible.builtin.set_fact: - federation_sso_testuser1_token_json: "{{ federation_run_ocp_cmd.stdout | from_json }}" + federation_sso_testuser_token_json: "{{ federation_run_ocp_cmd.stdout | from_json }}" -- name: Output test user1 token info +- name: Output test user token info ansible.builtin.debug: - msg: "{{ federation_sso_testuser1_token_json }}" + msg: "{{ federation_sso_testuser_token_json }}" - name: Get openstack project vars: - _osp_cmd: "openstack project show {{ federation_sso_testuser1_token_json.project_id}} -f json" + _osp_cmd: "openstack project show {{ federation_sso_testuser_token_json.project_id}} -f json" ansible.builtin.include_tasks: run_osp_cmd.yml - name: Read openstack project info @@ -89,8 +40,8 @@ ansible.builtin.debug: msg: "{{ federation_sso_ssoproject_json }}" -- name: Test user1 successful token +- name: Test user successful token ansible.builtin.assert: that: - "cifmw_federation_project_name in federation_sso_ssoproject_json.name" - - federation_sso_testuser1_token_json.id|length >= 180 + - federation_sso_testuser_token_json.id|length >= 180 diff --git a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index a4abd325c4..4affbde457 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -23,14 +23,14 @@ - name: Run federation create domain vars: - _osp_cmd: "openstack domain create {{ cifmw_federation_domain }}" + _osp_cmd: 
"openstack domain create {{ cifmw_federation_keystone_domain }}" ansible.builtin.include_tasks: run_osp_cmd.yml - name: Run federation identity provider create vars: _osp_cmd: "openstack identity provider create --remote-id {{ cifmw_federation_remote_id }} - --domain {{ cifmw_federation_domain }} + --domain {{ cifmw_federation_keystone_domain }} {{ cifmw_federation_IdpName }}" ansible.builtin.include_tasks: run_osp_cmd.yml @@ -57,14 +57,14 @@ - name: Run federation group create vars: _osp_cmd: "openstack group create - --domain {{ cifmw_federation_domain }} + --domain {{ cifmw_federation_keystone_domain }} {{ cifmw_federation_group_name }}" ansible.builtin.include_tasks: run_osp_cmd.yml - name: Run federation project create vars: _osp_cmd: "openstack project create - --domain {{ cifmw_federation_domain }} + --domain {{ cifmw_federation_keystone_domain }} {{ cifmw_federation_project_name }}" ansible.builtin.include_tasks: run_osp_cmd.yml @@ -72,9 +72,9 @@ vars: _osp_cmd: "openstack role add --group {{ cifmw_federation_group_name }} - --group-domain {{ cifmw_federation_domain }} + --group-domain {{ cifmw_federation_keystone_domain }} --project {{ cifmw_federation_project_name }} - --project-domain {{ cifmw_federation_domain }} + --project-domain {{ cifmw_federation_keystone_domain }} member" ansible.builtin.include_tasks: run_osp_cmd.yml diff --git a/roles/federation/templates/federation-multirealm.conf.j2 b/roles/federation/templates/federation-multirealm.conf.j2 new file mode 100644 index 0000000000..59e7af480c --- /dev/null +++ b/roles/federation/templates/federation-multirealm.conf.j2 @@ -0,0 +1,40 @@ +OIDCClaimPrefix "{{ cifmw_federation_keystone_OIDC_ClaimPrefix }}" +OIDCResponseType "{{ cifmw_federation_keystone_OIDC_ResponseType }}" +OIDCScope "{{ cifmw_federation_keystone_OIDC_Scope }}" +OIDCClaimDelimiter "{{ cifmw_federation_keystone_OIDC_ClaimDelimiter }}" +OIDCPassUserInfoAs "{{ cifmw_federation_keystone_OIDC_PassUserInfoAs }}" +OIDCPassClaimsAs "{{ 
cifmw_federation_keystone_OIDC_PassClaimsAs }}" +OIDCCryptoPassphrase "{{ cifmw_federation_keystone_OIDC_CryptoPassphrase }}" +OIDCMetadataDir "/var/lib/httpd/metadata" +OIDCRedirectURI "{{ cifmw_federation_keystone_url }}/v3/redirect_uri" +LogLevel debug + + + AuthType "openid-connect" + Require valid-user + + + + AuthType oauth20 + Require valid-user + + + + AuthType "openid-connect" + Require valid-user + + + + AuthType oauth20 + Require valid-user + + + + Require valid-user + AuthType openid-connect + + + + AuthType "openid-connect" + Require valid-user + diff --git a/roles/federation/templates/federation-single.conf.j2 b/roles/federation/templates/federation-single.conf.j2 new file mode 100644 index 0000000000..cc8f3f0d1f --- /dev/null +++ b/roles/federation/templates/federation-single.conf.j2 @@ -0,0 +1,30 @@ +OIDCClaimPrefix "{{ cifmw_federation_keystone_OIDC_ClaimPrefix }}" +OIDCResponseType "{{ cifmw_federation_keystone_OIDC_ResponseType }}" +OIDCScope "{{ cifmw_federation_keystone_OIDC_Scope }}" +OIDCClaimDelimiter "{{ cifmw_federation_keystone_OIDC_ClaimDelimiter }}" +OIDCPassUserInfoAs "{{ cifmw_federation_keystone_OIDC_PassUserInfoAs }}" +OIDCPassClaimsAs "{{ cifmw_federation_keystone_OIDC_PassClaimsAs }}" +OIDCProviderMetadataURL "{{ cifmw_federation_keystone_OIDC_ProviderMetadataURL }}" +OIDCClientID "{{ cifmw_federation_keystone_OIDC_ClientID }}" +OIDCClientSecret "{{ cifmw_federation_keystone_OIDC_ClientSecret }}" +OIDCCryptoPassphrase "{{ cifmw_federation_keystone_OIDC_CryptoPassphrase }}" +OIDCOAuthClientID "{{ cifmw_federation_keystone_OIDC_ClientID }}" +OIDCOAuthClientSecret "{{ cifmw_federation_keystone_OIDC_ClientSecret }}" +OIDCOAuthIntrospectionEndpoint "{{ cifmw_federation_keystone_OIDC_OAuthIntrospectionEndpoint }}" +OIDCRedirectURI "{{ cifmw_federation_keystone_url }}/v3/auth/OS-FEDERATION/identity_providers/{{ cifmw_federation_IdpName }}/protocols/openid/websso/" +LogLevel debug + + + AuthType "openid-connect" + Require valid-user + + + 
+ AuthType oauth20 + Require valid-user + + + + AuthType "openid-connect" + Require valid-user + diff --git a/roles/federation/templates/kctestuser1.j2 b/roles/federation/templates/kctestuser1.j2 index c64e21cb4c..fcd123812c 100644 --- a/roles/federation/templates/kctestuser1.j2 +++ b/roles/federation/templates/kctestuser1.j2 @@ -1,7 +1,7 @@ unset OS_CLOUD export OS_CACERT=/home/cloud-admin/full-ca-list.crt export OS_PROJECT_NAME="{{ cifmw_federation_project_name }}" -export OS_PROJECT_DOMAIN_NAME="{{ cifmw_federation_domain }}" +export OS_PROJECT_DOMAIN_NAME="{{ cifmw_federation_keystone_domain }}" export OS_AUTH_URL="{{ cifmw_federation_keystone_url }}/v3" export OS_IDENTITY_API_VERSION=3 export OS_AUTH_PLUGIN=openid @@ -9,8 +9,8 @@ export OS_AUTH_TYPE=v3oidcpassword export OS_USERNAME="{{ cifmw_federation_keycloak_testuser1_username }}" export OS_PASSWORD="{{ cifmw_federation_keycloak_testuser1_password }}" export OS_IDENTITY_PROVIDER="{{ cifmw_federation_IdpName }}" -export OS_CLIENT_ID="{{ cifmw_federation_keycloak_client_id }}" -export OS_CLIENT_SECRET="{{ cifmw_federation_keycloak_client_secret }}" +export OS_CLIENT_ID="{{ cifmw_federation_keystone_OIDC_ClientID }}" +export OS_CLIENT_SECRET="{{ cifmw_federation_keystone_OIDC_ClientSecret }}" export OS_OPENID_SCOPE="openid profile email" export OS_PROTOCOL=openid export OS_ACCESS_TOKEN_TYPE=access_token diff --git a/roles/federation/templates/kctestuser2.j2 b/roles/federation/templates/kctestuser2.j2 new file mode 100644 index 0000000000..269a2d1233 --- /dev/null +++ b/roles/federation/templates/kctestuser2.j2 @@ -0,0 +1,17 @@ +unset OS_CLOUD +export OS_CACERT=/home/cloud-admin/full-ca-list.crt +export OS_PROJECT_NAME="{{ cifmw_federation_project_name }}" +export OS_PROJECT_DOMAIN_NAME="{{ cifmw_federation_keystone_domain }}" +export OS_AUTH_URL="{{ cifmw_federation_keystone_url }}/v3" +export OS_IDENTITY_API_VERSION=3 +export OS_AUTH_PLUGIN=openid +export OS_AUTH_TYPE=v3oidcpassword +export OS_USERNAME="{{ 
cifmw_federation_keycloak_testuser2_username }}" +export OS_PASSWORD="{{ cifmw_federation_keycloak_testuser2_password }}" +export OS_IDENTITY_PROVIDER="{{ cifmw_federation_IdpName }}" +export OS_CLIENT_ID="{{ cifmw_federation_keystone_OIDC_ClientID }}" +export OS_CLIENT_SECRET="{{ cifmw_federation_keystone_OIDC_ClientSecret }}" +export OS_OPENID_SCOPE="openid profile email" +export OS_PROTOCOL=openid +export OS_ACCESS_TOKEN_TYPE=access_token +export OS_DISCOVERY_ENDPOINT="{{ cifmw_federation_keycloak_url }}/auth/realms/{{ cifmw_federation_keycloak_realm }}/.well-known/openid-configuration" diff --git a/roles/federation/templates/rules.json.j2 b/roles/federation/templates/rules.json.j2 index 444f4e315d..65c7d15fe0 100644 --- a/roles/federation/templates/rules.json.j2 +++ b/roles/federation/templates/rules.json.j2 @@ -8,7 +8,7 @@ "group": { "name": "{{ cifmw_federation_group_name }}", "domain": { - "name": "{{ cifmw_federation_domain }}" + "name": "{{ cifmw_federation_keystone_domain }}" } } } From 9ecca9e54fceb1c234ad0faaeda076a3ed7341e9 Mon Sep 17 00:00:00 2001 From: James Slagle Date: Thu, 18 Sep 2025 14:44:51 -0400 Subject: [PATCH 370/480] Add cifmw-crc-edpm-podified-baremetal-bootc job The job uses bootc as the base OS for the edpm baremetal compute node. Adds a new podified-multinode-edpm-baremetal-bootc-pipeline which will run the new job. 
Depends-On: https://github.com/openstack-k8s-operators/install_yamls/pull/1091 Signed-off-by: James Slagle --- roles/edpm_deploy_baremetal/defaults/main.yml | 1 + roles/edpm_deploy_baremetal/tasks/main.yml | 2 ++ zuul.d/edpm.yaml | 15 +++++++++++++++ zuul.d/project-templates.yaml | 9 +++++++++ 4 files changed, 27 insertions(+) diff --git a/roles/edpm_deploy_baremetal/defaults/main.yml b/roles/edpm_deploy_baremetal/defaults/main.yml index 2db42adeaa..58d519d999 100644 --- a/roles/edpm_deploy_baremetal/defaults/main.yml +++ b/roles/edpm_deploy_baremetal/defaults/main.yml @@ -29,3 +29,4 @@ cifmw_edpm_deploy_baremetal_update_os_containers: false cifmw_edpm_deploy_baremetal_repo_setup_override: false cifmw_edpm_deploy_baremetal_create_vms: true cifmw_edpm_deploy_baremetal_nova_compute_extra_config: "" +cifmw_edpm_deploy_baremetal_bootc: false diff --git a/roles/edpm_deploy_baremetal/tasks/main.yml b/roles/edpm_deploy_baremetal/tasks/main.yml index 9659825f63..e67a7f654f 100644 --- a/roles/edpm_deploy_baremetal/tasks/main.yml +++ b/roles/edpm_deploy_baremetal/tasks/main.yml @@ -132,9 +132,11 @@ value: ["{{ content_provider_registry_ip }}:5001"] {% endif %} + {% if not cifmw_edpm_deploy_baremetal_bootc %} - op: add path: /spec/nodeTemplate/ansible/ansibleVars/edpm_bootstrap_command value: sudo dnf -y update + {% endif %} kustomizations_paths: >- {{ [ diff --git a/zuul.d/edpm.yaml b/zuul.d/edpm.yaml index 349c4159fb..8b65d21f38 100644 --- a/zuul.d/edpm.yaml +++ b/zuul.d/edpm.yaml @@ -21,6 +21,21 @@ cifmw_manage_secrets_pullsecret_content: '{}' cifmw_rhol_crc_binary_folder: "/usr/local/bin" +# Virtual Baremetal job with CRC and single bootc compute node. 
+- job: + name: cifmw-crc-podified-edpm-baremetal-bootc + nodeset: centos-9-crc-2-48-0-6xlarge + parent: cifmw-base-crc-openstack + run: ci/playbooks/edpm_baremetal_deployment/run.yml + vars: + crc_parameters: "--memory 32000 --disk-size 240 --cpus 12" + cifmw_manage_secrets_pullsecret_content: '{}' + cifmw_rhol_crc_binary_folder: "/usr/local/bin" + cifmw_install_yamls_vars: + BAREMETAL_OS_CONTAINER_IMG: quay.io/openstack-k8s-operators/edpm-bootc:latest-qcow2 + BAREMETAL_OS_IMG: edpm-bootc.qcow2 + cifmw_edpm_deploy_baremetal_bootc: true + # Podified galera job - job: name: cifmw-crc-podified-galera-deployment diff --git a/zuul.d/project-templates.yaml b/zuul.d/project-templates.yaml index f2462560f4..6fe9ae944e 100644 --- a/zuul.d/project-templates.yaml +++ b/zuul.d/project-templates.yaml @@ -14,6 +14,15 @@ - cifmw-crc-podified-edpm-baremetal: *content_provider - cifmw-pod-zuul-files +- project-template: + name: podified-multinode-edpm-baremetal-bootc-pipeline + description: | + Project template to run content provider with EDPM with bootc and + baremetal job. + github-check: + jobs: + - cifmw-crc-podified-edpm-baremetal-bootc: *content_provider + - project-template: name: podified-multinode-edpm-pipeline description: | From 924f688788cbe7b1e07e663f069e636663bfcf9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 25 Sep 2025 08:21:50 +0200 Subject: [PATCH 371/480] hook: ironic_networks.yml - IPv6 support Add support to set IP version and IPv6 specific settings for the ironic provisioning network created by this hook. 
Jira: OSPRH-20084 --- hooks/playbooks/ironic_network.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/hooks/playbooks/ironic_network.yml b/hooks/playbooks/ironic_network.yml index 77f810068f..2097423b52 100644 --- a/hooks/playbooks/ironic_network.yml +++ b/hooks/playbooks/ironic_network.yml @@ -9,6 +9,9 @@ _subnet_nameserver: '192.168.122.80' _subnet_alloc_pool_start: '172.20.1.100' _subnet_alloc_pool_end: '172.20.1.200' + _subnet_ip_version: 4 + _subnet_ipv6_address_mode: null + _subnet_ipv6_ra_mode: null _provider_physical_network: ironic _provider_network_type: flat _availability_zone_hints: null # Comma separated list of strings @@ -24,7 +27,7 @@ openstack network create provisioning \ --share \ --provider-physical-network {{ _provider_physical_network }} \ - {% if _availability_zone_hints is not none -%} + {% if _availability_zone_hints -%} {% for zone in _availability_zone_hints | split(',') -%} --availability-zone-hint {{ zone }} \ {% endfor -%} @@ -33,6 +36,13 @@ oc rsh openstackclient \ openstack subnet create provisioning-subnet \ --network provisioning \ + --ip-version {{ _subnet_ip_version }} \ + {% if _subnet_ipv6_address_mode -%} + --ipv6-address-mode {{ _subnet_ipv6_address_mode }} \ + {% endif -%} + {% if _subnet_ipv6_ra_mode -%} + --ipv6-ra-mode {{ _subnet_ipv6_ra_mode }} \ + {% endif -%} --subnet-range {{ _subnet_range }} \ --gateway {{ _subnet_gateway }} \ --dns-nameserver {{ _subnet_nameserver }} \ From e3756d9cd96a07706f6254b6331b9853c4d85ae3 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Tue, 23 Sep 2025 17:30:24 +0200 Subject: [PATCH 372/480] feat: replace hardcoded /home/zuul with a dedicated variable --- roles/cifmw_cephadm/README.md | 2 ++ roles/cifmw_cephadm/defaults/main.yml | 1 + roles/cifmw_cephadm/tasks/configure_object.yml | 6 +++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md index 4b414fd2a0..d75a219022 100644 --- 
a/roles/cifmw_cephadm/README.md +++ b/roles/cifmw_cephadm/README.md @@ -33,6 +33,8 @@ Requires an Ansible user who can become root to install Ceph server. The `hooks/playbooks/ceph.yml` hook playbook defaults these parameters so that they do not need to be changed for a typical EDPM deployment. +* `cifmw_cephadm_basedir`: (String) Base directory for artifacts and logs. Defaults to `cifmw_basedir`, which defaults to `{{ ansible_user_dir ~ '/ci-framework-data' }}`. + * `cifmw_cephadm_default_container`: If this is value is `true`, then `cephadm bootstrap` is not passed the `--image` parameter and whatever default Ceph container defined inside of `cephadm` is used. Otherwise diff --git a/roles/cifmw_cephadm/defaults/main.yml b/roles/cifmw_cephadm/defaults/main.yml index bc245d45be..5e049aeca7 100644 --- a/roles/cifmw_cephadm/defaults/main.yml +++ b/roles/cifmw_cephadm/defaults/main.yml @@ -1,5 +1,6 @@ --- # defaults file for cifmw_cephadm +cifmw_cephadm_basedir: "{{ cifmw_basedir | default( ansible_user_dir ~ '/ci-framework-data') }}" cifmw_cephadm_spec_on_bootstrap: false # not recommended due to https://tracker.ceph.com/issues/49277 cifmw_cephadm_ssh_user: ceph-admin cifmw_cephadm_bin: /usr/sbin/cephadm diff --git a/roles/cifmw_cephadm/tasks/configure_object.yml b/roles/cifmw_cephadm/tasks/configure_object.yml index 87b49f4ed7..45bf02fecc 100644 --- a/roles/cifmw_cephadm/tasks/configure_object.yml +++ b/roles/cifmw_cephadm/tasks/configure_object.yml @@ -49,7 +49,7 @@ cifmw.general.ci_script: extra_args: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - output_dir: "/home/zuul/ci-framework-data/artifacts" + output_dir: "{{ cifmw_cephadm_basedir }}/artifacts" script: |- oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack service create --name swift --description 'OpenStack Object Storage' object-store oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack user create --project {{ project_service_uuid.stdout }} --password {{ cifmw_ceph_rgw_keystone_psw }} 
swift @@ -119,7 +119,7 @@ cifmw.general.ci_script: extra_args: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - output_dir: "/home/zuul/ci-framework-data/artifacts" + output_dir: "{{ cifmw_cephadm_basedir }}/artifacts" script: |- oc -n {{ cifmw_cephadm_ns }} rsh openstackclient \ openstack endpoint set \ @@ -142,7 +142,7 @@ cifmw.general.ci_script: extra_args: KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - output_dir: "/home/zuul/ci-framework-data/artifacts" + output_dir: "{{ cifmw_cephadm_basedir }}/artifacts" script: |- oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.2.stdout }} oc -n {{ cifmw_cephadm_ns }} rsh openstackclient openstack role add --user {{ all_uuids.results.0.stdout }} --project {{ project_service_uuid.stdout }} {{ all_uuids.results.3.stdout }} From 98c075ceb2292b5b38ff2768c80d6c2b11e3fb27 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Thu, 25 Sep 2025 11:09:52 +0200 Subject: [PATCH 373/480] feat: formatted documentation --- roles/cifmw_cephadm/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md index d75a219022..717b570ab5 100644 --- a/roles/cifmw_cephadm/README.md +++ b/roles/cifmw_cephadm/README.md @@ -33,7 +33,9 @@ Requires an Ansible user who can become root to install Ceph server. The `hooks/playbooks/ceph.yml` hook playbook defaults these parameters so that they do not need to be changed for a typical EDPM deployment. -* `cifmw_cephadm_basedir`: (String) Base directory for artifacts and logs. Defaults to `cifmw_basedir`, which defaults to `{{ ansible_user_dir ~ '/ci-framework-data' }}`. +* `cifmw_cephadm_basedir`: (String) Base directory for artifacts and logs. + Defaults to `cifmw_basedir`, which defaults + to `{{ ansible_user_dir ~ '/ci-framework-data' }}`. 
* `cifmw_cephadm_default_container`: If this is value is `true`, then `cephadm bootstrap` is not passed the `--image` parameter and whatever From fe71df5cd3655ccee1d364acb893fb57ff1b54a5 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Thu, 25 Sep 2025 14:25:36 +0200 Subject: [PATCH 374/480] fix: pre commit checks - cs --- roles/cifmw_cephadm/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/cifmw_cephadm/README.md b/roles/cifmw_cephadm/README.md index 717b570ab5..bd940e9ecc 100644 --- a/roles/cifmw_cephadm/README.md +++ b/roles/cifmw_cephadm/README.md @@ -34,7 +34,7 @@ The `hooks/playbooks/ceph.yml` hook playbook defaults these parameters so that they do not need to be changed for a typical EDPM deployment. * `cifmw_cephadm_basedir`: (String) Base directory for artifacts and logs. - Defaults to `cifmw_basedir`, which defaults + Defaults to `cifmw_basedir`, which defaults to `{{ ansible_user_dir ~ '/ci-framework-data' }}`. * `cifmw_cephadm_default_container`: If this is value is `true`, then @@ -59,8 +59,8 @@ that they do not need to be changed for a typical EDPM deployment. * `cifmw_cephadm_keys`: see below - `cifmw_cephadm_certs`: The path on the ceph host where TLS/SSL certificates - are located. It points to `/etc/pki/tls`. +* `cifmw_cephadm_certs`: The path on the ceph host where TLS/SSL certificates + are located. It points to `/etc/pki/tls`. * `cifmw_cephadm_certificate`: The SSL/TLS certificate signed by CA which is an optional parameter. If it is provided, ceph dashboard and RGW will be From c04a711e1e84beb37e1679b9d8dd9739da44d390 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 22 Sep 2025 11:37:18 +0200 Subject: [PATCH 375/480] Make static log dir for must-gather In some cases, the os_must_gather role have an error: task path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/os_must_gather/tasks/main.yml:97 fatal: [localhost]: FAILED! 
=> msg: | The task includes an option with an undefined variable. The error was: list object has no element 0. list object has no element 0 The error appears to be in '/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/os_must_gather/tasks/main.yml': line 97, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: - name: Move must-gather folder name to a fixed name ^ here Print the _must_gather_output_folder folder name and run the move task if the path is available. We can drop making the folder name recognition and put the logs in static dir. Signed-off-by: Daniel Pawlik --- roles/os_must_gather/tasks/main.yml | 36 +++-------------------------- 1 file changed, 3 insertions(+), 33 deletions(-) diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index bb899ccedd..a7dfd5886d 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -16,11 +16,9 @@ - name: Ensure directories are present ansible.builtin.file: - path: "{{ cifmw_os_must_gather_output_dir }}/{{ item }}" + path: "{{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather" state: directory mode: "0755" - loop: - - logs - name: Construct project change list ansible.builtin.set_fact: @@ -65,40 +63,12 @@ oc adm must-gather --image {{ cifmw_os_must_gather_image }} --timeout {{ cifmw_os_must_gather_timeout }} --host-network={{ cifmw_os_must_gather_host_network }} - --dest-dir {{ cifmw_os_must_gather_output_dir }}/logs + --dest-dir {{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather -- ADDITIONAL_NAMESPACES={{ cifmw_os_must_gather_additional_namespaces }} OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM SOS_DECOMPRESS=$SOS_DECOMPRESS - gather &> {{ cifmw_os_must_gather_output_dir }}/logs/os_must_gather.log - - # directory name will be generated starting from cifmw_os_must_gather_image - # 
variable e.g.: - # EXAMPLE 1 - # original value: "quay.io/openstack-k8s-operators/openstack-must-gather:latest" - # pattern value: "quay-io-openstack-k8s-operators-openstack-must-gather*" - # EXAMPLE 2 - # original value: "foo.bar.example.com/repofoo/openstack-must-gather-rhel9:1.0.0" - # patterns value: "foo-bar-example-com-repofoo-openstack-must-gather-rhel9*" - # TODO: add molecule testing - - name: Get exact must-gather output folder name - ansible.builtin.find: - paths: "{{ cifmw_os_must_gather_output_dir }}/logs" - patterns: >- - {{ - cifmw_os_must_gather_image | - ansible.builtin.split(':') | - first | - ansible.builtin.regex_replace('([.]|[/])', '-') ~ '*' - }} - file_type: directory - register: _must_gather_output_folder - - - name: Move must-gather folder name to a fixed name - ansible.builtin.command: - cmd: > - mv "{{ _must_gather_output_folder.files[0].path }}/" - "{{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather" + gather &> {{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather/os_must_gather.log rescue: - name: Openstack-must-gather failure From e0af6a7219985d4b094de6f042c0e8c3c75596e2 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Fri, 19 Sep 2025 16:53:38 +0200 Subject: [PATCH 376/480] [BGP+AmphoraLBs] Fix network configuration Amphora LBs did not work properly with BGP. With this change, network configuration is modified to configure properly octavia NAD. 
OSPRH-10768 --- .../templates/bgp_dt01/network-values/values.yaml.j2 | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 index 9cdbcaaaa2..504d282316 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 @@ -130,11 +130,13 @@ data: "isDefaultGateway": true, "isGateway": true, "forceAddress": false, +{% if network.network_name == "ctlplane" %} "ipMasq": true, +{% else %} + "ipMasq": false, +{% endif %} "hairpinMode": true, -{% if network.network_name == "octavia" %} - "bridge": "octbr", -{% elif network.network_name == "ctlplane" %} +{% if network.network_name == "ctlplane" %} "bridge": "ospbr", {% else %} "bridge": "{{ network.network_name }}", From 8bb0a789cdc26cb55732a80cda9498c9508cdbd8 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 29 Sep 2025 16:20:42 +0530 Subject: [PATCH 377/480] Add all molecule files to the scope of check_zuul_jobs check_zuul_jobs takes care of multiple zuul related files. It also includes molecule job file. If any change is made to molecule within role definition, this job is not triggered, and it causes manual changes issue for molecule.yml zuul file. This commit ensures this job is run every time any change is made to a molecule file of any role. It will ensure jobs are not broken due to such changes.
--- zuul.d/pods.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/zuul.d/pods.yaml b/zuul.d/pods.yaml index 8955abeb0e..5c22d47279 100644 --- a/zuul.d/pods.yaml +++ b/zuul.d/pods.yaml @@ -34,6 +34,7 @@ - ^zuul.d/.* - ^ci/templates/.* - ^ci/config/.* + - ^roles/.*/molecule/.* - job: name: cifmw-pod-k8s-snippets-source From 2e37866e82418615b73386db4e79b9896f6f1449 Mon Sep 17 00:00:00 2001 From: Milana Levy Date: Wed, 7 May 2025 21:47:30 +0300 Subject: [PATCH 378/480] Add pre deploy settings for PCI DSS Add pre deploy settings for PCI DSS that sets the security compliance into Keystone by changing the OpenStackControlPlane custom resource. --- hooks/playbooks/PCI-DSS-pre-deploy.yml | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 hooks/playbooks/PCI-DSS-pre-deploy.yml diff --git a/hooks/playbooks/PCI-DSS-pre-deploy.yml b/hooks/playbooks/PCI-DSS-pre-deploy.yml new file mode 100644 index 0000000000..be2fc45083 --- /dev/null +++ b/hooks/playbooks/PCI-DSS-pre-deploy.yml @@ -0,0 +1,43 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+- name: Create kustomization to update Keystone to use security compliance configuration + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + tasks: + - name: Create file to customize keystone for pci dss deployed in the control plane + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/keystone_pci_dss.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - namespace: {{ namespace }} + patches: + - target: + kind: OpenStackControlPlane + name: .* + patch: |- + - op: add + path: /spec/keystone/template/customServiceConfig + value: | + [security_compliance] + lockout_failure_attempts = 2 + lockout_duration = 5 + password_regex = ^.{7,}$ + unique_last_password_count = 2 + user_minimum_password_age = 0 + disable_user_account_days_inactive = 1 + password_expires_days = 90 + mode: "0644" From 427d4b87878d92e4c0c6384674d2533b694f423c Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 25 Sep 2025 13:16:29 +0530 Subject: [PATCH 379/480] Remove read_global_vars pre-task from jobs The reason to remove the playbook from pre-run stage from Zuul is, Zuul is not storing the cached vars that we added in pre-run. Maybe it is possible by some other way, but lets remove this for now. 
--- zuul.d/adoption.yaml | 2 -- zuul.d/base.yaml | 4 ---- zuul.d/edpm_build_images.yaml | 1 - zuul.d/edpm_build_images_content_provider.yaml | 1 - zuul.d/end-to-end.yaml | 1 - zuul.d/kuttl.yaml | 1 - zuul.d/kuttl_multinode.yaml | 2 -- zuul.d/molecule-base.yaml | 2 -- zuul.d/pods.yaml | 3 --- zuul.d/tcib.yaml | 1 - zuul.d/test-job.yaml | 1 - 11 files changed, 19 deletions(-) diff --git a/zuul.d/adoption.yaml b/zuul.d/adoption.yaml index f879f0ea52..cdce1c15bf 100644 --- a/zuul.d/adoption.yaml +++ b/zuul.d/adoption.yaml @@ -11,7 +11,6 @@ roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml @@ -209,7 +208,6 @@ roles: &multinode-roles - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: &multinode-prerun - - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 5832869ca8..a41ad6c290 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -13,7 +13,6 @@ and prepare the environment for running ci-framework playbooks. Once the job finishes, it will collect necessary logs. 
pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: @@ -140,7 +139,6 @@ roles: &multinode_edpm_roles - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: &multinode_edpm_pre_run - - ci/playbooks/read_global_vars.yml - ci/playbooks/multinode-customizations.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml @@ -283,7 +281,6 @@ roles: - zuul: github.com/openstack-k8s-operators/ci-framework pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: @@ -308,7 +305,6 @@ CRC environment and before running ci-boostrap roles to configure networking between nodes. pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/bootstrap-networking-mapper.yml diff --git a/zuul.d/edpm_build_images.yaml b/zuul.d/edpm_build_images.yaml index 71caac4311..e790dad22e 100644 --- a/zuul.d/edpm_build_images.yaml +++ b/zuul.d/edpm_build_images.yaml @@ -8,7 +8,6 @@ required-projects: - github.com/openstack-k8s-operators/edpm-image-builder pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/molecule-prepare.yml run: - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/edpm_build_images_content_provider.yaml b/zuul.d/edpm_build_images_content_provider.yaml index 9c28efd63f..da36923a60 100644 --- a/zuul.d/edpm_build_images_content_provider.yaml +++ b/zuul.d/edpm_build_images_content_provider.yaml @@ -9,7 +9,6 @@ - github.com/openstack-k8s-operators/edpm-image-builder - github.com/openstack-k8s-operators/ci-framework pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/content_provider/pre.yml run: - ci/playbooks/e2e-prepare.yml diff --git a/zuul.d/end-to-end.yaml b/zuul.d/end-to-end.yaml index eb0ac9dd8a..5c0f1babdf 100644 --- a/zuul.d/end-to-end.yaml +++ b/zuul.d/end-to-end.yaml @@ -9,7 +9,6 @@ zuul_log_collection: true registry_login_enabled: 
false pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml post-run: diff --git a/zuul.d/kuttl.yaml b/zuul.d/kuttl.yaml index 6ec48dd4ca..883e53cd3c 100644 --- a/zuul.d/kuttl.yaml +++ b/zuul.d/kuttl.yaml @@ -9,7 +9,6 @@ zuul_log_collection: true parent: base-simple-crc pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml run: - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/kuttl_multinode.yaml b/zuul.d/kuttl_multinode.yaml index bd15970b38..f0b38929b4 100644 --- a/zuul.d/kuttl_multinode.yaml +++ b/zuul.d/kuttl_multinode.yaml @@ -41,8 +41,6 @@ ip: 172.18.0.5 tenant: ip: 172.19.0.5 - pre-run: - - ci/playbooks/read_global_vars.yml run: - ci/playbooks/dump_zuul_data.yml - ci/playbooks/kuttl/run.yml diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 58a620efee..01c41e6aa2 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -7,7 +7,6 @@ provides: - cifmw-molecule pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml @@ -26,7 +25,6 @@ provides: - cifmw-molecule pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/dump_zuul_data.yml - ci/playbooks/molecule-prepare.yml run: ci/playbooks/molecule-test.yml diff --git a/zuul.d/pods.yaml b/zuul.d/pods.yaml index 5c22d47279..3292d68a5b 100644 --- a/zuul.d/pods.yaml +++ b/zuul.d/pods.yaml @@ -9,8 +9,6 @@ Run lightweight jobs in pods required-projects: - openstack-k8s-operators/ci-framework - pre-run: - - ci/playbooks/read_global_vars.yml run: ci/playbooks/pod-jobs.yml - job: @@ -65,7 +63,6 @@ parent: build-push-container-base nodeset: centos-stream-9 pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/molecule-prepare.yml - ci/playbooks/dump_zuul_data.yml run: ci/playbooks/build_push_container_runner.yml diff --git a/zuul.d/tcib.yaml b/zuul.d/tcib.yaml index 642b8fd106..f90fc3d9a3 
100644 --- a/zuul.d/tcib.yaml +++ b/zuul.d/tcib.yaml @@ -11,7 +11,6 @@ - github.com/openstack-k8s-operators/tcib - github.com/openstack-k8s-operators/install_yamls pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/content_provider/pre.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml diff --git a/zuul.d/test-job.yaml b/zuul.d/test-job.yaml index 30c0d35a20..9e7e1cd15c 100644 --- a/zuul.d/test-job.yaml +++ b/zuul.d/test-job.yaml @@ -5,7 +5,6 @@ nodeset: centos-stream-9 abstract: true pre-run: - - ci/playbooks/read_global_vars.yml - ci/playbooks/e2e-prepare.yml - ci/playbooks/dump_zuul_data.yml run: From 2e21312fb48ede26780a7163e09beae9a6b3ed1e Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Mon, 29 Sep 2025 11:59:27 +0530 Subject: [PATCH 380/480] Fix zuul check jobs This [1] commit broke our zuul check jobs. This commit fix the issue. We need to work on a better solution so that it does not happen again. It has occured multiple times. Co-Authored-By: Daniel Pawlik [1]: https://github.com/openstack-k8s-operators/ci-framework/pull/3311/files --- .../molecule/default/converge.yml | 1 + zuul.d/molecule.yaml | 20 ++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/roles/adoption_osp_deploy/molecule/default/converge.yml b/roles/adoption_osp_deploy/molecule/default/converge.yml index 9e88ebe7d3..7e33851b40 100644 --- a/roles/adoption_osp_deploy/molecule/default/converge.yml +++ b/roles/adoption_osp_deploy/molecule/default/converge.yml @@ -12,6 +12,7 @@ cifmw_adoption_osp_deploy_repos: [] cifmw_adoption_source_scenario_path: "." 
cifmw_basedir: "{{ playbook_dir }}" + ansible_user_dir: "{{ lookup('env', 'HOME') }}" tasks: - name: Gather stack nodes and facts diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 2ef8d22291..ecbdfd87f7 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -1,3 +1,14 @@ +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/adoption_osp_deploy/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-adoption_osp_deploy + parent: cifmw-molecule-base + vars: + TEST_RUN: adoption_osp_deploy - job: files: - ^common-requirements.txt @@ -850,15 +861,6 @@ parent: cifmw-molecule-base vars: TEST_RUN: virtualbmc -- job: - files: - - ^common-requirements.txt - - ^test-requirements.txt - - ^roles/adoption_osp_deploy/.* - - ^ci/playbooks/molecule.* - - ^.config/molecule/.* - name: cifmw-molecule-adoption_osp_deploy - parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt From 940e7b18d9c398660c31a3c9ba3e4d9a8c61567b Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 30 Sep 2025 13:00:44 +0530 Subject: [PATCH 381/480] Improve stability of ci_multus molecule test ci_multus job is quite flaky. Resources from openshift are not ready when we need to create our resource depending on them. Retries will ensure the annotation is made available.
--- roles/ci_multus/molecule/default/verify_crc.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/ci_multus/molecule/default/verify_crc.yml b/roles/ci_multus/molecule/default/verify_crc.yml index fec2a7825b..5f770980f7 100644 --- a/roles/ci_multus/molecule/default/verify_crc.yml +++ b/roles/ci_multus/molecule/default/verify_crc.yml @@ -84,6 +84,9 @@ type: Ready status: "True" register: _ci_multus_molecule_test_pod_out + until: _ci_multus_molecule_test_pod_out is not failed + retries: 30 + delay: 10 - name: Assert that test pod has the additional network ansible.builtin.assert: From 9ee0843f377b3c0c7f040be12736f0c2d2f0aeaa Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Thu, 25 Sep 2025 08:39:55 -0400 Subject: [PATCH 382/480] Remove deprecated cifmw_tempest_tempestconf_config This parameter has been deprecated for some time, so we should use the new variant cifmw_test_operator_tempest_tempestconf_config. All mentions of this parameter have been removed both from downstream and upstream, so this patch is the last step to removing the parameter. --- roles/test_operator/README.md | 1 - roles/test_operator/defaults/main.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index e798998b44..fed1cc58c5 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -92,7 +92,6 @@ cifmw_test_operator_stages: * `cifmw_test_operator_tempest_rerun_override_status`: (Bool) Allow override of exit status with the tempest re-run feature. When activated, the original return value of the tempest run will be overridden with a result of the tempest run on the set of failed tests. Default value: `false` * `cifmw_test_operator_tempest_timing_data_url`: (String) An URL pointing to an archive that contains the saved timing data. This data is used to optimize the test order and reduce Tempest execution time. 
Default value: `''` * `cifmw_test_operator_tempest_resources`: (Dict) A dictionary that specifies resources (cpu, memory) for the test pods. When untouched it clears the default values set on the test-operator side. This means that the tempest test pods run with unspecified resource limits. Default value: `{requests: {}, limits: {}}` -* `cifmw_tempest_tempestconf_config`: Deprecated, please use `cifmw_test_operator_tempest_tempestconf_config` instead * `cifmw_test_operator_tempest_tempestconf_config`: (Dict) This parameter can be used to customize the execution of the `discover-tempest-config` run. Please consult the test-operator documentation. For example, to pass a custom configuration for `tempest.conf`, use the `overrides` section: ``` cifmw_test_operator_tempest_tempestconf_config: diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index 31f3d6cab5..2b664340ad 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -78,7 +78,6 @@ cifmw_test_operator_tempest_workflow: [] cifmw_test_operator_tempest_cleanup: false cifmw_test_operator_tempest_rerun_failed_tests: false cifmw_test_operator_tempest_rerun_override_status: false -cifmw_test_operator_tempest_tempestconf_config: "{{ cifmw_tempest_tempestconf_config }}" # TODO: The default value of this parameter should be changed to {} once this fix # for tempest reaches the upstream build of the openstack-tempest-all image: From bcdd70ea18369eb37f79e07b810c125f8142050e Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 30 Sep 2025 17:01:56 +0530 Subject: [PATCH 383/480] Increase the scope of cifmw-pods-zuul-files job Just checking the changes to molecule files solves only half of our problem. If a user adds a new role, even then molecule job definition for that role is not added, and this job starts to fail in future. Though running this job on any change made in roles dir is quite wider scope. 
But seems like it is not possible to just run the job when new role is added. --- zuul.d/pods.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/pods.yaml b/zuul.d/pods.yaml index 3292d68a5b..9c61e3cf27 100644 --- a/zuul.d/pods.yaml +++ b/zuul.d/pods.yaml @@ -32,7 +32,7 @@ - ^zuul.d/.* - ^ci/templates/.* - ^ci/config/.* - - ^roles/.*/molecule/.* + - ^roles/.* - job: name: cifmw-pod-k8s-snippets-source From 6969e1d25ecc9906510dc4f2d0187d5d6533e654 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Mon, 29 Sep 2025 11:08:38 +0200 Subject: [PATCH 384/480] fix: use ansible_user_id instead of ansible_user in libvirt_manager_user Replace ansible_user with ansible_user_id in cifmw_libvirt_manager_user variable to fix SSH key ownership issues when running with ansible_connection=local. When deploying locally (not through testproject automation), ansible_user can become undefined with ansible_connection=local, causing the fallback lookup('env', 'USER') to return 'root' instead of the intended user. This resulted in SSH keys being created with root:root ownership, leading to permission denied errors when the user tried to connect to compute nodes. The switch to ansible_user_id correctly detects the user even with local connections and resolves the file ownership problem. 
--- roles/libvirt_manager/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/libvirt_manager/defaults/main.yml b/roles/libvirt_manager/defaults/main.yml index 9dfabddbf3..144a964937 100644 --- a/roles/libvirt_manager/defaults/main.yml +++ b/roles/libvirt_manager/defaults/main.yml @@ -20,7 +20,7 @@ cifmw_libvirt_manager_basedir: "{{ cifmw_basedir | default( ansible_user_dir ~ '/ci-framework-data') }}" cifmw_libvirt_manager_enable_virtualization_module: false -cifmw_libvirt_manager_user: "{{ ansible_user | default(lookup('env', 'USER')) }}" +cifmw_libvirt_manager_user: "{{ ansible_user_id | default(lookup('env', 'USER')) }}" cifmw_libvirt_manager_images_url: https://cloud.centos.org/centos/9-stream/x86_64/images cifmw_libvirt_manager_vm_template: "domain.xml.j2" From 35b1d611ef99a5c1e3edd09dbbcc36fa96f1e463 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 30 Sep 2025 14:56:16 +0530 Subject: [PATCH 385/480] Increase timeout for cifmw-molecule-openshift_obs job cifmw-molecule-openshift_obs job is failing sometimes due to timeout. Trying to increase timeout to fix this issue. 
--- ci/config/molecule.yaml | 3 +++ zuul.d/molecule.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/ci/config/molecule.yaml b/ci/config/molecule.yaml index 468705e5c0..a437292f9f 100644 --- a/ci/config/molecule.yaml +++ b/ci/config/molecule.yaml @@ -1,4 +1,7 @@ --- +- job: + name: cifmw-molecule-openshift_obs + timeout: 3600 - job: name: cifmw-molecule-libvirt_manager files: diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index ecbdfd87f7..085b8d0d10 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -556,6 +556,7 @@ name: cifmw-molecule-openshift_obs nodeset: centos-9-crc-2-48-0-xxl-ibm parent: cifmw-molecule-base + timeout: 3600 vars: TEST_RUN: openshift_obs - job: From 8e0402cf562fa5426f3816fa2bbc60d3a737c940 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 30 Sep 2025 11:27:56 +0200 Subject: [PATCH 386/480] Hook to cleanup baremetal network agents In the uni04delta-ipv6 jobs Tobiko tests are validating all network agents are UP/Alive. When ironic tempest runs, there are network agents created for fake Ironic nodes. We need to clean up by deleting the baremetal agents after tempest execution. This change adds a hook running a shell script that will delete all network agents of type `baremetal`. 
Jira: OSPRH-20084 --- .../ironic_network_agent_cleanup.yml | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 hooks/playbooks/ironic_network_agent_cleanup.yml diff --git a/hooks/playbooks/ironic_network_agent_cleanup.yml b/hooks/playbooks/ironic_network_agent_cleanup.yml new file mode 100644 index 0000000000..1470b86791 --- /dev/null +++ b/hooks/playbooks/ironic_network_agent_cleanup.yml @@ -0,0 +1,36 @@ +--- +- name: Delete neutron network agents for Baremetal Nodes + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + vars: + _namespace: openstack + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + tasks: + - name: Delete baremetal network agents + ansible.builtin.shell: | + set -xe -o pipefail + oc project {{ _namespace }} + + echo "Discovering baremetal network agents..." + + # Get all baremetal network agent IDs using JSON format + AGENT_IDS=$(oc rsh openstackclient \ + openstack network agent list --agent-type baremetal -f json -c ID | \ + jq -r '.[].ID') + + if [ -n "$AGENT_IDS" ]; then + echo "Found baremetal network agents:" + echo "$AGENT_IDS" + + # Delete each baremetal agent + for AGENT_ID in $AGENT_IDS; do + echo "Deleting baremetal network agent: $AGENT_ID" + oc rsh openstackclient openstack network agent delete "$AGENT_ID" + done + + echo "Baremetal network agent cleanup completed" + else + echo "No baremetal network agents found" + fi From 7c20debbdd5b16f67cf5f2fba08a3df6acc54d27 Mon Sep 17 00:00:00 2001 From: Eduardo Olivares Date: Thu, 25 Sep 2025 17:14:45 +0200 Subject: [PATCH 387/480] [ci_gen_kustomize_values][BGP] Remove NNCP for master nodes In BGP DTs, NNCP only makes sense for OCP workers because no RHOSO service is deployed on OCP masters. 
--- .../bgp-l3-xl/network-values/values.yaml.j2 | 3 + .../bgp_dt01/network-values/values.yaml.j2 | 3 + .../network-values/values.yaml.j2 | 3 + scenarios/reproducers/bgp-l3-xl.yml | 78 ++++--------------- 4 files changed, 22 insertions(+), 65 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 index abd5bce805..8cfc2a3cc8 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 @@ -4,9 +4,11 @@ ocp_index=0, lb_tools={}) %} data: +{% set node_groups = groups if 'rhoso-architecture-validate' not in ((zuul | default({})).job | default('')) else test_groups %} {% for host in cifmw_networking_env_definition.instances.keys() -%} {% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} {% if host is match('^(ocp|crc).*') %} +{% if 'ocp_workers' not in node_groups or host in node_groups.ocp_workers %} node_{{ ns.ocp_index }}: name: {{ hostname }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} @@ -41,6 +43,7 @@ data: {% if node_bgp_orig_content.routes | default(false) %} routes: {{ node_bgp_orig_content.routes }} {% endif %} +{% endif %} {% set ns.ocp_index = ns.ocp_index+1 %} {% endif %} {% endfor %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 index 504d282316..030e8d10f8 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 @@ -4,9 +4,11 @@ ocp_index=0, lb_tools={}) %} data: +{% set node_groups = groups if 'rhoso-architecture-validate' not in ((zuul | default({})).job | default('')) else test_groups %} {% 
for host in cifmw_networking_env_definition.instances.keys() -%} {% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} {% if host is match('^(ocp|crc).*') %} +{% if 'ocp_workers' not in node_groups or host in node_groups.ocp_workers %} node_{{ ns.ocp_index }}: name: {{ hostname }} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} @@ -47,6 +49,7 @@ data: {% if node_bgp_orig_content.routes | default(false) %} routes: {{ node_bgp_orig_content.routes }} {% endif %} +{% endif %} {% set ns.ocp_index = ns.ocp_index+1 %} {% endif %} {% endfor %} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 index f9622a1ce8..5fa5b1dfc1 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt04_ipv6/network-values/values.yaml.j2 @@ -3,12 +3,14 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} +{% set node_groups = groups if 'rhoso-architecture-validate' not in ((zuul | default({})).job | default('')) else test_groups %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} {# FIXEME: (hjensas/eolivare): We need to ensure the OCP cluster_name and base_domain is available here #} {# Because devscripts use fqdn for node names when ipv6 #} {% set hostname = cifmw_networking_env_definition.instances[host]['hostname'] %} {% if host is match('^(ocp|crc).*') %} +{% if 'ocp_workers' not in node_groups or host in node_groups.ocp_workers %} node_{{ ns.ocp_index }}: name: {{ hostname }}.ocp.openstack.lab {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} @@ -43,6 +45,7 @@ data: {% if node_bgp_orig_content.routes | default(false) %} routes: {{ node_bgp_orig_content.routes }} {% endif %} +{% endif %} {% set ns.ocp_index = ns.ocp_index+1 
%} {% endif %} {% endfor %} diff --git a/scenarios/reproducers/bgp-l3-xl.yml b/scenarios/reproducers/bgp-l3-xl.yml index 044bb90a53..16c108dd5d 100644 --- a/scenarios/reproducers/bgp-l3-xl.yml +++ b/scenarios/reproducers/bgp-l3-xl.yml @@ -50,42 +50,36 @@ cifmw_libvirt_manager_network_interface_types: l00-ocp0: network l00-ocp1: network l00-ocp2: network - l00-ocp3: network l01-node0: network l01-node1: network l01-node2: network l01-ocp0: network l01-ocp1: network l01-ocp2: network - l01-ocp3: network l10-node0: network l10-node1: network l10-node2: network l10-ocp0: network l10-ocp1: network l10-ocp2: network - l10-ocp3: network l11-node0: network l11-node1: network l11-node2: network l11-ocp0: network l11-ocp1: network l11-ocp2: network - l11-ocp3: network l20-node0: network l20-node1: network l20-node2: network l20-ocp0: network l20-ocp1: network l20-ocp2: network - l20-ocp3: network l21-node0: network l21-node1: network l21-node2: network l21-ocp0: network l21-ocp1: network l21-ocp2: network - l21-ocp3: network cifmw_libvirt_manager_configuration: networks: @@ -214,11 +208,6 @@ cifmw_libvirt_manager_configuration: l00-ocp2 - l00-ocp3: | - - l00-ocp3 - - l01-node0: | l01-node0 @@ -249,11 +238,6 @@ cifmw_libvirt_manager_configuration: l01-ocp2 - l01-ocp3: | - - l01-ocp3 - - ## rack1 l10-node0: | @@ -285,11 +269,6 @@ cifmw_libvirt_manager_configuration: l10-ocp2 - l10-ocp3: | - - l10-ocp3 - - l11-node0: | l11-node0 @@ -320,11 +299,6 @@ cifmw_libvirt_manager_configuration: l11-ocp2 - l11-ocp3: | - - l11-ocp3 - - ## rack2 l20-node0: | @@ -356,11 +330,6 @@ cifmw_libvirt_manager_configuration: l20-ocp2 - l20-ocp3: | - - l20-ocp3 - - l21-node0: | l21-node0 @@ -391,11 +360,6 @@ cifmw_libvirt_manager_configuration: l21-ocp2 - l21-ocp3: | - - l21-ocp3 - - ocpbm: | ocpbm @@ -614,16 +578,6 @@ cifmw_libvirt_manager_configuration: - "ocppr" - "ocpbm" - "osp_trunk" - spineleafnets: - - # rack0 - ocp master 0 - - "l00-ocp0" - - "l01-ocp0" - - # rack1 - ocp master 1 - - 
"l10-ocp0" - - "l11-ocp0" - - # rack2 - ocp master 2 - - "l20-ocp0" - - "l21-ocp0" ocp_worker: amount: 10 uefi: true @@ -642,32 +596,32 @@ cifmw_libvirt_manager_configuration: - "osp_trunk" spineleafnets: - # rack0 - ocp worker 0 + - "l00-ocp0" + - "l01-ocp0" + - # rack0 - ocp worker 1 - "l00-ocp1" - "l01-ocp1" - - # rack0 - ocp worker 1 + - # rack0 - ocp worker 2 - "l00-ocp2" - "l01-ocp2" - - # rack0 - ocp worker 2 - - "l00-ocp3" - - "l01-ocp3" - # rack1 - ocp worker 3 + - "l10-ocp0" + - "l11-ocp0" + - # rack1 - ocp worker 4 - "l10-ocp1" - "l11-ocp1" - - # rack1 - ocp worker 4 + - # rack1 - ocp worker 5 - "l10-ocp2" - "l11-ocp2" - - # rack1 - ocp worker 5 - - "l10-ocp3" - - "l11-ocp3" - # rack2 - ocp worker 6 + - "l20-ocp0" + - "l21-ocp0" + - # rack2 - ocp worker 7 - "l20-ocp1" - "l21-ocp1" - - # rack2 - ocp worker 7 + - # rack2 - ocp worker 8 - "l20-ocp2" - "l21-ocp2" - - # rack2 - ocp worker 8 - - "l20-ocp3" - - "l21-ocp3" - # router - ocp_tester (worker 9) - "rtr-ocp" router: @@ -750,7 +704,6 @@ cifmw_libvirt_manager_configuration: - "l00-ocp0" - "l00-ocp1" - "l00-ocp2" - - "l00-ocp3" - # rack0 - leaf01 - "l01-s0" - "l01-s1" @@ -760,7 +713,6 @@ cifmw_libvirt_manager_configuration: - "l01-ocp0" - "l01-ocp1" - "l01-ocp2" - - "l01-ocp3" - # rack1 - leaf10 - "l10-s0" - "l10-s1" @@ -770,7 +722,6 @@ cifmw_libvirt_manager_configuration: - "l10-ocp0" - "l10-ocp1" - "l10-ocp2" - - "l10-ocp3" - # rack1 - leaf11 - "l11-s0" - "l11-s1" @@ -780,7 +731,6 @@ cifmw_libvirt_manager_configuration: - "l11-ocp0" - "l11-ocp1" - "l11-ocp2" - - "l11-ocp3" - # rack2 - leaf20 - "l20-s0" - "l20-s1" @@ -790,7 +740,6 @@ cifmw_libvirt_manager_configuration: - "l20-ocp0" - "l20-ocp1" - "l20-ocp2" - - "l20-ocp3" - # rack2 - leaf21 - "l21-s0" - "l21-s1" @@ -800,7 +749,6 @@ cifmw_libvirt_manager_configuration: - "l21-ocp0" - "l21-ocp1" - "l21-ocp2" - - "l21-ocp3" ## devscript support for OCP deploy cifmw_devscripts_config_overrides: @@ -848,7 +796,7 @@ pre_deploy: num_racks: "{{ num_racks }}" 
router_bool: true edpm_nodes_per_rack: 3 - ocp_nodes_per_rack: 4 + ocp_nodes_per_rack: 3 router_uplink_ip: 100.64.10.1 # post_deploy: From 21d73e76323db798d41af8aa172469a08890f71c Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Tue, 30 Sep 2025 15:51:07 +0200 Subject: [PATCH 388/480] Fix output of must-gather log Right now because of &> the output of ci_script task in collected logs is empty. After the change we will have the output along other calls of ci_script instead of relying on the dedicated file. --- roles/os_must_gather/tasks/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index a7dfd5886d..e780263da3 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -68,7 +68,8 @@ OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM SOS_DECOMPRESS=$SOS_DECOMPRESS - gather &> {{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather/os_must_gather.log + gather + 2>&1 rescue: - name: Openstack-must-gather failure From 1bdafa727d67041c2def184ab30f68e0879b0104 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 25 Sep 2025 13:29:38 +0530 Subject: [PATCH 389/480] Ensure molecule finds group_vars Molecule can also use group_vars. For that, we have to add the path to group_vars dir in molecule config. This commit adds the path. It should be noted that relative path from config file wont work. Since molecule will expand the relative path during the molecule test run, the path to group_vars dir should be relative with molecule jobs from where they are run. 
More details will be in the PR --- .config/molecule/config.yml | 3 ++ .config/molecule/config_local.yml | 3 ++ .config/molecule/config_podman.yml | 2 + .../molecule/cifmw_snr_nhc_molecule.yml | 4 ++ group_vars/molecule/devscript_molecule.yml | 2 + group_vars/molecule/rhol_crc_molecule.yml | 8 +++ .../molecule/default/host_vars/instance.yml | 33 +++++++++++++ roles/ci_multus/molecule/default/molecule.yml | 37 +------------- .../molecule/local/host_vars/instance.yml | 14 ++++++ roles/ci_multus/molecule/local/molecule.yml | 18 +------ .../local_ipv6/host_vars/instance.yml | 2 + .../molecule/local_ipv6/molecule.yml | 6 +-- .../molecule/default/molecule.yml | 10 ++-- .../check_cluster_status/molecule.yml | 10 ++-- .../devscripts/molecule/default/molecule.yml | 11 +++-- .../molecule/check_dns/host_vars/instance.yml | 4 ++ .../molecule/check_dns/molecule.yml | 8 +-- .../deploy_layout/host_vars/instance.yml | 4 ++ .../molecule/deploy_layout/molecule.yml | 8 +-- .../host_vars/instance.yml | 4 ++ .../generate_network_data/molecule.yml | 8 +-- .../ocp_layout/host_vars/instance.yml | 4 ++ .../molecule/ocp_layout/molecule.yml | 8 +-- .../molecule/default/host_vars/instance.yml | 4 ++ .../molecule/default/molecule.yml | 8 +-- .../molecule/add_crc_creds/molecule.yml | 17 +++---- roles/rhol_crc/molecule/binary/molecule.yml | 10 ++-- roles/rhol_crc/molecule/default/molecule.yml | 10 ++-- roles/rhol_crc/molecule/find_crc/molecule.yml | 10 ++-- .../molecule/get_versions/molecule.yml | 10 ++-- .../molecule/default/host_vars/instance.yml | 44 +++++++++++++++++ roles/run_hook/molecule/default/molecule.yml | 49 +------------------ 32 files changed, 197 insertions(+), 176 deletions(-) create mode 100644 group_vars/molecule/cifmw_snr_nhc_molecule.yml create mode 100644 group_vars/molecule/devscript_molecule.yml create mode 100644 group_vars/molecule/rhol_crc_molecule.yml create mode 100644 roles/ci_multus/molecule/default/host_vars/instance.yml create mode 100644 
roles/ci_multus/molecule/local/host_vars/instance.yml create mode 100644 roles/ci_multus/molecule/local_ipv6/host_vars/instance.yml create mode 100644 roles/libvirt_manager/molecule/check_dns/host_vars/instance.yml create mode 100644 roles/libvirt_manager/molecule/deploy_layout/host_vars/instance.yml create mode 100644 roles/libvirt_manager/molecule/generate_network_data/host_vars/instance.yml create mode 100644 roles/libvirt_manager/molecule/ocp_layout/host_vars/instance.yml create mode 100644 roles/networking_mapper/molecule/default/host_vars/instance.yml create mode 100644 roles/run_hook/molecule/default/host_vars/instance.yml diff --git a/.config/molecule/config.yml b/.config/molecule/config.yml index 3ade7fc208..eed745f188 100644 --- a/.config/molecule/config.yml +++ b/.config/molecule/config.yml @@ -26,6 +26,9 @@ platforms: provisioner: name: ansible + inventory: + links: + group_vars: ../../../../group_vars/ config_options: defaults: fact_caching: jsonfile diff --git a/.config/molecule/config_local.yml b/.config/molecule/config_local.yml index c931941ba4..d9fb17668d 100644 --- a/.config/molecule/config_local.yml +++ b/.config/molecule/config_local.yml @@ -25,6 +25,9 @@ provisioner: # all: # cifmw_discover_latest_image_qcow_prefix: "CentOS-Stream-GenericCloud-9-20240506" + inventory: + links: + group_vars: ../../../../group_vars/ config_options: defaults: fact_caching: jsonfile diff --git a/.config/molecule/config_podman.yml b/.config/molecule/config_podman.yml index 1a542f146b..8fa36f559b 100644 --- a/.config/molecule/config_podman.yml +++ b/.config/molecule/config_podman.yml @@ -25,6 +25,8 @@ provisioner: hosts: instance: ansible_python_interpreter: /usr/bin/python3 + links: + group_vars: ../../../../group_vars/ name: ansible log: true env: diff --git a/group_vars/molecule/cifmw_snr_nhc_molecule.yml b/group_vars/molecule/cifmw_snr_nhc_molecule.yml new file mode 100644 index 0000000000..9657cd318d --- /dev/null +++ 
b/group_vars/molecule/cifmw_snr_nhc_molecule.yml @@ -0,0 +1,4 @@ +cifmw_snr_nhc_kubeconfig: "/tmp/kubeconfig" +cifmw_snr_nhc_kubeadmin_password_file: "/tmp/kubeadmin-password" +cifmw_snr_nhc_namespace: "test-workload-availability" +ansible_python_interpreter: /usr/bin/python3 diff --git a/group_vars/molecule/devscript_molecule.yml b/group_vars/molecule/devscript_molecule.yml new file mode 100644 index 0000000000..953e7a6b9b --- /dev/null +++ b/group_vars/molecule/devscript_molecule.yml @@ -0,0 +1,2 @@ +cifmw_devscripts_config_overrides_patch_01_override_br_management: + external_bootstrap_mac: '52:54:ab:83:31:87' diff --git a/group_vars/molecule/rhol_crc_molecule.yml b/group_vars/molecule/rhol_crc_molecule.yml new file mode 100644 index 0000000000..1635ea8fe4 --- /dev/null +++ b/group_vars/molecule/rhol_crc_molecule.yml @@ -0,0 +1,8 @@ +cifmw_rhol_crc_binary_folder: "/usr/local/bin" +# If you want to run this job on your own node, +# and if you don't have CRC pre-provisioned, you can +# uncomment and tweak the following content +# +# cifmw_manage_secrets_pullsecret_content: | +# your pull-secret +# setup_crc: true diff --git a/roles/ci_multus/molecule/default/host_vars/instance.yml b/roles/ci_multus/molecule/default/host_vars/instance.yml new file mode 100644 index 0000000000..fec49b4852 --- /dev/null +++ b/roles/ci_multus/molecule/default/host_vars/instance.yml @@ -0,0 +1,33 @@ +_expected_multus_networks: + - default + - patchnetwork + - bridge-to-linux-bridge +cifmw_ci_multus_net_info_patch_1: + patchnetwork: + gw_v4: 192.168.122.1 + network_name: patchnetwork + network_v4: 192.168.122.0/24 + interface_name: eth2 + tools: + multus: + ipv4_ranges: + - start: 192.168.122.30 + end: 192.168.122.70 + multus_type: macvlan +cifmw_ci_multus_net_info_patch_2: + bridge-to-linux-bridge: + gw_v4: 192.168.122.1 + network_name: bridge-to-linux-bridge + network_v4: 192.168.122.0/24 + interface_name: eth1 + tools: + multus: + ipv4_ranges: + - start: 192.168.122.30 + end: 
192.168.122.70 + multus_type: bridge + multus_attach: linux-bridge + +cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" +cifmw_openshift_kubeconfig: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" +testpod_name: "pod-testnad" diff --git a/roles/ci_multus/molecule/default/molecule.yml b/roles/ci_multus/molecule/default/molecule.yml index 6f2dcb39b9..1122c16fdf 100644 --- a/roles/ci_multus/molecule/default/molecule.yml +++ b/roles/ci_multus/molecule/default/molecule.yml @@ -10,41 +10,8 @@ provisioner: playbooks: side_effect: side_effect.yml inventory: - host_vars: - instance: - _expected_multus_networks: - - default - - patchnetwork - - bridge-to-linux-bridge - cifmw_ci_multus_net_info_patch_1: - patchnetwork: - gw_v4: 192.168.122.1 - network_name: patchnetwork - network_v4: 192.168.122.0/24 - interface_name: eth2 - tools: - multus: - ipv4_ranges: - - start: 192.168.122.30 - end: 192.168.122.70 - multus_type: macvlan - cifmw_ci_multus_net_info_patch_2: - bridge-to-linux-bridge: - gw_v4: 192.168.122.1 - network_name: bridge-to-linux-bridge - network_v4: 192.168.122.0/24 - interface_name: eth1 - tools: - multus: - ipv4_ranges: - - start: 192.168.122.30 - end: 192.168.122.70 - multus_type: bridge - multus_attach: linux-bridge - - cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" - cifmw_openshift_kubeconfig: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" - testpod_name: "pod-testnad" + links: + host_vars: ./host_vars/ prerun: false scenario: test_sequence: diff --git a/roles/ci_multus/molecule/local/host_vars/instance.yml b/roles/ci_multus/molecule/local/host_vars/instance.yml new file mode 100644 index 0000000000..a75945670c --- /dev/null +++ b/roles/ci_multus/molecule/local/host_vars/instance.yml @@ -0,0 +1,14 @@ +_expected_multus_networks: + - default + - patchnetwork 
+cifmw_ci_multus_net_info_patch_1: + patchnetwork: + gw_v4: 192.168.122.1 + network_name: patchnetwork + network_v4: 192.168.122.0/24 + interface_name: eth2 + tools: + multus: + ipv4_ranges: + - start: 192.168.122.30 + end: 192.168.122.70 diff --git a/roles/ci_multus/molecule/local/molecule.yml b/roles/ci_multus/molecule/local/molecule.yml index 9ed0aa2a52..43db501d6f 100644 --- a/roles/ci_multus/molecule/local/molecule.yml +++ b/roles/ci_multus/molecule/local/molecule.yml @@ -10,22 +10,8 @@ provisioner: playbooks: side_effect: side_effect.yml inventory: - host_vars: - instance: - _expected_multus_networks: - - default - - patchnetwork - cifmw_ci_multus_net_info_patch_1: - patchnetwork: - gw_v4: 192.168.122.1 - network_name: patchnetwork - network_v4: 192.168.122.0/24 - interface_name: eth2 - tools: - multus: - ipv4_ranges: - - start: 192.168.122.30 - end: 192.168.122.70 + links: + host_vars: ./host_vars/ prerun: false scenario: diff --git a/roles/ci_multus/molecule/local_ipv6/host_vars/instance.yml b/roles/ci_multus/molecule/local_ipv6/host_vars/instance.yml new file mode 100644 index 0000000000..34484302ca --- /dev/null +++ b/roles/ci_multus/molecule/local_ipv6/host_vars/instance.yml @@ -0,0 +1,2 @@ +_expected_multus_networks: + - default diff --git a/roles/ci_multus/molecule/local_ipv6/molecule.yml b/roles/ci_multus/molecule/local_ipv6/molecule.yml index f38939e911..43db501d6f 100644 --- a/roles/ci_multus/molecule/local_ipv6/molecule.yml +++ b/roles/ci_multus/molecule/local_ipv6/molecule.yml @@ -10,10 +10,8 @@ provisioner: playbooks: side_effect: side_effect.yml inventory: - host_vars: - instance: - _expected_multus_networks: - - default + links: + host_vars: ./host_vars/ prerun: false scenario: diff --git a/roles/cifmw_snr_nhc/molecule/default/molecule.yml b/roles/cifmw_snr_nhc/molecule/default/molecule.yml index be4602e4b7..869049f651 100644 --- a/roles/cifmw_snr_nhc/molecule/default/molecule.yml +++ b/roles/cifmw_snr_nhc/molecule/default/molecule.yml @@ -10,6 
+10,9 @@ driver: platforms: - name: instance + groups: + - molecule + - rhol_crc_molecule image: registry.access.redhat.com/ubi9/ubi:latest pre_build_image: true volumes: @@ -24,13 +27,6 @@ platforms: provisioner: name: ansible - inventory: - group_vars: - all: - cifmw_snr_nhc_kubeconfig: "/tmp/kubeconfig" - cifmw_snr_nhc_kubeadmin_password_file: "/tmp/kubeadmin-password" - cifmw_snr_nhc_namespace: "test-workload-availability" - ansible_python_interpreter: /usr/bin/python3 verifier: name: ansible diff --git a/roles/devscripts/molecule/check_cluster_status/molecule.yml b/roles/devscripts/molecule/check_cluster_status/molecule.yml index 98cff62401..7b86095cef 100644 --- a/roles/devscripts/molecule/check_cluster_status/molecule.yml +++ b/roles/devscripts/molecule/check_cluster_status/molecule.yml @@ -6,11 +6,11 @@ log: true +platforms: + - name: instance + groups: + - devscript_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_devscripts_config_overrides_patch_01_override_br_management: - external_bootstrap_mac: '52:54:ab:83:31:87' diff --git a/roles/devscripts/molecule/default/molecule.yml b/roles/devscripts/molecule/default/molecule.yml index 98cff62401..360d8c1238 100644 --- a/roles/devscripts/molecule/default/molecule.yml +++ b/roles/devscripts/molecule/default/molecule.yml @@ -6,11 +6,12 @@ log: true +platforms: + - name: instance + groups: + - molecule + - devscript_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_devscripts_config_overrides_patch_01_override_br_management: - external_bootstrap_mac: '52:54:ab:83:31:87' diff --git a/roles/libvirt_manager/molecule/check_dns/host_vars/instance.yml b/roles/libvirt_manager/molecule/check_dns/host_vars/instance.yml new file mode 100644 index 0000000000..7d1ed277e3 --- /dev/null +++ b/roles/libvirt_manager/molecule/check_dns/host_vars/instance.yml @@ -0,0 +1,4 @@ +cifmw_libvirt_manager_configuration_patch_01_more_computes: + vms: + 
compute: + amount: 2 diff --git a/roles/libvirt_manager/molecule/check_dns/molecule.yml b/roles/libvirt_manager/molecule/check_dns/molecule.yml index ab2823a877..659af155b1 100644 --- a/roles/libvirt_manager/molecule/check_dns/molecule.yml +++ b/roles/libvirt_manager/molecule/check_dns/molecule.yml @@ -5,9 +5,5 @@ provisioner: name: ansible log: true inventory: - host_vars: - instance: - cifmw_libvirt_manager_configuration_patch_01_more_computes: - vms: - compute: - amount: 2 + links: + host_vars: ./host_vars/ diff --git a/roles/libvirt_manager/molecule/deploy_layout/host_vars/instance.yml b/roles/libvirt_manager/molecule/deploy_layout/host_vars/instance.yml new file mode 100644 index 0000000000..7d1ed277e3 --- /dev/null +++ b/roles/libvirt_manager/molecule/deploy_layout/host_vars/instance.yml @@ -0,0 +1,4 @@ +cifmw_libvirt_manager_configuration_patch_01_more_computes: + vms: + compute: + amount: 2 diff --git a/roles/libvirt_manager/molecule/deploy_layout/molecule.yml b/roles/libvirt_manager/molecule/deploy_layout/molecule.yml index ab2823a877..659af155b1 100644 --- a/roles/libvirt_manager/molecule/deploy_layout/molecule.yml +++ b/roles/libvirt_manager/molecule/deploy_layout/molecule.yml @@ -5,9 +5,5 @@ provisioner: name: ansible log: true inventory: - host_vars: - instance: - cifmw_libvirt_manager_configuration_patch_01_more_computes: - vms: - compute: - amount: 2 + links: + host_vars: ./host_vars/ diff --git a/roles/libvirt_manager/molecule/generate_network_data/host_vars/instance.yml b/roles/libvirt_manager/molecule/generate_network_data/host_vars/instance.yml new file mode 100644 index 0000000000..7d1ed277e3 --- /dev/null +++ b/roles/libvirt_manager/molecule/generate_network_data/host_vars/instance.yml @@ -0,0 +1,4 @@ +cifmw_libvirt_manager_configuration_patch_01_more_computes: + vms: + compute: + amount: 2 diff --git a/roles/libvirt_manager/molecule/generate_network_data/molecule.yml b/roles/libvirt_manager/molecule/generate_network_data/molecule.yml index 
ab2823a877..659af155b1 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/molecule.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/molecule.yml @@ -5,9 +5,5 @@ provisioner: name: ansible log: true inventory: - host_vars: - instance: - cifmw_libvirt_manager_configuration_patch_01_more_computes: - vms: - compute: - amount: 2 + links: + host_vars: ./host_vars/ diff --git a/roles/libvirt_manager/molecule/ocp_layout/host_vars/instance.yml b/roles/libvirt_manager/molecule/ocp_layout/host_vars/instance.yml new file mode 100644 index 0000000000..7d1ed277e3 --- /dev/null +++ b/roles/libvirt_manager/molecule/ocp_layout/host_vars/instance.yml @@ -0,0 +1,4 @@ +cifmw_libvirt_manager_configuration_patch_01_more_computes: + vms: + compute: + amount: 2 diff --git a/roles/libvirt_manager/molecule/ocp_layout/molecule.yml b/roles/libvirt_manager/molecule/ocp_layout/molecule.yml index ab2823a877..659af155b1 100644 --- a/roles/libvirt_manager/molecule/ocp_layout/molecule.yml +++ b/roles/libvirt_manager/molecule/ocp_layout/molecule.yml @@ -5,9 +5,5 @@ provisioner: name: ansible log: true inventory: - host_vars: - instance: - cifmw_libvirt_manager_configuration_patch_01_more_computes: - vms: - compute: - amount: 2 + links: + host_vars: ./host_vars/ diff --git a/roles/networking_mapper/molecule/default/host_vars/instance.yml b/roles/networking_mapper/molecule/default/host_vars/instance.yml new file mode 100644 index 0000000000..96308f0b3b --- /dev/null +++ b/roles/networking_mapper/molecule/default/host_vars/instance.yml @@ -0,0 +1,4 @@ +cifmw_networking_mapper_definition_patch_01: + networks: + internalapi: + vlan: 100 diff --git a/roles/networking_mapper/molecule/default/molecule.yml b/roles/networking_mapper/molecule/default/molecule.yml index 3b7f18ef6f..040b9f6b17 100644 --- a/roles/networking_mapper/molecule/default/molecule.yml +++ b/roles/networking_mapper/molecule/default/molecule.yml @@ -10,9 +10,5 @@ provisioner: env: ANSIBLE_STDOUT_CALLBACK: yaml 
inventory: - host_vars: - instance: - cifmw_networking_mapper_definition_patch_01: - networks: - internalapi: - vlan: 100 + link: + host_vars: ./host_vars/ diff --git a/roles/rhol_crc/molecule/add_crc_creds/molecule.yml b/roles/rhol_crc/molecule/add_crc_creds/molecule.yml index fd7bbe0ce2..577444e6c4 100644 --- a/roles/rhol_crc/molecule/add_crc_creds/molecule.yml +++ b/roles/rhol_crc/molecule/add_crc_creds/molecule.yml @@ -1,20 +1,15 @@ --- log: true +platforms: + - name: instance + groups: + - molecule + - rhol_crc_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_rhol_crc_binary_folder: "/usr/local/bin" - # If you want to run this job on your own node, - # and if you don't have CRC pre-provisioned, you can - # uncomment and tweak the following content - # - # cifmw_manage_secrets_pullsecret_content: | - # your pull-secret - # setup_crc: true # Enforce scenario steps to NOT # run "verify" as a standalone play diff --git a/roles/rhol_crc/molecule/binary/molecule.yml b/roles/rhol_crc/molecule/binary/molecule.yml index 1fbfd90872..577444e6c4 100644 --- a/roles/rhol_crc/molecule/binary/molecule.yml +++ b/roles/rhol_crc/molecule/binary/molecule.yml @@ -1,13 +1,15 @@ --- log: true +platforms: + - name: instance + groups: + - molecule + - rhol_crc_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_rhol_crc_binary_folder: "/usr/local/bin" # Enforce scenario steps to NOT # run "verify" as a standalone play diff --git a/roles/rhol_crc/molecule/default/molecule.yml b/roles/rhol_crc/molecule/default/molecule.yml index 1fbfd90872..577444e6c4 100644 --- a/roles/rhol_crc/molecule/default/molecule.yml +++ b/roles/rhol_crc/molecule/default/molecule.yml @@ -1,13 +1,15 @@ --- log: true +platforms: + - name: instance + groups: + - molecule + - rhol_crc_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_rhol_crc_binary_folder: "/usr/local/bin" # Enforce scenario steps 
to NOT # run "verify" as a standalone play diff --git a/roles/rhol_crc/molecule/find_crc/molecule.yml b/roles/rhol_crc/molecule/find_crc/molecule.yml index 1fbfd90872..577444e6c4 100644 --- a/roles/rhol_crc/molecule/find_crc/molecule.yml +++ b/roles/rhol_crc/molecule/find_crc/molecule.yml @@ -1,13 +1,15 @@ --- log: true +platforms: + - name: instance + groups: + - molecule + - rhol_crc_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_rhol_crc_binary_folder: "/usr/local/bin" # Enforce scenario steps to NOT # run "verify" as a standalone play diff --git a/roles/rhol_crc/molecule/get_versions/molecule.yml b/roles/rhol_crc/molecule/get_versions/molecule.yml index 1fbfd90872..577444e6c4 100644 --- a/roles/rhol_crc/molecule/get_versions/molecule.yml +++ b/roles/rhol_crc/molecule/get_versions/molecule.yml @@ -1,13 +1,15 @@ --- log: true +platforms: + - name: instance + groups: + - molecule + - rhol_crc_molecule + provisioner: name: ansible log: true - inventory: - group_vars: - all: - cifmw_rhol_crc_binary_folder: "/usr/local/bin" # Enforce scenario steps to NOT # run "verify" as a standalone play diff --git a/roles/run_hook/molecule/default/host_vars/instance.yml b/roles/run_hook/molecule/default/host_vars/instance.yml new file mode 100644 index 0000000000..ec2b240e72 --- /dev/null +++ b/roles/run_hook/molecule/default/host_vars/instance.yml @@ -0,0 +1,44 @@ +_tmp: "/tmp" +# Fill only _list_hooks +list_hooks: + - name: Run dummy-2 + source: "{{ _tmp }}/dummy-2.yml" + type: playbook + extra_vars: + foo: bar + file: "/tmp/dummy-env.yml" + - name: Run dummy-3 + source: /tmp/dummy-3.yml + type: playbook + extra_vars: + foo: bar + file: "/tmp/dummy-env.yml" +# fill up _list_hooks and _filtered_hooks +# Also ensure ordering is properly taken +run_molecule: + - name: 01 Default noop hook + source: noop.yml + type: playbook + - name: 02 Re-run noop + source: noop.yml + type: playbook +run_molecule_03_single_hook: + source: "{{ _tmp 
}}/dummy-1.yml" + type: playbook + extra_vars: + foo: bar + file: "/tmp/dummy-env.yml" + +# Fill only _filtered_hooks +filtered_hooks_01_my_hook: + source: "{{ _tmp }}/dummy-4.yml" + type: playbook + extra_vars: + foo: bar + file: "/tmp/dummy-env.yml" + +retry_hook: + - name: Run hook with retry + source: "/tmp/dummy-retry.yml" + type: playbook + retry_hook: true diff --git a/roles/run_hook/molecule/default/molecule.yml b/roles/run_hook/molecule/default/molecule.yml index 186751b6a4..45f62a5eec 100644 --- a/roles/run_hook/molecule/default/molecule.yml +++ b/roles/run_hook/molecule/default/molecule.yml @@ -9,50 +9,5 @@ provisioner: name: ansible log: true inventory: - host_vars: - instance: - # Ensure vars are properly interpreted - _tmp: "/tmp" - # Fill only _list_hooks - list_hooks: - - name: Run dummy-2 - source: "{{ _tmp }}/dummy-2.yml" - type: playbook - extra_vars: - foo: bar - file: "/tmp/dummy-env.yml" - - name: Run dummy-3 - source: /tmp/dummy-3.yml - type: playbook - extra_vars: - foo: bar - file: "/tmp/dummy-env.yml" - # fill up _list_hooks and _filtered_hooks - # Also ensure ordering is properly taken - run_molecule: - - name: 01 Default noop hook - source: noop.yml - type: playbook - - name: 02 Re-run noop - source: noop.yml - type: playbook - run_molecule_03_single_hook: - source: "{{ _tmp }}/dummy-1.yml" - type: playbook - extra_vars: - foo: bar - file: "/tmp/dummy-env.yml" - - # Fill only _filtered_hooks - filtered_hooks_01_my_hook: - source: "{{ _tmp }}/dummy-4.yml" - type: playbook - extra_vars: - foo: bar - file: "/tmp/dummy-env.yml" - - retry_hook: - - name: Run hook with retry - source: "/tmp/dummy-retry.yml" - type: playbook - retry_hook: true + links: + host_vars: ./host_vars/ From c3fdc7c55f9ce6ab52552d03b04edfc9febcaf69 Mon Sep 17 00:00:00 2001 From: bshewale Date: Fri, 26 Sep 2025 17:45:19 +0530 Subject: [PATCH 390/480] feat(artifacts): Replace hardcoded zuul path with configurable variable Replace hardcoded /home/zuul/ paths with the 
configurable `ansible_user_dir` variable in the artifacts edpm.yml task file to remove hardcoded dependencies and align with other roles in the framework. --- roles/artifacts/tasks/edpm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/artifacts/tasks/edpm.yml b/roles/artifacts/tasks/edpm.yml index 2c05a49b80..08a15b550e 100644 --- a/roles/artifacts/tasks/edpm.yml +++ b/roles/artifacts/tasks/edpm.yml @@ -75,7 +75,7 @@ sudo test -d /var/lib/openstack && sudo cp -a /var/lib/openstack /tmp/{{ host_ip }} sudo test -d /var/lib/config-data && sudo cp -a /var/lib/config-data /tmp/{{ host_ip }} sudo test -d /var/lib/cloud && sudo cp -a /var/lib/cloud /tmp/{{ host_ip }} - sudo test -d /home/zuul/compliance-scans && sudo cp -a /home/zuul/compliance-scans /tmp/{{ host_ip }} + sudo test -d {{ ansible_user_dir }}/compliance-scans && sudo cp -a {{ ansible_user_dir }}/compliance-scans /tmp/{{ host_ip }} sudo find /tmp/{{ host_ip }} -type d -exec chmod ugoa+rx '{}' \; sudo find /tmp/{{ host_ip }} -type f -exec chmod ugoa+r '{}' \; command -v ovs-vsctl && sudo ovs-vsctl list Open_vSwitch > /tmp/{{ host_ip }}/ovs_vsctl_list_openvswitch.txt From 1609beb3f0c8ea10d5ad5cd2489a3b8710c6fa65 Mon Sep 17 00:00:00 2001 From: bshewale Date: Fri, 26 Sep 2025 16:13:45 +0530 Subject: [PATCH 391/480] feat(build_push_container): Replace hardcoded zuul path with configurable variable Replace hardcoded /home/zuul/ paths with the configurable ansible_user_dir variable in the molecule converge.yml task file to remove hardcoded dependencies and align with other roles in the framework. 
--- roles/build_push_container/molecule/default/converge.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/build_push_container/molecule/default/converge.yml b/roles/build_push_container/molecule/default/converge.yml index ad340ca155..5665507318 100644 --- a/roles/build_push_container/molecule/default/converge.yml +++ b/roles/build_push_container/molecule/default/converge.yml @@ -32,7 +32,7 @@ cifmw_build_push_container_patch_number: 123 cifmw_build_push_container_name: test_container_multi_arch cifmw_build_push_container_containerfile_path: >- - /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile cifmw_build_push_container_registry_name: 127.0.0.1:5001/cifmw-client/test_container_multi_arch cifmw_build_push_container_registry_tls_verify: false cifmw_build_push_container_supported_platform: [linux/amd64, linux/arm64] @@ -78,7 +78,7 @@ cifmw_build_push_container_patch_number: 123 cifmw_build_push_container_name: test_container_single_arch cifmw_build_push_container_containerfile_path: >- - /home/zuul/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile + {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile cifmw_build_push_container_registry_name: 127.0.0.1:5001/cifmw-client/test_container_single_arch cifmw_build_push_container_registry_tls_verify: false ansible.builtin.include_role: From 0e212d9a55efece890075c71fe25d1c09baf5d24 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 1 Oct 2025 15:45:55 +0530 Subject: [PATCH 392/480] fix(devscripts): add molecule group in roles/devscript/check_cluster_status/molecule.yml PR [1] ensured our molecule jobs can use group_vars. 
During this, adding molecule group to one file was missed. This commit adds that. [1] https://github.com/openstack-k8s-operators/ci-framework/pull/3333 --- roles/devscripts/molecule/check_cluster_status/molecule.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/devscripts/molecule/check_cluster_status/molecule.yml b/roles/devscripts/molecule/check_cluster_status/molecule.yml index 7b86095cef..360d8c1238 100644 --- a/roles/devscripts/molecule/check_cluster_status/molecule.yml +++ b/roles/devscripts/molecule/check_cluster_status/molecule.yml @@ -9,6 +9,7 @@ log: true platforms: - name: instance groups: + - molecule - devscript_molecule provisioner: From 6819bd56c80132ae8519c1c8b481ea914e273f23 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 1 Oct 2025 17:03:03 +0530 Subject: [PATCH 393/480] refactor: Update molecule template Since we are now reading group_vars in molecule through links, it is important to add the required config for new molecule roles. This commit updates the template to include these changes. Signed-off-by: Amartya Sinha --- _skeleton_role_/molecule/default/molecule.yml | 11 ------- .../molecule/default/molecule.yml.j2 | 33 +++++++++++++++++++ 2 files changed, 33 insertions(+), 11 deletions(-) delete mode 100644 _skeleton_role_/molecule/default/molecule.yml create mode 100644 _skeleton_role_/molecule/default/molecule.yml.j2 diff --git a/_skeleton_role_/molecule/default/molecule.yml b/_skeleton_role_/molecule/default/molecule.yml deleted file mode 100644 index fda947cafe..0000000000 --- a/_skeleton_role_/molecule/default/molecule.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Mainly used to override the defaults set in .config/molecule/ -# By default, it uses the "config_podman.yml" - in CI, it will use -# "config_local.yml". 
-log: true - -provisioner: - name: ansible - log: true - env: - ANSIBLE_STDOUT_CALLBACK: yaml diff --git a/_skeleton_role_/molecule/default/molecule.yml.j2 b/_skeleton_role_/molecule/default/molecule.yml.j2 new file mode 100644 index 0000000000..52658eea35 --- /dev/null +++ b/_skeleton_role_/molecule/default/molecule.yml.j2 @@ -0,0 +1,33 @@ +--- +# Mainly used to override the defaults set in .config/molecule/ +# By default, it uses the "config_podman.yml" - in CI, it will use +# "config_local.yml". +# +# Do not add host_vars and group_vars within this config file. +# To add host_vars, uncomment the links: host_vars config and add +# host_vars file in roles/{{ role_name }}/molecule/host_vars/instance.yml. +# +# To add group_vars, uncomment platforms section so that +# the molecule test is added to required groups. After that, +# create group_vars/molecule/{{ role_name }}_molecule.yml file +# and add group_vars specific to this molecule test there. +# +# Reason is, you can either have links to group_vars/host_vars +# or add group_vars/host_vars directly. 
Ref [1] +# [1] https://ansible.readthedocs.io/projects/molecule/configuration/#provisioner-pre-ansible-native +log: true + +#platforms: +# - name: instance +# groups: +# - molecule +# - {{ role_name }}_molecule + +provisioner: + name: ansible + log: true + env: + ANSIBLE_STDOUT_CALLBACK: yaml +# inventory: +# links: +# host_vars: ./host_vars/ From 2a995b7c41482c85667efc1265e08ed356518960 Mon Sep 17 00:00:00 2001 From: Fiorella Yanac Date: Mon, 29 Sep 2025 14:27:56 +0200 Subject: [PATCH 394/480] Increase ram and disk in computes for hci env --- scenarios/adoption/hci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/adoption/hci.yml b/scenarios/adoption/hci.yml index 7c618f7a95..401cf6adcf 100644 --- a/scenarios/adoption/hci.yml +++ b/scenarios/adoption/hci.yml @@ -47,9 +47,9 @@ libvirt_manager_patch_layout: osp-compute: <<: *osp_base_conf amount: 3 - memory: 4 + memory: 8 cpus: 4 - disksize: 20 + disksize: 40 extra_disks_num: 3 extra_disks_size: 30G nets: From c0ca9fc6c9af013fef9286708cae6dd22c9781cc Mon Sep 17 00:00:00 2001 From: James Slagle Date: Thu, 28 Aug 2025 14:34:54 -0400 Subject: [PATCH 395/480] Fix permission denied when checking for cifmw_reproducer_src_dir The ansible stat module does gracefully handle permission denied, and will instead fail with an error of the patch can't be read due to permissions. Add an ignore_errors: true to the task checking for cifmw_reproducer_src_dir and then verify the stat result is defined before using it. This scenario can occur when ansible_user is not the same user as the one executing ansible-playbook, and since this task is delegated to localhost, most users don't have permission to read other user's home dirs. 
Jira: OSPRH-19535 Signed-off-by: James Slagle --- roles/reproducer/tasks/configure_controller.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index a1b617e96f..a8995ae1f5 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -356,6 +356,7 @@ path: "{{ cifmw_reproducer_src_dir }}" register: cifmw_reproducer_src_dir_stat run_once: true + ignore_errors: true - name: Sync local repositories to other hosts if present delegate_to: localhost @@ -366,6 +367,7 @@ recursive: true loop: "{{ groups['controllers'] }}" when: + - cifmw_reproducer_src_dir_stat.stat is defined - cifmw_reproducer_src_dir_stat.stat.exists - cifmw_reproducer_src_dir_stat.stat.isdir @@ -375,6 +377,7 @@ path: "{{ cifmw_reproducer_src_dir }}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" register: _local_common_requirements_check run_once: true + ignore_errors: true - name: Install ansible dependencies register: _async_dep_install @@ -383,7 +386,7 @@ ansible.builtin.pip: requirements: "{{ have_local | ternary(local, remote) }}" vars: - have_local: "{{ _local_common_requirements_check.stat.exists }}" + have_local: "{{ _local_common_requirements_check.stat is defined and _local_common_requirements_check.stat.exists }}" local: "{{ cifmw_reproducer_src_dir }}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt From 26f39882b6d0b7887f25797992698ef83a3889ca Mon Sep 17 00:00:00 2001 From: Rajesh Tailor Date: Thu, 11 Sep 2025 17:01:00 +0530 Subject: [PATCH 396/480] Fix: correct filename This change fixes typo in filename for clarity. 
--- roles/ci_dcn_site/tasks/post-ceph.yml | 2 +- .../templates/{ceph_secerts.yaml.j2 => ceph_secrets.yaml.j2} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename roles/ci_dcn_site/templates/{ceph_secerts.yaml.j2 => ceph_secrets.yaml.j2} (100%) diff --git a/roles/ci_dcn_site/tasks/post-ceph.yml b/roles/ci_dcn_site/tasks/post-ceph.yml index 71fb59ae86..e1681429fb 100644 --- a/roles/ci_dcn_site/tasks/post-ceph.yml +++ b/roles/ci_dcn_site/tasks/post-ceph.yml @@ -67,7 +67,7 @@ ansible.builtin.template: mode: "0644" backup: false - src: "templates/ceph_secerts.yaml.j2" + src: "templates/ceph_secrets.yaml.j2" dest: "{{ ci_dcn_site_arch_path }}/ceph_secrets_{{ _az }}.yaml" - name: Apply ceph secret for this _az diff --git a/roles/ci_dcn_site/templates/ceph_secerts.yaml.j2 b/roles/ci_dcn_site/templates/ceph_secrets.yaml.j2 similarity index 100% rename from roles/ci_dcn_site/templates/ceph_secerts.yaml.j2 rename to roles/ci_dcn_site/templates/ceph_secrets.yaml.j2 From 61c908bcc80bd0e95cae429bb5028f3e2cb544c8 Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Mon, 9 Jun 2025 17:36:24 +0200 Subject: [PATCH 397/480] Make worker-Nodes take preference in nncp ci_kustomize As for example in 3+3 scenarios, we want to run all openstack namespaces pods in the worker nodes. As we have different type of nodes: ocp, crc and ocp-worker; we want to select the nodes by taking precedende to worker nodes if defined. If not, we want to check if it's a SNO crc deployment, and lastly, we'll go by typical scenarios where the control-plane, master and workers in the same nodes. 
--- .../templates/network-values/values.yaml.j2 | 9 ++++++++- .../templates/bgp-l3-xl/network-values/values.yaml.j2 | 1 + .../templates/bgp_dt01/network-values/values.yaml.j2 | 1 + .../templates/bmo01/network-values/values.yaml.j2 | 10 +++++++++- .../templates/common/network-values/values.yaml.j2 | 11 ++++++++++- .../templates/dcn/network-values/values.yaml.j2 | 10 +++++++++- .../multi-namespace/network-values2/values.yaml.j2 | 10 +++++++++- .../osasinfra-ipv6/network-values/values.yaml.j2 | 10 +++++++++- .../shiftstack/network-values/values.yaml.j2 | 10 ++++++++-- .../uni01alpha-adoption/network-values/values.yaml.j2 | 9 ++++++++- .../uni01alpha/network-values/values.yaml.j2 | 10 +++++++++- .../network-values/values.yaml.j2 | 10 +++++++++- .../uni04delta-ipv6/network-values/values.yaml.j2 | 10 +++++++++- .../uni05epsilon/network-values/values.yaml.j2 | 9 ++++++++- .../templates/uni06zeta/network-values/values.yaml.j2 | 10 +++++++++- .../templates/uni07eta/network-values/values.yaml.j2 | 10 +++++++++- 16 files changed, 125 insertions(+), 15 deletions(-) diff --git a/roles/ci_dcn_site/templates/network-values/values.yaml.j2 b/roles/ci_dcn_site/templates/network-values/values.yaml.j2 index 2c16ac3ed1..2f54cbce76 100644 --- a/roles/ci_dcn_site/templates/network-values/values.yaml.j2 +++ b/roles/ci_dcn_site/templates/network-values/values.yaml.j2 @@ -3,6 +3,13 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} @@ -13,7 +20,7 @@ data: }, recursive=true) -%} {% endfor -%} 
-{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 index 8cfc2a3cc8..6c6cdc4ca0 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp-l3-xl/network-values/values.yaml.j2 @@ -3,6 +3,7 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + data: {% set node_groups = groups if 'rhoso-architecture-validate' not in ((zuul | default({})).job | default('')) else test_groups %} {% for host in cifmw_networking_env_definition.instances.keys() -%} diff --git a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 index 030e8d10f8..78a491e5ff 100644 --- a/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bgp_dt01/network-values/values.yaml.j2 @@ -3,6 +3,7 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + data: {% set node_groups = groups if 'rhoso-architecture-validate' not in ((zuul | default({})).job | default('')) else test_groups %} {% for host in cifmw_networking_env_definition.instances.keys() -%} diff --git a/roles/ci_gen_kustomize_values/templates/bmo01/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/bmo01/network-values/values.yaml.j2 index 44a236a8a1..17f0155f90 100644 --- a/roles/ci_gen_kustomize_values/templates/bmo01/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/bmo01/network-values/values.yaml.j2 @@ -2,9 +2,17 @@ # source: bmo01/network-values/values.yaml.j2 {% set _ipv = 
cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} {% set ns = namespace(ocp_index=0) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 index 5f3cf99b28..9530d5444f 100644 --- a/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/common/network-values/values.yaml.j2 @@ -4,9 +4,18 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} + data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/dcn/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/dcn/network-values/values.yaml.j2 index ed85d77c5e..d5331a867a 100644 --- 
a/roles/ci_gen_kustomize_values/templates/dcn/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/dcn/network-values/values.yaml.j2 @@ -3,6 +3,14 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} {% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} @@ -13,7 +21,7 @@ data: }, recursive=true) -%} {% endfor -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 index ac7ffc8104..b31bfedcd9 100644 --- a/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/multi-namespace/network-values2/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set 
ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/osasinfra-ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/osasinfra-ipv6/network-values/values.yaml.j2 index 4db10941c9..de8066ffff 100644 --- a/roles/ci_gen_kustomize_values/templates/osasinfra-ipv6/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/osasinfra-ipv6/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 index f9b4a4933a..65276ec2dc 100644 --- a/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/shiftstack/network-values/values.yaml.j2 @@ -1,10 +1,16 @@ --- # source: shiftstack/network-values/values.yaml.j2 {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} - +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set 
filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni01alpha-adoption/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni01alpha-adoption/network-values/values.yaml.j2 index e17e9d1257..ce9fa52452 100644 --- a/roles/ci_gen_kustomize_values/templates/uni01alpha-adoption/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni01alpha-adoption/network-values/values.yaml.j2 @@ -4,9 +4,16 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 index 11b7f5230a..3a00c8e344 100644 --- a/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni01alpha/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | 
select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 index 5c33c206fb..be731f91ca 100644 --- a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 index 3b48d68758..7af40fb95d 100644 --- 
a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 index 526b09deb6..abfd17111b 100644 --- a/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni05epsilon/network-values/values.yaml.j2 @@ -4,9 +4,16 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git 
a/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 index a88987a16d..0908eb9f83 100644 --- a/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni06zeta/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 index 2df6024b6c..4928c5e47d 100644 --- a/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni07eta/network-values/values.yaml.j2 @@ -4,9 +4,17 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} + +{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} +{% set filter="^ocp-worker" %} +{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} +{% set filter="^crc" %} +{% else %} +{% set filter="^ocp" %} +{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match('^(ocp|crc).*') %} +{% if host is match(filter) %} 
node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} From 35b8986b014c5316d873d58c20dfc131ae44aa83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 2 Oct 2025 13:02:09 +0200 Subject: [PATCH 398/480] ironic hooks: Add a router if IPv6 For IPv6 network booting to work we need a router that does router advertisements with the options to instrcut clients to attempt DHCPv6 Config requests. This change adds a tasks to both adoption_ironic_post_oc and ironic_network hooks which create this router. Jira: OSPRH-20084 Jira: OSPRH-20019 --- hooks/playbooks/adoption_ironic_post_oc.yml | 9 +++++++++ hooks/playbooks/ironic_network.yml | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/hooks/playbooks/adoption_ironic_post_oc.yml b/hooks/playbooks/adoption_ironic_post_oc.yml index a97d4164a9..9bea215291 100644 --- a/hooks/playbooks/adoption_ironic_post_oc.yml +++ b/hooks/playbooks/adoption_ironic_post_oc.yml @@ -21,6 +21,7 @@ _subnet_gateway: '172.20.1.1' _subnet_alloc_pool_start: '172.20.1.150' _subnet_alloc_pool_end: '172.20.1.199' + _subnet_ip_version: 4 _provider_physical_network: ironic _provider_network_type: flat tasks: @@ -156,6 +157,14 @@ --gateway {{ _subnet_gateway }} \ --allocation-pool start={{ _subnet_alloc_pool_start }},end={{ _subnet_alloc_pool_end }} + - name: Create router and attach subnet for IPv6 provisioning network + ansible.builtin.shell: + cmd: >- + openstack router show provisioning &>/dev/null || \ + (openstack router create provisioning && \ + openstack router add subnet provisioning provisioning-subnet) + when: _subnet_ip_version | int == 6 + - name: Slurp ironic_nodes.yaml from controller-0 delegate_to: controller-0 register: _ironic_nodes_slurp diff --git a/hooks/playbooks/ironic_network.yml b/hooks/playbooks/ironic_network.yml index 2097423b52..3e00483b3a 100644 --- a/hooks/playbooks/ironic_network.yml +++ 
b/hooks/playbooks/ironic_network.yml @@ -47,3 +47,13 @@ --gateway {{ _subnet_gateway }} \ --dns-nameserver {{ _subnet_nameserver }} \ --allocation-pool start={{ _subnet_alloc_pool_start }},end={{ _subnet_alloc_pool_end }} + + - name: Create router for IPv6 provisioning network + ansible.builtin.shell: | + set -xe -o pipefail + oc project {{ _namespace }} + oc rsh openstackclient \ + openstack router create provisioning + oc rsh openstackclient \ + openstack router add subnet provisioning provisioning-subnet + when: _subnet_ip_version | int == 6 From 26f0783f4d1e4f4c1f84e161f1de62a3a209d04f Mon Sep 17 00:00:00 2001 From: Luca Miccini Date: Fri, 3 Oct 2025 08:07:25 +0200 Subject: [PATCH 399/480] Add pidone network-values template 61c908bcc80bd0e95cae429bb5028f3e2cb544c8 broke va-pidone, with errors like: "fieldPath `data.node_4.internalapi_ip` is missing for replacement" This commit adds a dedicated template (essentially a copy of the common template at HEAD~1). --- .../pidone/network-values/values.yaml.j2 | 128 ++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/pidone/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/pidone/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/pidone/network-values/values.yaml.j2 new file mode 100644 index 0000000000..57d1ccd817 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/pidone/network-values/values.yaml.j2 @@ -0,0 +1,128 @@ +--- +# source: pidone/network-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in 
cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} + - allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network[_ipv.network_vX] }} +{% if network[_ipv.gw_vX] is defined %} + gateway: {{ network[_ipv.gw_vX] }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% 
endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% elif network.network_name == "ctlplane" %} + "master": "ospbr", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} From dfb9a8c119062c5d89fdcb4779c3797331e0293c Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 26 Sep 2025 15:31:52 +0200 Subject: 
[PATCH 400/480] Make a symlink of os_must_gather dir to static name dir Some teams are using static directory to grab latest CI job results. Let's help them and create a symlink to latest dir. Signed-off-by: Daniel Pawlik --- roles/os_must_gather/defaults/main.yml | 1 + roles/os_must_gather/tasks/main.yml | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/roles/os_must_gather/defaults/main.yml b/roles/os_must_gather/defaults/main.yml index 4005722552..89aea8c31f 100644 --- a/roles/os_must_gather/defaults/main.yml +++ b/roles/os_must_gather/defaults/main.yml @@ -21,6 +21,7 @@ cifmw_os_must_gather_image: "quay.io/openstack-k8s-operators/openstack-must-gath cifmw_os_must_gather_image_push: true cifmw_os_must_gather_image_registry: "quay.rdoproject.org/openstack-k8s-operators" cifmw_os_must_gather_output_dir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" +cifmw_os_must_gather_output_log_dir: "{{ cifmw_os_must_gather_output_dir }}/logs/openstack-must-gather" cifmw_os_must_gather_repo_path: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/openstack-must-gather" cifmw_os_must_gather_timeout: "10m" cifmw_os_must_gather_additional_namespaces: "kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko" diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index e780263da3..c11d6e9e55 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -16,7 +16,7 @@ - name: Ensure directories are present ansible.builtin.file: - path: "{{ cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather" + path: "{{ cifmw_os_must_gather_output_log_dir }}" state: directory mode: "0755" @@ -63,7 +63,7 @@ oc adm must-gather --image {{ cifmw_os_must_gather_image }} --timeout {{ cifmw_os_must_gather_timeout }} --host-network={{ cifmw_os_must_gather_host_network }} - --dest-dir {{ 
cifmw_os_must_gather_output_dir }}/logs/openstack-k8s-operators-openstack-must-gather + --dest-dir {{ cifmw_os_must_gather_output_log_dir }} -- ADDITIONAL_NAMESPACES={{ cifmw_os_must_gather_additional_namespaces }} OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM @@ -71,6 +71,19 @@ gather 2>&1 + - name: Find existing os-must-gather directories + ansible.builtin.find: + paths: "{{ cifmw_os_must_gather_output_log_dir }}" + file_type: directory + depth: 1 + register: _os_gather_latest_dir + + - name: Create a symlink to newest os-must-gather directory + ansible.builtin.file: + src: "{{ (_os_gather_latest_dir.files | sort(attribute='mtime', reverse=True) | first).path | basename }}" + dest: "{{ cifmw_os_must_gather_output_log_dir }}/latest" + state: link + rescue: - name: Openstack-must-gather failure block: From 242d32af48d301ebc3f8241b888eac5f9a5877af Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Tue, 7 Oct 2025 12:02:30 +0200 Subject: [PATCH 401/480] Add tip for deepscrub usage With this tip we want to make a suggestion about when using deepscrub, and explaining that using it, it also flushes the cache. --- docs/source/quickstart/05_clean_infra.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/source/quickstart/05_clean_infra.md b/docs/source/quickstart/05_clean_infra.md index f40da2c454..d6b535c323 100644 --- a/docs/source/quickstart/05_clean_infra.md +++ b/docs/source/quickstart/05_clean_infra.md @@ -25,3 +25,7 @@ In case you want to remove everything, with the base images. reproducer-clean.yml \ --tags deepscrub ``` + +~~~{tip} +This includes flushing the Ansible cache. If you perform a quick cleanup and encounter new errors, you should run a deepscrub or re-run the playbook with --flush-cache. 
+~~~ From ffe7f2aef1f883b4d47cf89a09436901a79e616d Mon Sep 17 00:00:00 2001 From: Abhishek Bongale Date: Wed, 1 Oct 2025 10:50:53 +0100 Subject: [PATCH 402/480] Configure adoption_ironic_post_oc subnet with ipv6 We have noticed that uni04delta-ipv6 adoption deploys the OSP17.1 successfully with IPv6 network. But we found out that ironic subnet is configured with IPv4 instead of IPv6. This PR aims to fix it. fixes: OSPRH-20019 Signed-off-by: Abhishek Bongale --- hooks/playbooks/adoption_ironic_post_oc.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hooks/playbooks/adoption_ironic_post_oc.yml b/hooks/playbooks/adoption_ironic_post_oc.yml index 9bea215291..ef1c855907 100644 --- a/hooks/playbooks/adoption_ironic_post_oc.yml +++ b/hooks/playbooks/adoption_ironic_post_oc.yml @@ -22,6 +22,8 @@ _subnet_alloc_pool_start: '172.20.1.150' _subnet_alloc_pool_end: '172.20.1.199' _subnet_ip_version: 4 + _subnet_ipv6_address_mode: null + _subnet_ipv6_ra_mode: null _provider_physical_network: ironic _provider_network_type: flat tasks: @@ -153,6 +155,13 @@ openstack subnet show provisioning-subnet &>/dev/null || \ openstack subnet create provisioning-subnet \ --network provisioning \ + --ip-version {{ _subnet_ip_version }} \ + {% if _subnet_ipv6_address_mode -%} + --ipv6-address-mode {{ _subnet_ipv6_address_mode }} \ + {% endif -%} + {% if _subnet_ipv6_ra_mode -%} + --ipv6-ra-mode {{ _subnet_ipv6_ra_mode }} \ + {% endif -%} --subnet-range {{ _subnet_range }} \ --gateway {{ _subnet_gateway }} \ --allocation-pool start={{ _subnet_alloc_pool_start }},end={{ _subnet_alloc_pool_end }} From 8a5b406a34c359f47c5e8af35df49253f9c6d334 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 7 Oct 2025 14:38:46 +0530 Subject: [PATCH 403/480] refactor(ci_nmstate): Increase retries for nmstate nncp to be ready Currently, we were retrying only 6 times with delay of 10 seconds to get nmstate nncp ready. It causes the job to fail frequently in molecule run. 
Increasing retries should help nmstate nncp get ready, and molecule job of ci_nmstate role pass. Signed-off-by: Amartya Sinha --- roles/ci_nmstate/tasks/nmstate_k8s_provision_node.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_nmstate/tasks/nmstate_k8s_provision_node.yml b/roles/ci_nmstate/tasks/nmstate_k8s_provision_node.yml index fe1b28d0d0..8c5103aacd 100644 --- a/roles/ci_nmstate/tasks/nmstate_k8s_provision_node.yml +++ b/roles/ci_nmstate/tasks/nmstate_k8s_provision_node.yml @@ -41,7 +41,7 @@ context: "{{ cifmw_openshift_context | default(omit)}}" name: "{{ _cifmw_ci_nmstate_k8s_node_config_name }}" register: _nsmate_instance_nncp_out - retries: 6 + retries: 30 delay: 10 until: - _nsmate_instance_nncp_out is defined From ed55a023cd55cfee7b519df1d81984f1a137aa12 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 23 Sep 2025 14:22:09 -0400 Subject: [PATCH 404/480] Fix openshift_adm role context when kubeconfig has multiple clusters Add configurable context parameter to openshift_adm role to explicitly target the deployed OpenShift cluster instead of relying on current-context. 
Changes: - Add cifmw_openshift_adm_context variable defaulting to 'admin' - Update all kubernetes.core tasks to use the specified context: - _get_nodes.yml: k8s_info task for node gathering - wait_for_cluster.yml: k8s_drain task for node management - api_cert.yml: k8s and k8s_info tasks for certificate operations - shutdown.yml: k8s_drain task for node cordoning - Add context switching for custom modules that don't support context: - approve_csr module: switch context before CSR approval - openshift_auth module: switch context before authentication - Replace static cifmw_openshift_api parameter with dynamic API server URL detection: - Add _get_api_server.yml task to retrieve URL from current context - Update URI check and authentication to use context-based URL - Fixes issue where tasks fail with 403 Forbidden or target wrong cluster when current-context points to CI cluster where user lacks permissions - Also, increase delay in wait_for_cluster.yml since waiting for 2 seconds before retrying does more harm than good in our long running CI jobs. Jira: https://issues.redhat.com/browse/OSPRH-20252 Co-Authored-By: Claude Signed-off-by: John Fulton --- roles/openshift_adm/README.md | 6 +- roles/openshift_adm/defaults/main.yml | 1 + roles/openshift_adm/tasks/_get_api_server.yml | 46 +++++++++++++ roles/openshift_adm/tasks/_get_nodes.yml | 1 + roles/openshift_adm/tasks/api_cert.yml | 2 + roles/openshift_adm/tasks/main.yml | 1 - roles/openshift_adm/tasks/shutdown.yml | 1 + .../openshift_adm/tasks/wait_for_cluster.yml | 65 ++++++++++++------- 8 files changed, 99 insertions(+), 24 deletions(-) create mode 100644 roles/openshift_adm/tasks/_get_api_server.yml diff --git a/roles/openshift_adm/README.md b/roles/openshift_adm/README.md index ecf7ddff4f..e8fa15f863 100644 --- a/roles/openshift_adm/README.md +++ b/roles/openshift_adm/README.md @@ -16,7 +16,6 @@ This role requires the following parameters to be configured. 
* `cifmw_openshift_adm_basedir` (str) Framework base directory, defaults to `cifmw_basedir` or `~/ci-framework-data`. -* `cifmw_openshift_api` (str) Cluster endpoint to be used for communication. * `cifmw_openshift_user` (str) Name of the user to be used for authentication. * `cifmw_openshift_password` (str) Password of the provided user. * `cifmw_openshift_kubeconfig` (str) Absolute path to the kubeconfig file. @@ -30,6 +29,11 @@ This role requires the following parameters to be configured. performed on the cluster. * `cifmw_openshift_adm_retry_count` (int) The maximum number of attempts to be made for a command to succeed. Default is `100`. +* `cifmw_openshift_adm_context` (str) The kubeconfig context to use for cluster operations. Default is `admin`. + +## Obsolete Parameters + +* `cifmw_openshift_api` (str) Previously required cluster endpoint URL. Removed in favor of dynamic API server URL detection from kubeconfig context to ensure correct cluster targeting. ## Reference diff --git a/roles/openshift_adm/defaults/main.yml b/roles/openshift_adm/defaults/main.yml index fc3b2ccdd5..1cb22791f9 100644 --- a/roles/openshift_adm/defaults/main.yml +++ b/roles/openshift_adm/defaults/main.yml @@ -29,3 +29,4 @@ cifmw_openshift_adm_op: "" cifmw_openshift_adm_dry_run: false cifmw_openshift_adm_retry_count: 100 cifmw_openshift_adm_stable_period: 3m +cifmw_openshift_adm_context: admin diff --git a/roles/openshift_adm/tasks/_get_api_server.yml b/roles/openshift_adm/tasks/_get_api_server.yml new file mode 100644 index 0000000000..f7d44f3922 --- /dev/null +++ b/roles/openshift_adm/tasks/_get_api_server.yml @@ -0,0 +1,46 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# Gets the API server URL from the current context in the kubeconfig + +- name: Get current context + ansible.builtin.command: | + oc config current-context + register: _current_context + changed_when: false + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + +- name: Get cluster name from current context + ansible.builtin.command: | + oc config view -o jsonpath='{.contexts[?(@.name=="{{ _current_context.stdout }}")].context.cluster}' + register: _current_cluster + changed_when: false + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + +- name: Get API server URL from cluster + ansible.builtin.command: | + oc config view -o jsonpath='{.clusters[?(@.name=="{{ _current_cluster.stdout }}")].cluster.server}' + register: _context_api_server + changed_when: false + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + +- name: Set API server URL from context + ansible.builtin.set_fact: + _current_api_server: "{{ _context_api_server.stdout }}" diff --git a/roles/openshift_adm/tasks/_get_nodes.yml b/roles/openshift_adm/tasks/_get_nodes.yml index ab7dd03ebb..b3d75341e6 100644 --- a/roles/openshift_adm/tasks/_get_nodes.yml +++ b/roles/openshift_adm/tasks/_get_nodes.yml @@ -4,6 +4,7 @@ kubernetes.core.k8s_info: kind: Node kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + context: "{{ cifmw_openshift_adm_context }}" validate_certs: false wait_condition: reason: KubeletReady diff --git a/roles/openshift_adm/tasks/api_cert.yml b/roles/openshift_adm/tasks/api_cert.yml index b62232c07e..722506d49d 100644 --- 
a/roles/openshift_adm/tasks/api_cert.yml +++ b/roles/openshift_adm/tasks/api_cert.yml @@ -37,6 +37,7 @@ name: "{{ item }}" state: absent kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + context: "{{ cifmw_openshift_adm_context }}" validate_certs: false loop: - csr-signer-signer @@ -60,6 +61,7 @@ namespace: openshift-kube-controller-manager-operator name: csr-signer-signer kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + context: "{{ cifmw_openshift_adm_context }}" validate_certs: false register: _api_cert diff --git a/roles/openshift_adm/tasks/main.yml b/roles/openshift_adm/tasks/main.yml index 393fc9640f..dda10c2221 100644 --- a/roles/openshift_adm/tasks/main.yml +++ b/roles/openshift_adm/tasks/main.yml @@ -20,7 +20,6 @@ that: - cifmw_basedir is defined - cifmw_path is defined - - cifmw_openshift_api is defined - cifmw_openshift_user is defined - cifmw_openshift_password is defined - cifmw_openshift_kubeconfig is defined diff --git a/roles/openshift_adm/tasks/shutdown.yml b/roles/openshift_adm/tasks/shutdown.yml index 23d3b326d0..7f3cf6d0d0 100644 --- a/roles/openshift_adm/tasks/shutdown.yml +++ b/roles/openshift_adm/tasks/shutdown.yml @@ -57,6 +57,7 @@ name: "{{ item }}" state: cordon kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + context: "{{ cifmw_openshift_adm_context }}" validate_certs: false loop: "{{ _node_names }}" diff --git a/roles/openshift_adm/tasks/wait_for_cluster.yml b/roles/openshift_adm/tasks/wait_for_cluster.yml index 3148877b81..f96a14518e 100644 --- a/roles/openshift_adm/tasks/wait_for_cluster.yml +++ b/roles/openshift_adm/tasks/wait_for_cluster.yml @@ -18,16 +18,19 @@ # We would wait till forbidden error is received. It indicates the endpoint # is reachable. +- name: Get API server URL from current context + ansible.builtin.include_tasks: _get_api_server.yml + - name: Wait until the OCP API endpoint is reachable. 
ansible.builtin.uri: - url: "{{ cifmw_openshift_api }}" + url: "{{ _current_api_server }}" return_content: true validate_certs: false status_code: 403 register: ocp_api_result until: ocp_api_result.status == 403 retries: "{{ cifmw_openshift_adm_retry_count }}" - delay: 5 + delay: 30 - name: Get nodes list ansible.builtin.import_tasks: _get_nodes.yml @@ -39,25 +42,32 @@ name: "{{ item }}" state: uncordon kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + context: "{{ cifmw_openshift_adm_context }}" validate_certs: false loop: "{{ _nodes.resources | map(attribute='metadata.name') | list }}" register: _node_status until: _node_status.result is defined retries: "{{ cifmw_openshift_adm_retry_count }}" - delay: 5 + delay: 30 - name: Check for pending certificate approval. when: - _openshift_adm_check_cert_approve | default(false) | bool - register: _approve_csr - approve_csr: - k8s_config: "{{ cifmw_openshift_kubeconfig }}" - retries: 30 - delay: 10 - until: - - _approve_csr is defined - - _approve_csr.rc is defined - - _approve_csr.rc == 0 + block: + - name: Set current context to admin for CSR approval + ansible.builtin.shell: | + KUBECONFIG="{{ cifmw_openshift_kubeconfig }}" oc config use-context "{{ cifmw_openshift_adm_context }}" + + - name: Approve pending certificate requests + register: _approve_csr + approve_csr: + k8s_config: "{{ cifmw_openshift_kubeconfig }}" + retries: 10 + delay: 30 + until: + - _approve_csr is defined + - _approve_csr.rc is defined + - _approve_csr.rc == 0 - name: Wait until the OpenShift cluster is stable. environment: @@ -68,13 +78,24 @@ oc adm wait-for-stable-cluster --minimum-stable-period=5s --timeout=30m - name: Wait until OCP login succeeds. 
- community.okd.openshift_auth: - host: "{{ cifmw_openshift_api }}" - password: "{{ cifmw_openshift_password }}" - state: present - username: "{{ cifmw_openshift_user }}" - validate_certs: false - register: _oc_login_result - until: _oc_login_result.k8s_auth is defined - retries: "{{ cifmw_openshift_adm_retry_count }}" - delay: 2 + block: + - name: Ensure admin context is set for login + ansible.builtin.shell: | + KUBECONFIG="{{ cifmw_openshift_kubeconfig }}" oc config use-context "{{ cifmw_openshift_adm_context }}" + + # Re-get API server URL since admin context may point to a different + # cluster than the initial context used for reachability check above + - name: Get API server URL from admin context + ansible.builtin.include_tasks: _get_api_server.yml + + - name: Authenticate to OpenShift cluster + community.okd.openshift_auth: + host: "{{ _current_api_server }}" + password: "{{ cifmw_openshift_password }}" + state: present + username: "{{ cifmw_openshift_user }}" + validate_certs: false + register: _oc_login_result + until: _oc_login_result.k8s_auth is defined + retries: "{{ cifmw_openshift_adm_retry_count }}" + delay: 30 From 1a7b2ae378ead444d1ed756aace4ec009d749f1a Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 7 Oct 2025 12:41:52 +0200 Subject: [PATCH 405/480] Add semaphore-molecule We would like to limit maximum value of executed CI jobs in the same time. By adding semaphore with limit, should help with that. 
Signed-off-by: Daniel Pawlik --- zuul.d/molecule-base.yaml | 1 + zuul.d/semaphores.yaml | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 zuul.d/semaphores.yaml diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 01c41e6aa2..709544963a 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -4,6 +4,7 @@ name: cifmw-molecule-base nodeset: centos-stream-9-ibm parent: base-ci-framework + semaphore: semaphore-molecule provides: - cifmw-molecule pre-run: diff --git a/zuul.d/semaphores.yaml b/zuul.d/semaphores.yaml new file mode 100644 index 0000000000..67cf97f430 --- /dev/null +++ b/zuul.d/semaphores.yaml @@ -0,0 +1,4 @@ +--- +- semaphore: + name: semaphore-molecule + max: 15 From 9906dc1097b31ea0f626418776db20f9acbfa271 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 8 Oct 2025 08:52:28 +0200 Subject: [PATCH 406/480] Increase semaphore for molecule job We thought that some molecule CI test will fail less often if less molecule jobs would be computed in the same amount of time. Max server for IBM cloud provider is 25 per host, which means that maximum value is 50. For sure the host would handle that, but let's have some resource reservation to avoid potential problems. Signed-off-by: Daniel Pawlik --- zuul.d/semaphores.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zuul.d/semaphores.yaml b/zuul.d/semaphores.yaml index 67cf97f430..e9338c7f3f 100644 --- a/zuul.d/semaphores.yaml +++ b/zuul.d/semaphores.yaml @@ -1,4 +1,4 @@ --- - semaphore: name: semaphore-molecule - max: 15 + max: 40 From e5b36105557ef15754bf378f33cab91efe098e8f Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 3 Oct 2025 11:01:18 +0200 Subject: [PATCH 407/480] Add parse ansible argument string to variables In some playbook, when nested Ansible is executed via shell/command module, there is a string which contains arguments to parse by the ansible-playbook binary. 
If nested Ansible can be removed, it would be required to parse such variables. This helper task can help to parse properly variables, that later can be read properly by tasks without using nested Ansible execution. Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/README.md | 114 ++++++++++++++++++ .../tasks/parse_ansible_args_string.yml | 34 ++++++ 2 files changed, 148 insertions(+) create mode 100644 roles/cifmw_helpers/tasks/parse_ansible_args_string.yml diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md index 0b084df452..8a1afc454c 100644 --- a/roles/cifmw_helpers/README.md +++ b/roles/cifmw_helpers/README.md @@ -209,3 +209,117 @@ execute tasks on new host. name: cifmw_helpers tasks_from: inventory_file.yml ``` + +#### Parse string of arguments and convert to list of variables or list of files + +In some playbook, when nested Ansible is executed via shell/command module, +there is a string which contains arguments to parse by the ansible-playbook +binary. If nested Ansible can be removed, it would be required to parse +such variables. Below example how nested Ansible execution looks like, +and how it could be replaced. + +NOTE: `test.yaml` is executed on `host-1`. 
+ +Example: +- all files are on same host which execute ansible-playbook + +```yaml +- name: Nested Ansible execution + hosts: localhost + tasks: + - name: Run ansible-playbook + vars: + cmd_args: "-e@somefile.yml -e @/tmp/someotherfile.yml -e myvar=test" + ansible.builtin.command: | + ansible-playbook "{{ cmd_args }}" test.yaml +``` + +To: + +```yaml +- name: Playbook that does not use nested Ansible - same host + hosts: localhost + vars: + cifmw_cmd_args: "-e@somefile.yml -e @/tmp/someotherfile.yml -e myvar=test" + tasks: + # NOTE: The task returns fact: cifmw_cmd_args_vars and cifmw_cmd_args_files + - name: Read inventory file and add it using add_host module + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: parse_ansible_args_string.yml + + - name: Parse only variables from cifmw_cmd_args_vars + when: cifmw_cmd_args_vars is defined and cifmw_cmd_args_vars | length > 0 + vars: + various_vars: "{{ cifmw_cmd_args_vars }}" + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: various_vars.yml + + - name: Read var files from cifmw_cmd_args + when: cifmw_cmd_args_files is defined and cifmw_cmd_args_files | length > 0 + ansible.builtin.include_vars: + file: "{{ files_item }}" + loop: "{{ cifmw_cmd_args_files }}" + loop_control: + loop_var: files_item +``` + +- files are located in remote host - controller + +In alternative version, variables are available on remote host. That requires +to fetch the files first to host which is executing the Ansible - include_vars +reads only files that are on the host where ansible-playbook was executed. 
+Example: + +```yaml +- name: Nested Ansible execution + hosts: controller + tasks: + - name: Run ansible-playbook + vars: + cmd_args: "-e@somefile.yml -e @/tmp/someotherfile.yml -e myvar=test" + ansible.builtin.command: | + ansible-playbook "{{ cmd_args }}" test.yaml +``` + +To: + +```yaml +- name: Playbook that does not use nested Ansible - different host + hosts: controller + vars: + cifmw_cmd_args: "-e@somefile.yml -e @/tmp/someotherfile.yml -e myvar=test" + tasks: + # NOTE: The task returns fact: cifmw_cmd_args_vars and cifmw_cmd_args_files + - name: Read inventory file and add it using add_host module + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: parse_ansible_args_string.yml + + - name: Parse only variables from cifmw_cmd_args_vars + when: cifmw_cmd_args_vars is defined and cifmw_cmd_args_vars | length > 0 + vars: + various_vars: "{{ cifmw_cmd_args_vars }}" + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: various_vars.yml + + - name: Fetch cifmw_cmd_args_files to executing host + when: cifmw_cmd_args_files is defined and cifmw_cmd_args_files | length > 0 + ansible.builtin.fetch: + src: "{{ files_item }}" + dest: "{{ files_item }}" + flat: true + loop: "{{ cifmw_cmd_args_files }}" + loop_control: + loop_var: files_item + + - name: Read fetched var files from cmd_args + when: cifmw_cmd_args_files is defined and cifmw_cmd_args_files | length > 0 + ansible.builtin.include_vars: + file: "{{ files_item }}" + loop: "{{ cifmw_cmd_args_files }}" + loop_control: + loop_var: files_item +``` diff --git a/roles/cifmw_helpers/tasks/parse_ansible_args_string.yml b/roles/cifmw_helpers/tasks/parse_ansible_args_string.yml new file mode 100644 index 0000000000..a72ae21922 --- /dev/null +++ b/roles/cifmw_helpers/tasks/parse_ansible_args_string.yml @@ -0,0 +1,34 @@ +--- +# This would help to parse variables, that +# are called in nested ansible execution using shell/command +# module. 
+# For example: +# +# cifmw_cmd_args: "-e@somefile.yml -e @/tmp/someotherfile.yml -e myvar=test" +# +# to: +# +# cifmw_cmd_args_vars: [{'myvar': 'test'}] +# cifmw_cmd_args_files: ['somefile.yml', '/tmp/someotherfile.yml'] +# + +- name: Split string of arguments into the lists of vars and files + when: cifmw_cmd_args | length > 1 + ansible.builtin.set_fact: + cifmw_cmd_args_vars: "{{ cifmw_cmd_args + | split(' -e ') + | reject('search', '@') + | reject('equalto', '') + | map('regex_replace', '^(.*?)=(.*)$', '{\"\\1\": \"\\2\"}') + | map('from_yaml') + | list + }}" + cifmw_cmd_args_files: "{{ cifmw_cmd_args + | split('-e') + | select() + | map('trim') + | select('match', '^@.*\\.(yml|yaml)$') + | list + | replace('@', '') + }}" + no_log: "{{ cifmw_helpers_no_log }}" From 25fa1a82d2bc1b5991a865e859382ec896c7845f Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Fri, 3 Oct 2025 14:38:08 +0200 Subject: [PATCH 408/480] Revert change to make preference nncp on worker nodes thatn master nodes It seems these scenarios has master nodes and a single worker node which is tainted. This worker node is used for running tempest but nothing related with OCP. Problematic scenarios are those which has a worker node tainted for tempest, but the workload is set in master nodes. There's no reason to configure NNCP in master nodes when we have regular worker nodes, so we were assuming that in case of there's a worker node, then all NNCP goes to worker node, which is problematic when single worker node is tainted. So we need to enhance the templates to: * Check if there's a worker node, if so, check if those are tainted. * If not tainted, worker takes preference. * If tainted, master takes preference. * If there's no worker node, then we go with master nodes. * Same for SNO CRC scenarios. 
Adding also uni02beta that takes the template from common --- .../edpm-nodeset-values/values.yaml.j2 | 77 +++++++++++ .../uni02beta/edpm-values/values.yaml.j2 | 71 ++++++++++ .../uni02beta/network-values/values.yaml.j2 | 128 ++++++++++++++++++ .../uni02beta/olm-values/values.yaml.j2 | 14 ++ .../network-values/values.yaml.j2 | 10 +- .../network-values/values.yaml.j2 | 10 +- 6 files changed, 292 insertions(+), 18 deletions(-) create mode 100644 roles/ci_gen_kustomize_values/templates/uni02beta/edpm-nodeset-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/uni02beta/edpm-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/uni02beta/network-values/values.yaml.j2 create mode 100644 roles/ci_gen_kustomize_values/templates/uni02beta/olm-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..9fbadbb696 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-nodeset-values/values.yaml.j2 @@ -0,0 +1,77 @@ +--- +# source: uni02beta/edpm-nodeset-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith(_vm_type) %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} 
+ public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in instances_names %} + edpm-{{ instance }}: +{% if cifmw_baremetal_hosts is defined %} +{% for interface in cifmw_baremetal_hosts[instance].nics %} + nic{{ loop.index }}: "{{ interface.mac }}" +{% endfor %} +{% else %} +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endif %} +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in instances_names %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} + +{% if _vm_type.startswith('compute') %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | 
b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-values/values.yaml.j2 new file mode 100644 index 0000000000..43a43ce61f --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni02beta/edpm-values/values.yaml.j2 @@ -0,0 +1,71 @@ +--- +# source: uni02beta/edpm-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith(_vm_type) %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} +{% if _vm_type.startswith('compute') %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in instances_names %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ 
hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} +{% if ('repo-setup' not in (_original_nodeset['services'] | default([]))) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} + + nodes: +{% for instance in instances_names %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 +{% if net is match('ctlplane') %} + defaultRoute: true + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} +{% endif %} +{% endfor %} +{% endfor %} diff --git a/roles/ci_gen_kustomize_values/templates/uni02beta/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni02beta/network-values/values.yaml.j2 new file mode 100644 index 0000000000..0955d98d9a --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni02beta/network-values/values.yaml.j2 @@ -0,0 +1,128 @@ +--- +# source: uni02beta/network-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in cifmw_networking_env_definition.instances.keys() -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] 
}} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) %} + {{ network.network_name }}_ip: {{ network[_ipv.ip_vX] }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} + - allocationRanges: +{% for range in network.tools.netconfig[_ipv.ipvX_ranges] %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network[_ipv.network_vX] }} +{% if network[_ipv.gw_vX] is defined %} + gateway: {{ network[_ipv.gw_vX] }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool][_ipv.ipvX_ranges] %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool }}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network[_ipv.network_vX] | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ 
ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% elif network.network_name == "ctlplane" %} + "master": "ospbr", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network[_ipv.network_vX] }}", + "range_start": "{{ network.tools.multus[_ipv.ipvX_ranges].0.start }}", + "range_end": "{{ network.tools.multus[_ipv.ipvX_ranges].0.end }}" + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }}" + search: [] + options: + - key: server + values: + - {{ cifmw_networking_env_definition.networks.ctlplane[_ipv.gw_vX] }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + + routes: + config: [] + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'][_ipv.network_vX] | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} diff --git a/roles/ci_gen_kustomize_values/templates/uni02beta/olm-values/values.yaml.j2 
b/roles/ci_gen_kustomize_values/templates/uni02beta/olm-values/values.yaml.j2 new file mode 100644 index 0000000000..b2d79b0f57 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni02beta/olm-values/values.yaml.j2 @@ -0,0 +1,14 @@ +# source: uni02beta/olm-values/values.yaml.j2 +data: + openstack-operator-image: {{ cifmw_ci_gen_kustomize_values_ooi_image | default('quay.io/openstack-k8s-operators/openstack-operator-index:latest', true) }} +{% if cifmw_ci_gen_kustomize_values_sub_channel is defined %} + openstack-operator-channel: {{ cifmw_ci_gen_kustomize_values_sub_channel }} +{% endif %} +{% if cifmw_ci_gen_kustomize_values_deployment_version is defined %} +{% if cifmw_ci_gen_kustomize_values_deployment_version not in ['v1.0.3', 'v1.0.6'] %} + openstack-operator-version: openstack-operator.{{ cifmw_ci_gen_kustomize_values_deployment_version }} +{% endif %} +{% endif %} +{% if cifmw_ci_gen_kustomize_values_installplan_approval is defined %} + openstack-operator-installplanapproval: {{ cifmw_ci_gen_kustomize_values_installplan_approval }} +{% endif %} diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 index be731f91ca..5c33c206fb 100644 --- a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6-adoption/network-values/values.yaml.j2 @@ -4,17 +4,9 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} - -{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} -{% set filter="^ocp-worker" %} -{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} -{% set filter="^crc" %} -{% else %} -{% set filter="^ocp" %} -{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() 
-%} -{% if host is match(filter) %} +{% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} diff --git a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 index 7af40fb95d..3b48d68758 100644 --- a/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/uni04delta-ipv6/network-values/values.yaml.j2 @@ -4,17 +4,9 @@ {% set ns = namespace(interfaces={}, ocp_index=0, lb_tools={}) %} - -{% if cifmw_networking_env_definition.instances.keys() | select('match', '^ocp-worker') | list | length > 0 %} -{% set filter="^ocp-worker" %} -{% elif cifmw_networking_env_definition.instances.keys() | select('match', 'crc') | list | length > 0 %} -{% set filter="^crc" %} -{% else %} -{% set filter="^ocp" %} -{% endif %} data: {% for host in cifmw_networking_env_definition.instances.keys() -%} -{% if host is match(filter) %} +{% if host is match('^(ocp|crc).*') %} node_{{ ns.ocp_index }}: {% set ns.ocp_index = ns.ocp_index+1 %} name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} From 5d5e2d59cecf94ebfbb70b8a31a67a1764f0a34f Mon Sep 17 00:00:00 2001 From: Sofer Athlan-Guyot Date: Wed, 24 Sep 2025 16:11:39 +0200 Subject: [PATCH 409/480] [update] Allow curl to use ipv6 for downloading cirros image. For testing we use a cirros image that is used to create a vm on the cloud. We used to force ipv4 on curl, but it fails in ipv6 unidelta job. Remove the constraint to suit ipv4 and ipv6 jobs. 
Closes: [uni04delta-ipv6-update failing to download cirros-cloud.net image ](https://issues.redhat.com/browse/OSPRH-17249) --- roles/update/templates/workload_launch.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/update/templates/workload_launch.sh.j2 b/roles/update/templates/workload_launch.sh.j2 index e9adf9aa58..cba70bf5e2 100644 --- a/roles/update/templates/workload_launch.sh.j2 +++ b/roles/update/templates/workload_launch.sh.j2 @@ -291,7 +291,7 @@ function workload_launch { openstack image list | grep ${IMAGE_NAME} if [ $? -ne 0 ]; then echo "Downloading image ${IMAGE_URL}" - curl -4fsSLk --retry 5 -o ${IMAGE_FILE} ${IMAGE_URL} + curl -fsSLk --retry 5 -o ${IMAGE_FILE} ${IMAGE_URL} if [ $? -ne 0 ]; then echo "Failed to download ${IMAGE_URL}" From 2480cc4f247eccb12160dbdd3cddd9c19a086c4d Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 8 Oct 2025 17:27:20 +0200 Subject: [PATCH 410/480] Create group_vars directory before starting the molecule tests Without this directory, Molecule would fail with an error: CRITICAL The source path '/home/zuul/src/github.com/openstack-k8s-operators/edpm-ansible/roles/edpm_kernel/molecule/default/../../../../group_vars/' does not exist. WARNING An error occurred during the test sequence action: 'prepare'. Cleaning up. CRITICAL The source path '/home/zuul/src/github.com/openstack-k8s-operators/edpm-ansible/roles/edpm_kernel/molecule/default/../../../../group_vars/' does not exist. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/molecule-test.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index 871988d9f3..6d7c6669d6 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -17,6 +17,11 @@ ansible.builtin.include_vars: file: "{{ cifmw_reproducer_molecule_env_file }}" + - name: Ensure group_vars dir exists + ansible.builtin.file: + path: "{{ roles_dir }}/../../group_vars" + state: directory + - name: Run molecule environment: ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/zuul-output/logs/ansible-execution.log" From b86dcbb9892d1b297ee42a6f38d39324b3ad9bb7 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Wed, 8 Oct 2025 11:10:39 +0200 Subject: [PATCH 411/480] refactor(scenarios): replace hardcoded /home/zuul paths in va-multi.yml Replace hardcoded /home/zuul/ paths with ansible_user_dir variable in source paths to support different user environments and improve consistency with configurable user variables pattern. 
--- scenarios/reproducers/va-multi.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scenarios/reproducers/va-multi.yml b/scenarios/reproducers/va-multi.yml index 0c71d6b25a..b0e45d6675 100644 --- a/scenarios/reproducers/va-multi.yml +++ b/scenarios/reproducers/va-multi.yml @@ -381,7 +381,7 @@ cifmw_networking_definition: post_deploy: - name: Discover hypervisors for openstack2 namespace type: playbook - source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/hooks/playbooks/nova_manage_discover_hosts.yml" + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/hooks/playbooks/nova_manage_discover_hosts.yml" extra_vars: namespace: openstack2 _cell_conductors: nova-cell0-conductor-0 @@ -389,7 +389,7 @@ post_deploy: pre_admin_setup: - name: Prepare OSP networks in openstack2 namespace type: playbook - source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_osp_networks.yaml" + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_osp_networks.yaml" extra_vars: cifmw_os_net_setup_namespace: openstack2 cifmw_os_net_setup_public_cidr: "192.168.133.0/24" @@ -400,7 +400,7 @@ pre_admin_setup: post_tests: - name: Run tempest against openstack2 namespace type: playbook - source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_validation.yaml" + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_validation.yaml" extra_vars: cifmw_test_operator_tempest_name: tempest-tests2 cifmw_test_operator_namespace: openstack2 From 234ab31568e819d644feaa5a1858115d163ba761 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Thu, 9 Oct 2025 08:45:48 +0200 Subject: [PATCH 412/480] Try to retry get delorean.repo.md5 file Sometimes the DLRN server can be overloaded, so response might take a while. 
Try to make few retries on getting the file. Signed-off-by: Daniel Pawlik --- roles/repo_setup/tasks/artifacts.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/repo_setup/tasks/artifacts.yml b/roles/repo_setup/tasks/artifacts.yml index b20a05d290..d466573686 100644 --- a/roles/repo_setup/tasks/artifacts.yml +++ b/roles/repo_setup/tasks/artifacts.yml @@ -39,6 +39,10 @@ url: "{{ cifmw_repo_setup_dlrn_uri }}/{{ cifmw_repo_setup_os_release }}{{ cifmw_repo_setup_dist_major_version }}-{{ cifmw_repo_setup_branch }}/current-podified/delorean.repo.md5" dest: "{{ cifmw_repo_setup_basedir }}/artifacts/repositories/delorean.repo.md5" mode: "0644" + register: _file_result + until: _file_result is succeeded + retries: 10 + delay: 15 - name: Slurp current podified hash ansible.builtin.slurp: From 4fadc6bf66d55ecc4aabd7a3314e2546f42ad3e7 Mon Sep 17 00:00:00 2001 From: Michael Burke Date: Fri, 26 Sep 2025 15:59:51 -0400 Subject: [PATCH 413/480] feat(libvirt_manager): Account for non-zuul user when configuring ssh It is possible to set a non-zuul user in a vm's config, this pulls in the user from the config to account for this possibility when managing ssh keys and ansible inventories. 
--- .../tasks/generate_networking_data.yml | 1 + roles/libvirt_manager/tasks/manage_vms.yml | 45 ++++++++++--------- .../templates/inventory.yml.j2 | 2 +- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 45539656bb..1bf06d309a 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -109,6 +109,7 @@ _ssh_user: >- {{ _cifmw_libvirt_manager_layout.vms[_vm_type].admin_user | + default(_cifmw_libvirt_manager_layout.vms[_vm_type].user) | default('zuul') }} _add_ansible_host: >- diff --git a/roles/libvirt_manager/tasks/manage_vms.yml b/roles/libvirt_manager/tasks/manage_vms.yml index 4a39cf8337..b41a943f05 100644 --- a/roles/libvirt_manager/tasks/manage_vms.yml +++ b/roles/libvirt_manager/tasks/manage_vms.yml @@ -22,7 +22,7 @@ regex_replace('^.*-([0-9]+)$', vm_type ~ '-\1') }} - _user: "{{ 'core' if vm is match('^(crc|ocp).*') else 'zuul' }}" + _user: "{{ 'core' if vm is match('^(crc|ocp).*') else vm_data.get('user', 'zuul') }}" dataset: ssh_dir: "{{ ansible_user_dir }}/.ssh" user: "{{ _user }}" @@ -86,28 +86,29 @@ ssh core@{{ vm_con_name }} "sudo growpart /dev/sda {{ _root_part }}; sudo xfs_growfs /;" -- name: "Inject private key on hosts {{ vm }}" +- name: "Manage ssh keys on {{ vm }}" when: - vm_type is match('^controller.*$') - _cifmw_libvirt_manager_layout.vms[vm_type].start | default(true) - delegate_to: "{{ vm_con_name }}" - remote_user: "{{ _init_admin_user }}" - ansible.builtin.copy: - dest: "/home/zuul/.ssh/id_cifw" - content: "{{ priv_key }}" - owner: "{{ cifmw_libvirt_manager_user }}" - group: "{{ cifmw_libvirt_manager_user }}" - mode: "0400" + vars: + _user: "{{ vm_data.get('user', 'zuul') }}" + block: + - name: "Inject private key on hosts {{ vm }}" + delegate_to: "{{ vm_con_name }}" + remote_user: "{{ _init_admin_user }}" + 
ansible.builtin.copy: + dest: "/home/{{ _user }}/.ssh/id_cifw" + content: "{{ priv_key }}" + owner: "{{ _user }}" + group: "{{ _user }}" + mode: "0400" -- name: "Inject public key on hosts {{ vm }}" - when: - - vm_type is match('^controller.*$') - - _cifmw_libvirt_manager_layout.vms[vm_type].start | default(true) - delegate_to: "{{ vm_con_name }}" - remote_user: "{{ _init_admin_user }}" - ansible.builtin.copy: - dest: "/home/zuul/.ssh/id_cifw.pub" - content: "{{ pub_key }}" - owner: "{{ cifmw_libvirt_manager_user }}" - group: "{{ cifmw_libvirt_manager_user }}" - mode: "0444" + - name: "Inject public key on hosts {{ vm }}" + delegate_to: "{{ vm_con_name }}" + remote_user: "{{ _init_admin_user }}" + ansible.builtin.copy: + dest: "/home/{{ _user }}/.ssh/id_cifw.pub" + content: "{{ pub_key }}" + owner: "{{ _user }}" + group: "{{ _user }}" + mode: "0444" diff --git a/roles/libvirt_manager/templates/inventory.yml.j2 b/roles/libvirt_manager/templates/inventory.yml.j2 index 17509fe351..0d3414b2b5 100644 --- a/roles/libvirt_manager/templates/inventory.yml.j2 +++ b/roles/libvirt_manager/templates/inventory.yml.j2 @@ -5,7 +5,7 @@ {% set hostname = (host.key is match('^ocp.*')) | ternary(ocp_name, host.key) %} {{ host.key }}: ansible_host: {{ hostname }}.utility - ansible_user: {{ _cifmw_libvirt_manager_layout.vms[item].admin_user | default('zuul') }} + ansible_user: {{ _cifmw_libvirt_manager_layout.vms[item].admin_user | default(_cifmw_libvirt_manager_layout.vms[item].user) | default('zuul') }} ansible_ssh_common_args: '-o StrictHostKeyChecking=no' {% if item is match('^crc.*') %} ansible_ssh_private_key_file: ~/.ssh/crc_key From a57402f5d6df6d6bc2bde3e4441b6625946569fb Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 8 Oct 2025 14:24:28 +0200 Subject: [PATCH 414/480] Do not relay on cifmw_openshift_kubeconfig if set in os_must_gather The variable cifmw_openshift_kubeconfig might not be set, but the file can exist. 
Try to check if the kubeconfig file exists, then trigger must-gather command. Signed-off-by: Daniel Pawlik --- roles/os_must_gather/defaults/main.yml | 1 + roles/os_must_gather/tasks/main.yml | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/roles/os_must_gather/defaults/main.yml b/roles/os_must_gather/defaults/main.yml index 89aea8c31f..aa7b8b9b1e 100644 --- a/roles/os_must_gather/defaults/main.yml +++ b/roles/os_must_gather/defaults/main.yml @@ -37,3 +37,4 @@ cifmw_os_must_gather_namespaces: - crc-storage cifmw_os_must_gather_host_network: false cifmw_os_must_gather_dump_db: "ALL" +cifmw_os_must_gather_kubeconfig: "{{ ansible_user_dir }}/.kube/config" diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index c11d6e9e55..7df2086910 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -44,15 +44,20 @@ register: oc_installed ignore_errors: true +- name: Check if kubeconfig exists + ansible.builtin.stat: + path: "{{ cifmw_openshift_kubeconfig | default(cifmw_os_must_gather_kubeconfig) }}" + register: _kubeconfig_stat + - name: Running openstack-must-gather tool when: - oc_installed is defined - oc_installed.rc == 0 - - cifmw_openshift_kubeconfig is defined + - _kubeconfig_stat.stat.exists block: - name: Run openstack-must-gather command environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default(cifmw_os_must_gather_kubeconfig) }}" PATH: "{{ cifmw_path }}" SOS_EDPM: "all" SOS_DECOMPRESS: "0" @@ -93,7 +98,7 @@ - name: Run fallback generic must-gather command environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default(cifmw_os_must_gather_kubeconfig) }}" PATH: "{{ cifmw_path }}" ansible.builtin.command: cmd: oc adm must-gather --dest-dir {{ ansible_user_dir }}/ci-framework-data/must-gather @@ -107,7 +112,7 @@ - name: Inspect the cluster after 
must-gather failure ignore_errors: true # noqa: ignore-errors environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default(cifmw_os_must_gather_kubeconfig) }}" PATH: "{{ cifmw_path }}" cifmw.general.ci_script: output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" From b1a0fdc481dde7facbeabd8225d105a2b67b764b Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 8 Oct 2025 12:29:56 +0200 Subject: [PATCH 415/480] DNM Add molecule jobs from edpm-ansible The edpm-ansible folks would like to run own job also when our changes are related to molecule job. Let's trigger some of they jobs here. Signed-off-by: Daniel Pawlik --- ci/playbooks/molecule-test.yml | 1 + scripts/create_role_molecule.py | 12 ++++++++++++ zuul.d/molecule-base.yaml | 2 ++ zuul.d/projects.yaml | 3 +++ 4 files changed, 18 insertions(+) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index 6d7c6669d6..0cb4410879 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -49,5 +49,6 @@ chdir: "{{ roles_dir }}" cmd: >- set -o pipefail; + mkdir -p {{ roles_dir }}/../../group_vars ; molecule -c {{ mol_config_dir }} test --all | tee {{ ansible_user_dir }}/ci-framework-data/logs/molecule-execution.log diff --git a/scripts/create_role_molecule.py b/scripts/create_role_molecule.py index c03bf0f072..f20148a728 100755 --- a/scripts/create_role_molecule.py +++ b/scripts/create_role_molecule.py @@ -20,6 +20,12 @@ import logging from jinja2 import Environment, FileSystemLoader +additional_molecule_jobs = [ + "edpm-ansible-molecule-edpm_kernel", + "edpm-ansible-molecule-edpm_podman", + "edpm-ansible-molecule-edpm_ovs", +] + def get_project_paths(project_dir=None): """ @@ -90,6 +96,12 @@ def regenerate_projects_zuul_jobs_yaml(generated_paths): f"cifmw-molecule-{role_directory.name}" ) + if additional_molecule_jobs: + for additional_job in additional_molecule_jobs: + 
projects_jobs_info[0]["project"]["github-check"]["jobs"].append( + additional_job + ) + with open(generated_paths["zuul_job_dir"] / "projects.yaml", "w") as projects_file: yaml.dump(projects_jobs_info, projects_file) diff --git a/zuul.d/molecule-base.yaml b/zuul.d/molecule-base.yaml index 709544963a..d954065b5b 100644 --- a/zuul.d/molecule-base.yaml +++ b/zuul.d/molecule-base.yaml @@ -15,6 +15,7 @@ - ci/playbooks/collect-logs.yml required-projects: - github.com/openstack-k8s-operators/install_yamls + - github.com/openstack-k8s-operators/edpm-ansible vars: roles_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/roles/{{ TEST_RUN }}" mol_config_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/.config/molecule/config_local.yml" @@ -33,6 +34,7 @@ - ci/playbooks/collect-logs.yml required-projects: - github.com/openstack-k8s-operators/install_yamls + - github.com/openstack-k8s-operators/edpm-ansible vars: roles_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/roles/{{ TEST_RUN }}" mol_config_dir: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/.config/molecule/config_local.yml" diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 7e94482b1b..0d89c3eb5f 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -104,6 +104,9 @@ - cifmw-molecule-update_containers - cifmw-molecule-validations - cifmw-molecule-virtualbmc + - edpm-ansible-molecule-edpm_kernel + - edpm-ansible-molecule-edpm_podman + - edpm-ansible-molecule-edpm_ovs github-post: jobs: - build-push-container-cifmw-client-post From 07f6a4f6ba7b0865b97d5c8d7e4396ab0259a62b Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 3 Oct 2025 15:23:11 +0200 Subject: [PATCH 416/480] Add molecule jobs from edpm-ansible The edpm-ansible folks would like to run own job also when 
our changes are related to molecule job. Let's trigger some of they jobs here. Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/3385 Signed-off-by: Daniel Pawlik --- ci/playbooks/molecule-test.yml | 1 - scripts/create_role_molecule.py | 1 - zuul.d/projects.yaml | 1 - 3 files changed, 3 deletions(-) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index 0cb4410879..6d7c6669d6 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -49,6 +49,5 @@ chdir: "{{ roles_dir }}" cmd: >- set -o pipefail; - mkdir -p {{ roles_dir }}/../../group_vars ; molecule -c {{ mol_config_dir }} test --all | tee {{ ansible_user_dir }}/ci-framework-data/logs/molecule-execution.log diff --git a/scripts/create_role_molecule.py b/scripts/create_role_molecule.py index f20148a728..a40078edb2 100755 --- a/scripts/create_role_molecule.py +++ b/scripts/create_role_molecule.py @@ -21,7 +21,6 @@ from jinja2 import Environment, FileSystemLoader additional_molecule_jobs = [ - "edpm-ansible-molecule-edpm_kernel", "edpm-ansible-molecule-edpm_podman", "edpm-ansible-molecule-edpm_ovs", ] diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 0d89c3eb5f..182be6866a 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -104,7 +104,6 @@ - cifmw-molecule-update_containers - cifmw-molecule-validations - cifmw-molecule-virtualbmc - - edpm-ansible-molecule-edpm_kernel - edpm-ansible-molecule-edpm_podman - edpm-ansible-molecule-edpm_ovs github-post: From 83c519fd71419e4026bedcf1c5a674babf8a073c Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Wed, 8 Oct 2025 18:32:00 +0530 Subject: [PATCH 417/480] [libvirt_manager] Honor network mtu in dnsmasq config When creating a Libvirt network even with custom mtu, dnsmasq was providing default mtu(1500) to the VMs. This patch fixes it by configuring option:mtu as per the network facts. 
Signed-off-by: Yatin Karel --- roles/libvirt_manager/tasks/create_networks.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/libvirt_manager/tasks/create_networks.yml b/roles/libvirt_manager/tasks/create_networks.yml index fe4c2db563..4ecaf0dd9b 100644 --- a/roles/libvirt_manager/tasks/create_networks.yml +++ b/roles/libvirt_manager/tasks/create_networks.yml @@ -156,6 +156,9 @@ {% if _no_prefix_name not in _default_gw_net -%} - "option:router" {% endif -%} + {% if ansible_facts[_name].mtu is defined -%} + - "option:mtu,{{ ansible_facts[_name].mtu }}" + {% endif -%} _dns_listener: - "{{ ansible_facts[_name].ipv4.address | default('') }}" - "{{ _ipv6.address | default('') }}" From af88093276fe0b7436b90bf89345dfcb7133ca48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Sat, 11 Oct 2025 18:54:38 +0200 Subject: [PATCH 418/480] sushy_emulator: healthcheck + restart on failure In adoption jobs the sushy_emulator stops working somewhere in the job run. We see errors raised from python libvirt library: - `libvirt: XML-RPC error : Cannot write data: Broken pipe` - `libvirt: XML-RPC error : internal error: client socket is closed` This change adds a healthcheck to the podman pod, probe the service every 30 seconds, and trigger a container restart on two failures. 
Jira: OSPRH-15686 --- roles/sushy_emulator/tasks/create_container.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/sushy_emulator/tasks/create_container.yml b/roles/sushy_emulator/tasks/create_container.yml index c66b8833ba..8982e20e45 100644 --- a/roles/sushy_emulator/tasks/create_container.yml +++ b/roles/sushy_emulator/tasks/create_container.yml @@ -41,3 +41,12 @@ - "{{ dest_dir }}/known_hosts:/root/.ssh/known_hosts:ro,Z" - "{{ cifmw_sushy_emulator_sshkey_path }}:/root/.ssh/id_rsa:ro,Z" - "{{ cifmw_sushy_emulator_sshkey_path }}.pub:/root/.ssh/id_rsa.pub:ro,Z" + healthcheck: >- + python3 -c "import urllib.request, base64; + req = urllib.request.Request('http://localhost:8000/redfish/v1/Systems/{{ _cifmw_sushy_emulator_instances[0] }}'); + req.add_header('Authorization', 'Basic ' + base64.b64encode(b'{{ cifmw_sushy_emulator_redfish_username }}:{{ cifmw_sushy_emulator_redfish_password }}').decode()); + urllib.request.urlopen(req, timeout=5).read()" + healthcheck_interval: "30s" + healthcheck_timeout: "30s" + healthcheck_retries: 2 + healthcheck_failure_action: restart From e2489758b823c58958ab0d41e8fd3322576ee1fe Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Wed, 8 Oct 2025 14:24:22 +0200 Subject: [PATCH 419/480] feat(cifmw_cephadm_log_path): Replace hardcoded ceph logs path We've moving scenarios variables for cephadm_logs to configurable variable, but also we're moving back logs files to specific ceph folder located at logs folder --- scenarios/centos-9/hci_ceph_backends.yml | 2 +- scenarios/reproducers/dt-dcn.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/centos-9/hci_ceph_backends.yml b/scenarios/centos-9/hci_ceph_backends.yml index a23a951f37..d4ba9620b8 100644 --- a/scenarios/centos-9/hci_ceph_backends.yml +++ b/scenarios/centos-9/hci_ceph_backends.yml @@ -17,7 +17,7 @@ post_ceph: type: playbook source: ceph.yml -cifmw_cephadm_log_path: /home/zuul/ci-framework-data/logs 
+cifmw_cephadm_log_path: "{{ cifmw_basedir ~ '/logs/ceph'}}" post_deploy: - name: 81 Kustomize OpenStack CR with Ceph diff --git a/scenarios/reproducers/dt-dcn.yml b/scenarios/reproducers/dt-dcn.yml index bdec27c3f2..74f60cb89b 100644 --- a/scenarios/reproducers/dt-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -19,7 +19,7 @@ cifmw_ceph_daemons_layout: ceph_nfs_enabled: false ceph_rbd_mirror_enabled: true cifmw_run_tests: false -cifmw_cephadm_log_path: /home/zuul/ci-framework-data/logs +cifmw_cephadm_log_path: "{{ cifmw_basedir ~ '/logs/ceph'}}" cifmw_arch_automation_file: dcn.yaml cifmw_libvirt_manager_pub_net: ocpbm cifmw_reproducer_validate_network_host: "192.168.122.1" From 150757e4138794fd30439833f4e766a56d690336 Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Tue, 14 Oct 2025 16:28:00 +0530 Subject: [PATCH 420/480] [install_yamls] Filter antelope branch Required for jobs running on tcib antelope branch as described in the Jira. Resolves: https://issues.redhat.com/browse/OSPCIX-1046 --- roles/install_yamls/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/install_yamls/tasks/main.yml b/roles/install_yamls/tasks/main.yml index 0a70460e67..e78b1d9f6c 100644 --- a/roles/install_yamls/tasks/main.yml +++ b/roles/install_yamls/tasks/main.yml @@ -71,7 +71,7 @@ 'OUT': cifmw_install_yamls_manifests_dir, 'OUTPUT_DIR': cifmw_install_yamls_edpm_dir, 'CHECKOUT_FROM_OPENSTACK_REF': cifmw_install_yamls_checkout_openstack_ref, - 'OPENSTACK_K8S_BRANCH': (zuul is defined and not zuul.branch |regex_search('master|rhos')) | ternary(zuul.branch, 'main') + 'OPENSTACK_K8S_BRANCH': (zuul is defined and not zuul.branch |regex_search('master|antelope|rhos')) | ternary(zuul.branch, 'main') }) | combine(install_yamls_operators_repos) }} From 0d6c58af02795043800a57843ec236f08efbdd8e Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 13 Oct 2025 17:37:23 +0200 Subject: [PATCH 421/480] Add include_file and include_dir helpers The future 
group_vars directory, might not contain "clear" variables in the yaml file - instead of each variable has own type like string, integer, bool etc. it would be jinja2 variable, which is not "translated" in Ansible during setting facts. This helper would be useful for parsing group_vars variables, that might be based on jinja2 vars. Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/README.md | 66 ++++++++++++++++++++++ roles/cifmw_helpers/tasks/include_dir.yml | 35 ++++++++++++ roles/cifmw_helpers/tasks/include_file.yml | 51 +++++++++++++++++ 3 files changed, 152 insertions(+) create mode 100644 roles/cifmw_helpers/tasks/include_dir.yml create mode 100644 roles/cifmw_helpers/tasks/include_file.yml diff --git a/roles/cifmw_helpers/README.md b/roles/cifmw_helpers/README.md index 8a1afc454c..3a8c5d9eac 100644 --- a/roles/cifmw_helpers/README.md +++ b/roles/cifmw_helpers/README.md @@ -323,3 +323,69 @@ To: loop_control: loop_var: files_item ``` + +#### Include file + +In some cases, yaml file that would have vars would be using +Jinja2 vars, which means that on setting fact, variable would not be +"translated". It means, that if variable is: + +```yaml +test: "{{ ansible_user_dir }}" +``` + +Result when we will use `var_file.yml`, would be: + +```yaml +{ "test": "{{ ansible_user_dir}}" } +``` + +This is not want we would like to have. The `ansible_user_dir` should be "translated", +so expected value should be: + +```yaml +{ "test": "/home/testuser" } +``` + +This helper would include vars properly. + +Example: + +```yaml +- name: Test include vars + hosts: somehost + tasks: + - name: Read group_vars all file + vars: + included_file: group_vars/all.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: include_file.yml + + - name: Print vars from group_vars all + ansible.builtin.debug: + msg: | + {{ noop_helper_var }} +``` + +Similar to what `include_file` is doing, but instead of parsing single file, +it parse all yaml files available in the directory. 
+ +#### Include dir + +```yaml +- name: Test include vars - dr + hosts: somehost + tasks: + - name: Read group_vars dir file + vars: + included_dir: ./group_vars + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: include_dir.yml + + - name: Print vars from group_vars all + ansible.builtin.debug: + msg: | + {{ noop_helper_var }} +``` diff --git a/roles/cifmw_helpers/tasks/include_dir.yml b/roles/cifmw_helpers/tasks/include_dir.yml new file mode 100644 index 0000000000..d6a3682df2 --- /dev/null +++ b/roles/cifmw_helpers/tasks/include_dir.yml @@ -0,0 +1,35 @@ +--- +# This is a workaround for reading Ansible yaml files, +# that instead of have clear values, it uses jinja2 variables, +# so reading the file and parse as fact does not work. + +- name: Check directory is available + ansible.builtin.stat: + path: "{{ included_dir | trim }}" + register: _included_dir + +- name: List files available in dir and parse + when: _included_dir.stat.exists + block: + - name: Find yaml files + ansible.builtin.find: + paths: "{{ included_dir | trim }}" + patterns: "*.yml,*.yaml" + file_type: file + recurse: false + register: _yaml_files + + - name: Print available yaml files + ansible.builtin.debug: + msg: | + Found yaml files to parse: {{ _yaml_files.files | map(attribute='path') | list }} + + - name: Create files on localhost and use include_vars + vars: + included_file: "{{ _file_to_parse.path }}" + ansible.builtin.include_tasks: + file: include_file.yml + loop: "{{ _yaml_files.files }}" + loop_control: + loop_var: _file_to_parse + no_log: "{{ cifmw_helpers_no_log }}" diff --git a/roles/cifmw_helpers/tasks/include_file.yml b/roles/cifmw_helpers/tasks/include_file.yml new file mode 100644 index 0000000000..1e239ebdf6 --- /dev/null +++ b/roles/cifmw_helpers/tasks/include_file.yml @@ -0,0 +1,51 @@ +--- +# This is a workaround for reading Ansible yaml files, +# that instead of have clear values, it uses jinja2 variables, +# so reading the file and parse as fact does 
not work. + +- name: Fail if file is not yaml or yml extension + ansible.builtin.fail: + msg: "File needs to be yaml/yml extension" + when: + - included_file | trim | regex_search('\.(yml|yaml)$') == None + +- name: Check if file is available + ansible.builtin.stat: + path: "{{ included_file | trim }}" + register: _included_file + +- name: Read file and include vars + when: _included_file.stat.exists + block: + - name: Create temporary directory + ansible.builtin.tempfile: + state: directory + register: _tmp_dir + delegate_to: localhost + + - name: Read vars + ansible.builtin.slurp: + src: "{{ included_file | trim }}" + register: _parsed_vars + no_log: "{{ cifmw_helpers_no_log }}" + + - name: Create new variable files with content + when: "'content' in _parsed_vars" + ansible.builtin.copy: + content: "{{ _parsed_vars['content'] | b64decode }}" + dest: "{{ _tmp_dir.path }}/{{ included_file | basename }}" + mode: "0644" + no_log: "{{ cifmw_helpers_no_log }}" + delegate_to: localhost + + - name: Include vars + when: "'content' in _parsed_vars" + ansible.builtin.include_vars: + file: "{{ _tmp_dir.path }}/{{ included_file | basename }}" + no_log: "{{ cifmw_helpers_no_log }}" + delegate_to: localhost + always: + - name: Remove temporary directory + ansible.builtin.file: + path: "{{ _tmp_dir.path }}" + state: absent From 12ad0668ea30fabff7cf725f2ecc3c120ba4fb96 Mon Sep 17 00:00:00 2001 From: bshewale Date: Thu, 9 Oct 2025 15:30:52 +0530 Subject: [PATCH 422/480] Replace hardcoded /home/zuul paths in federation role Replace hardcoded /home/zuul/ paths with ansible_user_dir variable in hook source paths to support different user environments and improve consistency with configurable user variables pattern. 
--- roles/federation/tasks/run_openstack_setup.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/federation/tasks/run_openstack_setup.yml b/roles/federation/tasks/run_openstack_setup.yml index 4affbde457..07f40baba4 100644 --- a/roles/federation/tasks/run_openstack_setup.yml +++ b/roles/federation/tasks/run_openstack_setup.yml @@ -16,8 +16,8 @@ - name: Link kubeconfg for comparability ansible.builtin.copy: - src: /home/zuul/.crc/machines/crc/kubeconfig - dest: /home/zuul/.kube/config + src: "{{ ansible_user_dir }}/.crc/machines/crc/kubeconfig" + dest: "{{ ansible_user_dir }}/.kube/config" mode: "0640" when: cifmw_federation_deploy_type == "crc" From 95cdf583b8165864d00d9f3be819b0d355ecf9f6 Mon Sep 17 00:00:00 2001 From: rabi Date: Thu, 2 Oct 2025 11:15:16 +0530 Subject: [PATCH 423/480] Add periodic bootc jobs Also use cifmw_update_containers_edpm_image_url to patch openstackversion CR. We can also remove BAREMETAL_OS_IMG var when we use the same qcow image name for bootc images. 
Signed-off-by: rabi --- roles/edpm_deploy_baremetal/tasks/main.yml | 2 ++ zuul.d/edpm.yaml | 5 ++++- zuul.d/edpm_periodic.yaml | 19 ++++++++++++++++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/roles/edpm_deploy_baremetal/tasks/main.yml b/roles/edpm_deploy_baremetal/tasks/main.yml index e67a7f654f..d51cca1977 100644 --- a/roles/edpm_deploy_baremetal/tasks/main.yml +++ b/roles/edpm_deploy_baremetal/tasks/main.yml @@ -120,6 +120,7 @@ target_path: "{{ cifmw_edpm_deploy_openstack_crs_path }}" sort_ascending: false kustomizations: |- + {% if content_provider_registry_ip is defined or not cifmw_edpm_deploy_baremetal_bootc %} apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization patches: @@ -137,6 +138,7 @@ path: /spec/nodeTemplate/ansible/ansibleVars/edpm_bootstrap_command value: sudo dnf -y update {% endif %} + {% endif %} kustomizations_paths: >- {{ [ diff --git a/zuul.d/edpm.yaml b/zuul.d/edpm.yaml index 8b65d21f38..dcd9cf514d 100644 --- a/zuul.d/edpm.yaml +++ b/zuul.d/edpm.yaml @@ -31,10 +31,13 @@ crc_parameters: "--memory 32000 --disk-size 240 --cpus 12" cifmw_manage_secrets_pullsecret_content: '{}' cifmw_rhol_crc_binary_folder: "/usr/local/bin" + # This needs to be updated later to not use hardcoded image url but the one pushed by + # the periodic job for pushing the bootc images to the registry + cifmw_update_containers_edpm_image_url: quay.io/openstack-k8s-operators/edpm-bootc:latest-qcow2 cifmw_install_yamls_vars: - BAREMETAL_OS_CONTAINER_IMG: quay.io/openstack-k8s-operators/edpm-bootc:latest-qcow2 BAREMETAL_OS_IMG: edpm-bootc.qcow2 cifmw_edpm_deploy_baremetal_bootc: true + cifmw_update_containers: true # Podified galera job - job: diff --git a/zuul.d/edpm_periodic.yaml b/zuul.d/edpm_periodic.yaml index c9bf9db7ed..dbba5e6a98 100644 --- a/zuul.d/edpm_periodic.yaml +++ b/zuul.d/edpm_periodic.yaml @@ -20,7 +20,6 @@ cifmw_tempest_container: openstack-tempest-all cifmw_tempest_image_tag: "{{ cifmw_repo_setup_full_hash }}" - - job: 
name: periodic-podified-multinode-edpm-deployment-master-ocp-crc-cs9 parent: podified-multinode-edpm-deployment-crc @@ -59,6 +58,24 @@ cifmw_update_containers_org: podified-{{ cifmw_repo_setup_branch }}-centos9 cifmw_tempest_namespace: podified-{{ cifmw_repo_setup_branch }}-centos9 +- job: + name: periodic-podified-edpm-baremetal-bootc-antelope-ocp-crc + parent: cifmw-crc-podified-edpm-baremetal-bootc + vars: + cifmw_repo_setup_branch: antelope + cifmw_repo_setup_promotion: podified-ci-testing + cifmw_dlrn_report_result: true + cifmw_tempest_registry: quay.rdoproject.org + cifmw_tempest_namespace: podified-{{ cifmw_repo_setup_branch }}-centos9 + cifmw_tempest_container: openstack-tempest-all + cifmw_tempest_image_tag: "{{ cifmw_repo_setup_full_hash }}" + cifmw_update_containers_registry: quay.rdoproject.org + cifmw_update_containers_org: "podified-{{ cifmw_repo_setup_branch }}-centos9" + cifmw_update_containers_tag: "{{ cifmw_repo_setup_full_hash }}" + cifmw_update_containers_openstack: true + cifmw_extras: + - '@scenarios/centos-9/nested_virt.yml' + - job: name: periodic-podified-multinode-edpm-deployment-antelope-ocp-crc-cs9 parent: periodic-podified-multinode-edpm-deployment-master-ocp-crc-cs9 From 2c7f6a095a3c9258aa167c8f6ba325062adfe36b Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Mon, 13 Oct 2025 14:22:35 +0200 Subject: [PATCH 424/480] adoption: allow files to be copied to the undercloud Add a new (optional) key `additional_files` for each item in the list of stacks. `additional_files` is a list and each item is the name of a file which must exist inside the scenario directory. The file will be copied inside the home directory of the ansible user (by default the `zuul` user) of the undercloud, so that it can be used during the overcloud deployment. 
--- roles/adoption_osp_deploy/tasks/deploy_overcloud.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml index 4275ba65be..59136b57e0 100644 --- a/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml +++ b/roles/adoption_osp_deploy/tasks/deploy_overcloud.yml @@ -48,6 +48,14 @@ _vips_provision_output: "vips_provision_{{ _overcloud_name }}_out.yaml" _private_overcloud_conf_file: "{{ ansible_user_dir }}/internal-configuration.yaml" block: + - name: Copy additional files to the undercloud home, if specified + delegate_to: "osp-undercloud-0" + ansible.builtin.copy: + src: "{{ [cifmw_adoption_source_scenario_path, item] | path_join }}" + dest: "{{ [ansible_user_dir, item | basename ] | path_join }}" + mode: "0644" + loop: "{{ _stack.additional_files | default([]) }}" + - name: Copy roles file delegate_to: "osp-undercloud-0" ansible.builtin.copy: From afe8f950d4a302e8a25ea872f0c77c2423e6796e Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 15 Oct 2025 11:40:25 +0200 Subject: [PATCH 425/480] Add debug vars before starting molecule job Printing some variables that later are used in molecule job will be helpful to debug molecule job locally. 
Signed-off-by: Daniel Pawlik --- ci/playbooks/molecule-test.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index 6d7c6669d6..f7be41302a 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -22,6 +22,12 @@ path: "{{ roles_dir }}/../../group_vars" state: directory + - name: Print related variables + ansible.builtin.debug: + msg: | + mol_config_dir: {{ mol_config_dir }} + roles_dir: {{ roles_dir }} + - name: Run molecule environment: ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/zuul-output/logs/ansible-execution.log" From 94fa1e51f037a41b90656b4f0a9e67f6db166aa5 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 14 Oct 2025 17:01:19 +0200 Subject: [PATCH 426/480] Replace var_dir and var_file tasks with include_dir and include_var Some of the variables files sometimes contains jinja2 template vars, so on set fact they are not "translated". Signed-off-by: Daniel Pawlik --- ci/playbooks/e2e-collect-logs.yml | 4 ++-- ci/playbooks/read_global_vars.yml | 4 ++-- roles/cifmw_setup/tasks/run_logs.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ci/playbooks/e2e-collect-logs.yml b/ci/playbooks/e2e-collect-logs.yml index 1760900b4f..a1719a31a2 100644 --- a/ci/playbooks/e2e-collect-logs.yml +++ b/ci/playbooks/e2e-collect-logs.yml @@ -22,12 +22,12 @@ - name: Read base centos-9 scenarios vars: - provided_file: > + included_file: > {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ ci-framework/scenarios/centos-9/base.yml ansible.builtin.include_role: name: cifmw_helpers - tasks_from: var_file.yml + tasks_from: include_file.yml - name: Run log collection ansible.builtin.import_role: diff --git a/ci/playbooks/read_global_vars.yml b/ci/playbooks/read_global_vars.yml index c4dc24b7a9..91d86e1009 100644 --- a/ci/playbooks/read_global_vars.yml +++ b/ci/playbooks/read_global_vars.yml @@ -4,7 +4,7 @@ tasks: - name: Read group_vars all file 
vars: - provided_file: "{{ playbook_dir }}/group_vars/all.yml" + included_file: "{{ playbook_dir }}/group_vars/all.yml" ansible.builtin.include_role: name: cifmw_helpers - tasks_from: var_file.yml + tasks_from: include_file.yml diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index b8e5992112..c35a1d3327 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -14,10 +14,10 @@ # Ensure, that the directory exists on localhost before continue. - name: Read artifacts parameters dir and set as facts vars: - provided_dir: "{{ cifmw_basedir }}/artifacts/parameters" + included_dir: "{{ cifmw_basedir }}/artifacts/parameters" ansible.builtin.include_role: name: cifmw_helpers - tasks_from: var_dir.yml + tasks_from: include_dir.yml always: - name: Set custom cifmw PATH reusable fact when: From 52784b63be256f4742e2fea9bb4c64cae83971b7 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 7 Oct 2025 10:02:39 +0200 Subject: [PATCH 427/480] Add loop_var in cifmw_helpers task files; trim variables To avoid unecessary conflicts issue where "item" was already used in higher task, let's add loop_var to have better control on the vars what are parsed. Also trim variables in var_dir and var_file and improve finding files to parse in var_dir helper. 
Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/tasks/set_dir_facts.yml | 8 +++--- roles/cifmw_helpers/tasks/var_dir.yml | 30 ++++++++++++++------- roles/cifmw_helpers/tasks/var_file.yml | 14 +++++++--- roles/cifmw_helpers/tasks/various_vars.yml | 8 ++++-- 4 files changed, 43 insertions(+), 17 deletions(-) diff --git a/roles/cifmw_helpers/tasks/set_dir_facts.yml b/roles/cifmw_helpers/tasks/set_dir_facts.yml index 91f6b17bf1..83b4688cb0 100644 --- a/roles/cifmw_helpers/tasks/set_dir_facts.yml +++ b/roles/cifmw_helpers/tasks/set_dir_facts.yml @@ -1,8 +1,10 @@ --- - name: Set files as fact - when: "'content' in item" + when: "'content' in dir_item" ansible.builtin.set_fact: - "{{ item.key }}": "{{ item.value }}" + "{{ _file_content.key }}": "{{ _file_content.value }}" cacheable: true - loop: "{{ item['content'] | b64decode | from_yaml | dict2items }}" + loop: "{{ dir_item['content'] | b64decode | from_yaml | dict2items }}" + loop_control: + loop_var: _file_content no_log: "{{ cifmw_helpers_no_log }}" diff --git a/roles/cifmw_helpers/tasks/var_dir.yml b/roles/cifmw_helpers/tasks/var_dir.yml index c2ee42bd8a..e1ad22204f 100644 --- a/roles/cifmw_helpers/tasks/var_dir.yml +++ b/roles/cifmw_helpers/tasks/var_dir.yml @@ -4,26 +4,38 @@ # In that case, include_vars would not work. 
- name: Check directory is available ansible.builtin.stat: - path: "{{ provided_dir }}" + path: "{{ provided_dir | trim }}" register: param_dir - name: List files available in dir and parse when: param_dir.stat.exists block: - - name: List available files - ansible.builtin.command: | - ls {{ provided_dir }} - register: _param_dir + - name: Find yaml files + ansible.builtin.find: + paths: "{{ provided_dir | trim }}" + patterns: "*.yml,*.yaml" + file_type: file + recurse: false + register: _yaml_files + + - name: Print available yaml files + ansible.builtin.debug: + msg: | + Found yaml files to parse: {{ _yaml_files.files | map(attribute='path') | list }} - name: Read vars ansible.builtin.slurp: - src: "{{ provided_dir }}/{{ item }}" - register: _parsed_vars - loop: "{{ _param_dir.stdout_lines }}" + src: "{{ _file_to_parse.path }}" + loop: "{{ _yaml_files.files }}" + loop_control: + loop_var: _file_to_parse no_log: "{{ cifmw_helpers_no_log }}" + register: _parsed_vars - name: Call task to parse all files as fact ansible.builtin.include_tasks: file: set_dir_facts.yml - loop: '{{ _parsed_vars["results"] }}' + loop: "{{ _parsed_vars['results'] }}" + loop_control: + loop_var: dir_item no_log: "{{ cifmw_helpers_no_log }}" diff --git a/roles/cifmw_helpers/tasks/var_file.yml b/roles/cifmw_helpers/tasks/var_file.yml index a0b5513ca2..6d2c6e4678 100644 --- a/roles/cifmw_helpers/tasks/var_file.yml +++ b/roles/cifmw_helpers/tasks/var_file.yml @@ -1,20 +1,28 @@ --- +- name: Fail if file is not yaml or yml extension + ansible.builtin.fail: + msg: "File needs to be yaml/yml extension" + when: + - provided_file | trim | regex_search('\.(yml|yaml)$') == None + - name: Check if file is available ansible.builtin.stat: - path: "{{ provided_file }}" + path: "{{ provided_file | trim }}" register: _param_file - name: Read vars when: _param_file.stat.exists ansible.builtin.slurp: - src: "{{ provided_file }}" + src: "{{ provided_file | trim }}" register: _parsed_vars no_log: "{{ 
cifmw_helpers_no_log }}" - name: Set vars as fact when: "'content' in _parsed_vars" ansible.builtin.set_fact: - "{{ item.key }}": "{{ item.value }}" + "{{ file_item.key }}": "{{ file_item.value }}" cacheable: true loop: "{{ _parsed_vars['content'] | b64decode | from_yaml | dict2items }}" no_log: "{{ cifmw_helpers_no_log }}" + loop_control: + loop_var: file_item diff --git a/roles/cifmw_helpers/tasks/various_vars.yml b/roles/cifmw_helpers/tasks/various_vars.yml index 0bc17536fd..c307e2c960 100644 --- a/roles/cifmw_helpers/tasks/various_vars.yml +++ b/roles/cifmw_helpers/tasks/various_vars.yml @@ -2,12 +2,16 @@ # various_vars - name: Filter Ansible variable files and set as fact vars: - provided_file: "{{ item | replace('@','') }}" + provided_file: "{{ various_file_item | replace('@','') }}" ansible.builtin.include_tasks: var_file.yml loop: "{{ various_vars | select('match', '^@.*\\.(yml|yaml)$') | list }}" + loop_control: + loop_var: various_file_item - name: Filter just dict and set as fact ansible.builtin.set_fact: - "{{ item.key }}": "{{ item.value }}" + "{{ various_item.key }}": "{{ various_item.value }}" cacheable: true loop: "{{ (various_vars | select('mapping') | list) | map('dict2items') | flatten }}" + loop_control: + loop_var: various_item From 3e31ef974cf003aca07732b6c9695f854015dde1 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 7 Oct 2025 10:02:39 +0200 Subject: [PATCH 428/480] Add molecule tests for cifmw helpers The cifmw_helpers role does not have molecule job, would be good to verify if it always returns correct result. 
Depends-On: https://github.com/openstack-k8s-operators/ci-framework/pull/3404 Signed-off-by: Daniel Pawlik --- .../molecule/default/converge.yml | 111 +++++++++++++++++ .../molecule/default/molecule.yml | 6 + .../molecule/default/prepare.yml | 116 ++++++++++++++++++ zuul.d/molecule.yaml | 20 +-- 4 files changed, 244 insertions(+), 9 deletions(-) create mode 100644 roles/cifmw_helpers/molecule/default/converge.yml create mode 100644 roles/cifmw_helpers/molecule/default/molecule.yml create mode 100644 roles/cifmw_helpers/molecule/default/prepare.yml diff --git a/roles/cifmw_helpers/molecule/default/converge.yml b/roles/cifmw_helpers/molecule/default/converge.yml new file mode 100644 index 0000000000..5a25336de3 --- /dev/null +++ b/roles/cifmw_helpers/molecule/default/converge.yml @@ -0,0 +1,111 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +- name: Converge + hosts: all + vars: + zuul: + projects: + github.com/openstack-k8s-operators/ci-framework: + src_dir: src/github.com/openstack-k8s-operators/ci-framework + tasks: + # var file + - name: Read file with facts + vars: + provided_file: /tmp/provided_file.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_file.yml + + - name: Check if some_var is available + ansible.builtin.assert: + that: some_var is defined and some_var + + # var dir + - name: Read all files in directory and set as fact + vars: + provided_dir: /tmp/provided_dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: var_dir.yml + + - name: Check if variables from dir are available + ansible.builtin.assert: + that: + - first_in_dir is defined and first_in_dir + - second_in_dir is defined and second_in_dir + + # various vars + - name: Check various files + vars: + various_vars: + - "@/tmp/various_vars.yml" + - mytest: true + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: various_vars.yml + + - name: Check if variables from various vars exists + ansible.builtin.assert: + that: + - my_various_file is defined and my_various_file + - mytest is defined and mytest + + # symlink cifmw collection + - name: Make a symlink to local .ansible collection dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: symlink_cifmw_collection.yml + + - name: Check if symlink was done + ansible.builtin.stat: + path: "{{ ansible_user_dir }}/.ansible/collections/ansible_collections/cifmw" + register: _cifmw_collection + + - name: Assert that symlink was done + ansible.builtin.assert: + that: _cifmw_collection.stat.exists + + # include file + - name: Check include file + vars: + included_file: /tmp/include_file.yml + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: include_file.yml + + - name: Check if jinja2 vars are translated + ansible.builtin.assert: + that: + - "my_include_file is defined and my_include_file == 
'test'" + - "my_second_include_file is defined and my_second_include_file == 'test'" + + # include dir + - name: Check include dir + vars: + included_dir: /tmp/included_dir + ansible.builtin.include_role: + name: cifmw_helpers + tasks_from: include_dir.yml + + - name: Check if all files were parsed + ansible.builtin.assert: + that: + - "my_include_dir is defined and my_include_dir == 'test'" + - "my_second_include_dir is defined and my_second_include_dir == 'test'" + - my_fake_include_dir is not defined + - my_fake_second_include_dir is not defined diff --git a/roles/cifmw_helpers/molecule/default/molecule.yml b/roles/cifmw_helpers/molecule/default/molecule.yml new file mode 100644 index 0000000000..aeab077e2e --- /dev/null +++ b/roles/cifmw_helpers/molecule/default/molecule.yml @@ -0,0 +1,6 @@ +--- +log: true + +provisioner: + name: ansible + log: true diff --git a/roles/cifmw_helpers/molecule/default/prepare.yml b/roles/cifmw_helpers/molecule/default/prepare.yml new file mode 100644 index 0000000000..bb32ad6289 --- /dev/null +++ b/roles/cifmw_helpers/molecule/default/prepare.yml @@ -0,0 +1,116 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +- name: Prepare + hosts: all + vars: + cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" + zuul: + projects: + github.com/openstack-k8s-operators/ci-framework: + src_dir: src/github.com/openstack-k8s-operators/ci-framework + roles: + - role: test_deps + - role: ci_setup + tasks: + # var_file + - name: Create file with vars + ansible.builtin.copy: + content: | + --- + some_var: true + dest: /tmp/provided_file.yml + mode: "0644" + + # var_dir + - name: Create directory for var files + ansible.builtin.file: + path: /tmp/provided_dir + state: directory + mode: "0755" + + - name: Create first var file in directory + ansible.builtin.copy: + content: | + --- + first_in_dir: true + dest: /tmp/provided_dir/firstfile.yml + mode: "0644" + + - name: Create second var file in directory + ansible.builtin.copy: + content: | + --- + second_in_dir: true + dest: /tmp/provided_dir/secondfile.yml + mode: "0644" + + # various file + - name: Create file for various vars + ansible.builtin.copy: + content: | + --- + my_various_file: true + dest: /tmp/various_vars.yml + mode: "0644" + + # symlink cifmw + - name: Install required packages + become: true + ansible.builtin.package: + name: git + + # include file + - name: Create file with jinja2 var + ansible.builtin.copy: + content: | + --- + {% raw %} + my_include_file: test + my_second_include_file: "{{ my_include_file }}" + {% endraw %} + dest: /tmp/include_file.yml + mode: "0644" + + # include dir + - name: Create directory for include dir + ansible.builtin.file: + path: /tmp/included_dir + state: directory + mode: "0755" + + - name: Create file with jinja2 var in include dir + ansible.builtin.copy: + content: | + --- + {% raw %} + my_include_dir: test + my_second_include_dir: "{{ my_include_file }}" + {% endraw %} + dest: /tmp/included_dir/somefile.yml + mode: "0644" + + - name: Create file without extension + ansible.builtin.copy: + content: | + --- + 
{% raw %} + my_fake_include_dir: fake + my_fake_second_include_dir: "{{ my_include_file }}" + {% endraw %} + dest: /tmp/included_dir/something + mode: "0644" diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 085b8d0d10..8ed6149217 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -191,6 +191,17 @@ parent: cifmw-molecule-base vars: TEST_RUN: cifmw_create_admin +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/cifmw_helpers/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-cifmw_helpers + parent: cifmw-molecule-base + vars: + TEST_RUN: cifmw_helpers - job: files: - ^common-requirements.txt @@ -889,15 +900,6 @@ - ^.config/molecule/.* name: cifmw-molecule-cifmw_external_dns parent: cifmw-molecule-noop -- job: - files: - - ^common-requirements.txt - - ^test-requirements.txt - - ^roles/cifmw_helpers/.* - - ^ci/playbooks/molecule.* - - ^.config/molecule/.* - name: cifmw-molecule-cifmw_helpers - parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt From 1f8e45ccf3e8eb82eedcdf6509885ab9585f5f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Mon, 13 Oct 2025 22:54:08 +0200 Subject: [PATCH 429/480] Add DHCP options support for VM types Enable per-VM-type DHCP options in libvirt_manager for PXE boot scenarios. VMs are tagged by type and dnsmasq applies corresponding options to each group via dhcp_options field in VM definitions. 
Assisted-By: Claude Code/claude-sonnet-4 --- docs/dictionary/en-custom.txt | 1 + roles/dnsmasq/README.md | 14 ++ roles/dnsmasq/molecule/default/converge.yml | 119 +++++++++++++ roles/dnsmasq/tasks/manage_host.yml | 6 +- roles/libvirt_manager/DHCP_OPTIONS_EXAMPLE.md | 161 ++++++++++++++++++ roles/libvirt_manager/README.md | 1 + .../generate_network_data/tasks/test.yml | 72 ++++++++ .../generate_network_data/vars/scenarios.yml | 27 +++ .../tasks/create_dhcp_options.yml | 46 +++++ .../tasks/generate_networking_data.yml | 3 + .../tasks/reserve_dnsmasq_ips.yml | 2 + .../templates/vm-types-dhcp-options.conf.j2 | 8 + 12 files changed, 459 insertions(+), 1 deletion(-) create mode 100644 roles/libvirt_manager/DHCP_OPTIONS_EXAMPLE.md create mode 100644 roles/libvirt_manager/tasks/create_dhcp_options.yml create mode 100644 roles/libvirt_manager/templates/vm-types-dhcp-options.conf.j2 diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 547e6b3346..4706600ccf 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -178,6 +178,7 @@ env envfile epel epyc +etcd eth extraimages extraRPMs diff --git a/roles/dnsmasq/README.md b/roles/dnsmasq/README.md index 972e243550..602baf2d2a 100644 --- a/roles/dnsmasq/README.md +++ b/roles/dnsmasq/README.md @@ -168,6 +168,7 @@ supported in libvirt). * `mac`: (String) Entry MAC address. Mandatory. * `ips`: (List[string]) List of IP addresses associated to the MAC (v4, v6). Mandatory. * `name`: (String) Host name. Optional. +* `tag`: (String) Tag to assign to this host. Tags can be used to apply specific DHCP options to groups of hosts. Optional. #### Examples @@ -182,7 +183,20 @@ supported in libvirt). 
- "2345:0425:2CA1::0567:5673:cafe" - "192.168.254.11" name: r2d2 + tag: droid # Optional: assign tag for DHCP options ansible.builtin.include_role: name: dnsmasq tasks_from: manage_host.yml ``` + +#### Using tags for DHCP options + +When you assign a `tag` to DHCP entries, you can then configure DHCP options for that tag: + +``` +# In /etc/cifmw-dnsmasq.d/custom-options.conf +dhcp-option=tag:droid,60,HTTPClient +dhcp-option=tag:droid,67,http://192.168.254.1/boot.ipxe +``` + +All hosts with the `droid` tag will receive these DHCP options. diff --git a/roles/dnsmasq/molecule/default/converge.yml b/roles/dnsmasq/molecule/default/converge.yml index 2b5e24cecd..914af886e8 100644 --- a/roles/dnsmasq/molecule/default/converge.yml +++ b/roles/dnsmasq/molecule/default/converge.yml @@ -145,6 +145,125 @@ name: dnsmasq tasks_from: manage_host.yml + - name: Inject nodes with tags for DHCP options + vars: + cifmw_dnsmasq_dhcp_entries: + - network: starwars + state: present + mac: "0a:19:02:f8:4c:b1" + ips: + - "192.168.254.21" + - "2345:0425:2CA1::0567:5673:0021" + name: "r2d2" + tag: "droid" + - network: starwars + state: present + mac: "0a:19:02:f8:4c:b2" + ips: + - "192.168.254.22" + name: "c3po" + tag: "droid" + - network: startrek + state: present + mac: "0a:19:02:f8:4c:b3" + ips: + - "192.168.253.31" + name: "data" + tag: "android" + ansible.builtin.include_role: + name: dnsmasq + tasks_from: manage_host.yml + + - name: Verify DHCP host entries with tags + block: + - name: Read r2d2 DHCP host entry + become: true + ansible.builtin.slurp: + path: "/etc/cifmw-dnsmasq.d/dhcp-hosts.d/starwars_r2d2_0a:19:02:f8:4c:b1" + register: _r2d2_entry + + - name: Read c3po DHCP host entry + become: true + ansible.builtin.slurp: + path: "/etc/cifmw-dnsmasq.d/dhcp-hosts.d/starwars_c3po_0a:19:02:f8:4c:b2" + register: _c3po_entry + + - name: Read data DHCP host entry + become: true + ansible.builtin.slurp: + path: "/etc/cifmw-dnsmasq.d/dhcp-hosts.d/startrek_data_0a:19:02:f8:4c:b3" + register: 
_data_entry + + - name: Decode entries + ansible.builtin.set_fact: + _r2d2_content: "{{ _r2d2_entry.content | b64decode | trim }}" + _c3po_content: "{{ _c3po_entry.content | b64decode | trim }}" + _data_content: "{{ _data_entry.content | b64decode | trim }}" + + - name: Assert r2d2 entry has droid tag + ansible.builtin.assert: + that: + - "'set:droid' in _r2d2_content" + - "'0a:19:02:f8:4c:b1' in _r2d2_content" + - "'192.168.254.21' in _r2d2_content" + - "'r2d2' in _r2d2_content" + msg: "r2d2 DHCP entry should contain tag 'droid': {{ _r2d2_content }}" + + - name: Assert c3po entry has droid tag + ansible.builtin.assert: + that: + - "'set:droid' in _c3po_content" + - "'0a:19:02:f8:4c:b2' in _c3po_content" + - "'192.168.254.22' in _c3po_content" + - "'c3po' in _c3po_content" + msg: "c3po DHCP entry should contain tag 'droid': {{ _c3po_content }}" + + - name: Assert data entry has android tag + ansible.builtin.assert: + that: + - "'set:android' in _data_content" + - "'0a:19:02:f8:4c:b3' in _data_content" + - "'192.168.253.31' in _data_content" + - "'data' in _data_content" + msg: "data DHCP entry should contain tag 'android': {{ _data_content }}" + + - name: "Verify entry without tag has no set: prefix" + become: true + ansible.builtin.slurp: + path: "/etc/cifmw-dnsmasq.d/dhcp-hosts.d/starwars_solo_0a:19:02:f8:4c:a8" + register: _solo_entry + + - name: "Assert solo entry does not have a tag" + vars: + _solo_content: "{{ _solo_entry.content | b64decode | trim }}" + ansible.builtin.assert: + that: + - "'set:' not in _solo_content" + - "'0a:19:02:f8:4c:a8' in _solo_content" + - "'solo' in _solo_content" + msg: "solo DHCP entry should not contain any tag: {{ _solo_content }}" + + - name: "Create DHCP options configuration for tagged hosts" + become: true + ansible.builtin.copy: + dest: "/etc/cifmw-dnsmasq.d/test-dhcp-options.conf" + content: | + # Test DHCP options for droids + dhcp-option=tag:droid,60,HTTPClient + 
dhcp-option=tag:droid,67,http://192.168.254.1/droid-boot.ipxe + # Test DHCP options for androids + dhcp-option=tag:android,60,HTTPClient + dhcp-option=tag:android,67,http://192.168.253.1/android-boot.ipxe + mode: '0644' + validate: "/usr/sbin/dnsmasq -C %s --test" + notify: Restart dnsmasq + + - name: Verify dnsmasq configuration is valid + become: true + ansible.builtin.command: + cmd: /usr/sbin/dnsmasq -C /etc/cifmw-dnsmasq.conf --test + changed_when: false + - name: Add a domain specific forwarder vars: cifmw_dnsmasq_forwarder: diff --git a/roles/dnsmasq/tasks/manage_host.yml b/roles/dnsmasq/tasks/manage_host.yml index 30666e5678..73a5778853 100644 --- a/roles/dnsmasq/tasks/manage_host.yml +++ b/roles/dnsmasq/tasks/manage_host.yml @@ -62,7 +62,11 @@ {%- set _ = data.append(entry.mac) -%} {{ data | join('_') }} _entry: >- - {% set data = [entry.mac] -%} + {% set data = [] -%} + {% if entry.tag is defined and entry.tag | length > 0 -%} + {% set _ = data.append('set:' + entry.tag) -%} + {% endif -%} + {% set _ = data.append(entry.mac) -%} {% for ip in entry.ips if ip is not none and ip | length > 0 -%} {% set _ = data.append(ip | ansible.utils.ipwrap) -%} {% endfor -%} diff --git a/roles/libvirt_manager/DHCP_OPTIONS_EXAMPLE.md b/roles/libvirt_manager/DHCP_OPTIONS_EXAMPLE.md new file mode 100644 index 0000000000..3737dda578 --- /dev/null +++ b/roles/libvirt_manager/DHCP_OPTIONS_EXAMPLE.md @@ -0,0 +1,161 @@ +# DHCP Options Support in libvirt_manager + +This document explains how to add DHCP options to VM groups in the libvirt_manager role. + +## Overview + +The libvirt_manager role now supports assigning DHCP options to groups of VMs based on their type. This is useful for scenarios like PXE booting where you need to provide specific boot parameters to certain VM types. + +## How It Works + +1. **VM Type Tagging**: Each VM is automatically tagged with its type (e.g., `compute`, `controller`, `baremetal_instance`) +2. 
**DHCP Options**: You can specify DHCP options in the VM type definition +3. **dnsmasq Configuration**: The role automatically generates dnsmasq configuration that applies these options to all VMs of that type + +## Configuration Example + +### Basic Example + +Here's how to add DHCP options for PXE booting to baremetal instances: + +```yaml +cifmw_libvirt_manager_configuration: + vms: + baremetal_instance: + amount: 3 + disk_file_name: "blank" + disksize: 50 + memory: 8 + cpus: 4 + bootmenu_enable: "yes" + nets: + - public + - provisioning + dhcp_options: + - "60,HTTPClient" # Vendor class identifier + - "67,http://192.168.122.1:8081/boot.ipxe" # Boot filename (iPXE script) +``` + +### Advanced Example with Multiple VM Types + +```yaml +cifmw_libvirt_manager_configuration: + vms: + controller: + amount: 1 + image_url: "{{ cifmw_discovered_image_url }}" + sha256_image_name: "{{ cifmw_discovered_hash }}" + disk_file_name: "centos-stream-9.qcow2" + disksize: 50 + memory: 4 + cpus: 2 + nets: + - public + - osp_trunk + # No DHCP options for controllers - they'll use defaults + + compute: + amount: 3 + disk_file_name: blank + disksize: 40 + memory: 8 + cpus: 4 + nets: + - public + - osp_trunk + dhcp_options: + - "60,HTTPClient" + - "67,http://192.168.122.1:8081/boot-artifacts/compute-boot.ipxe" + + baremetal_instance: + amount: 2 + disk_file_name: "blank" + disksize: 50 + memory: 8 + cpus: 4 + bootmenu_enable: "yes" + nets: + - public + dhcp_options: + - "60,HTTPClient" + - "67,http://192.168.122.1:8081/boot-artifacts/agent.x86_64.ipxe" +``` + +## Common DHCP Options + +Here are some commonly used DHCP options for PXE/network booting: + +| Option | Name | Purpose | Example | +|--------|------|---------|---------| +| 60 | vendor-class-identifier | Identifies the vendor/client type | `60,HTTPClient` | +| 67 | bootfile-name | Path to boot file | `67,http://server/boot.ipxe` | +| 66 | tftp-server-name | TFTP server address | `66,192.168.1.10` | +| 150 | tftp-server-address 
| TFTP server IP (Cisco) | `150,192.168.1.10` | +| 210 | path-prefix | Path prefix for boot files | `210,/tftpboot/` | + + +## Technical Details + +### Under the Hood + +1. **Tag Assignment**: When VMs are created, each is assigned a tag matching its type in the dnsmasq DHCP host entry: + ``` + set:baremetal_instance,52:54:00:xx:xx:xx,192.168.122.10,hostname + ``` + +2. **DHCP Options Configuration**: A configuration file is generated at `/etc/cifmw-dnsmasq.d/vm-types-dhcp-options.conf`: + ``` + # Options for baremetal_instance VMs + dhcp-option=tag:baremetal_instance,60,HTTPClient + dhcp-option=tag:baremetal_instance,67,http://192.168.122.1:8081/boot.ipxe + ``` + +3. **dnsmasq Processing**: When a VM with the `baremetal_instance` tag requests DHCP, it receives both the standard network options AND the VM-type-specific options. + +### Files Modified + +- `roles/libvirt_manager/tasks/reserve_dnsmasq_ips.yml`: Adds VM type tags to DHCP entries +- `roles/libvirt_manager/tasks/create_dhcp_options.yml`: New file that generates DHCP options configuration +- `roles/libvirt_manager/tasks/generate_networking_data.yml`: Includes the new task +- `roles/dnsmasq/tasks/manage_host.yml`: Updated to support tags in DHCP entries + +## Troubleshooting + +### Verify DHCP Options Are Applied + +1. Check the generated configuration: + ```bash + cat /etc/cifmw-dnsmasq.d/vm-types-dhcp-options.conf + ``` + +2. Check DHCP host entries: + ```bash + ls -la /etc/cifmw-dnsmasq.d/dhcp-hosts.d/ + cat /etc/cifmw-dnsmasq.d/dhcp-hosts.d/public_* + ``` + +3. Verify dnsmasq configuration is valid: + ```bash + dnsmasq -C /etc/cifmw-dnsmasq.conf --test + ``` + +4. 
Monitor DHCP requests: + ```bash + journalctl -u cifmw-dnsmasq -f + ``` + +### Common Issues + +**Issue**: DHCP options not being sent to VMs +- **Solution**: Ensure dnsmasq service is restarted after making changes +- **Check**: Verify the VM type tag matches between the DHCP host entry and the options configuration + +**Issue**: VMs not PXE booting correctly +- **Solution**: Verify the boot file URL is accessible from the VM's network +- **Check**: Ensure option 67 contains the full URL including protocol (http://) + +## References + +- [dnsmasq manual](http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html) +- [DHCP Options RFC](https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.xhtml) +- [iPXE documentation](https://ipxe.org/howto/dhcpd) diff --git a/roles/libvirt_manager/README.md b/roles/libvirt_manager/README.md index a8beb7c61f..6e6e86a7fe 100644 --- a/roles/libvirt_manager/README.md +++ b/roles/libvirt_manager/README.md @@ -97,6 +97,7 @@ cifmw_libvirt_manager_configuration: bootmenu_enable: (string, toggle bootmenu. Optional, defaults to "no") networkconfig: (dict or list[dict], [network-config](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html#network-config-v2) v2 config, needed if a static ip address should be defined at boot time in absence of a dhcp server in special scenarios. Optional) devices: (dict, optional, defaults to {}. The keys are the VMs of that type that needs devices to be attached, and the values are lists of strings, where each string must contain a valid libvirt XML element that will be passed to virsh attach-device) + dhcp_options: (list, optional, defaults to []. List of DHCP options to apply to all VMs of this type. 
Format: ["option_number,value", ...]) networks: net_name: ``` diff --git a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml index 04f360b08f..a867d77fe3 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/tasks/test.yml @@ -94,6 +94,78 @@ _run_fail: true _failure: true + - name: Validate DHCP options + when: + - not _run_fail | bool + - scenario.check_dhcp_options is defined + - scenario.check_dhcp_options | bool + block: + - name: Check DHCP options configuration file exists + become: true + ansible.builtin.stat: + path: "/etc/cifmw-dnsmasq.d/vm-types-dhcp-options.conf" + register: _dhcp_options_file + + - name: Assert DHCP options file exists + ansible.builtin.assert: + quiet: true + that: + - _dhcp_options_file.stat.exists + msg: "DHCP options file should exist" + + - name: Read DHCP options file + become: true + ansible.builtin.slurp: + path: "/etc/cifmw-dnsmasq.d/vm-types-dhcp-options.conf" + register: _dhcp_options_content + + - name: Decode DHCP options content + ansible.builtin.set_fact: + _dhcp_opts: "{{ _dhcp_options_content.content | b64decode }}" + + - name: Verify DHCP options content for compute VMs + ansible.builtin.assert: + quiet: true + that: + - "'dhcp-option=tag:compute,60,HTTPClient' in _dhcp_opts" + - "'dhcp-option=tag:compute,67,http://192.168.140.1:8081/boot-artifacts/compute.ipxe' in _dhcp_opts" + msg: "DHCP options should contain correct entries for compute VMs" + + - name: Verify DHCP host entry has tag + become: true + ansible.builtin.shell: + cmd: "grep -l 'set:compute' /etc/cifmw-dnsmasq.d/dhcp-hosts.d/osp_trunk_compute-0*" + register: _tagged_entry + changed_when: false + failed_when: _tagged_entry.rc != 0 + + - name: Read tagged DHCP host entry + become: true + ansible.builtin.slurp: + path: "{{ _tagged_entry.stdout }}" + register: _dhcp_host_entry + + - name: 
Verify tag format in DHCP host entry + vars: + _entry_content: "{{ _dhcp_host_entry.content | b64decode | trim }}" + ansible.builtin.assert: + quiet: true + that: + - "'set:compute' in _entry_content" + - "_entry_content.startswith('set:compute,')" + msg: "DHCP host entry should start with 'set:compute,': {{ _entry_content }}" + + rescue: + - name: Debug DHCP options content + when: _dhcp_opts is defined + ansible.builtin.debug: + var: _dhcp_opts + + - name: Mark run as failed + ansible.builtin.set_fact: + _run_fail: true + _failure: true + - name: Assert we have expected facts set block: - name: Ensure it failed at the right place diff --git a/roles/libvirt_manager/molecule/generate_network_data/vars/scenarios.yml b/roles/libvirt_manager/molecule/generate_network_data/vars/scenarios.yml index b70b46176e..cbd4fdc314 100644 --- a/roles/libvirt_manager/molecule/generate_network_data/vars/scenarios.yml +++ b/roles/libvirt_manager/molecule/generate_network_data/vars/scenarios.yml @@ -38,6 +38,33 @@ scenarios: + - name: DHCP options for VM types + check_dns: + - rec: "compute-0.utility" + ip: "192.168.140.10" + - rec: "compute-0.ctlplane.local" + ip: "192.168.140.10" + - rec: "compute-0.public.local" + ip: "192.168.110.10" + check_dhcp: + - osp_trunk_compute-0 + - public_compute-0 + check_dhcp_options: true + lm_config_patch: + vms: + compute: + dhcp_options: + - "60,HTTPClient" + - "67,http://192.168.140.1:8081/boot-artifacts/compute.ipxe" + networks: + osp_trunk: | + + osp_trunk + + + + + - name: Baremetal integration check_dns: - rec: "compute-0.utility" diff --git a/roles/libvirt_manager/tasks/create_dhcp_options.yml b/roles/libvirt_manager/tasks/create_dhcp_options.yml new file mode 100644 index 0000000000..d31c5d1bae --- /dev/null +++ b/roles/libvirt_manager/tasks/create_dhcp_options.yml @@ -0,0 +1,46 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Initialize empty _lm_dhcp_options fact + ansible.builtin.set_fact: + _lm_dhcp_options: {} + +- name: Collect DHCP options from VM definitions + when: + - item.value.dhcp_options is defined + - item.value.dhcp_options | length > 0 + vars: + _vm_type: "{{ item.key }}" + _options: "{{ item.value.dhcp_options }}" + ansible.builtin.set_fact: + _lm_dhcp_options: >- + {{ + _lm_dhcp_options | combine({_vm_type: _options}) + }} + loop: "{{ _cifmw_libvirt_manager_layout.vms | dict2items }}" + loop_control: + label: "{{ item.key }}" + +- name: Generate DHCP option configuration for VM types + when: + - _lm_dhcp_options | length > 0 + become: true + notify: "Restart dnsmasq" + ansible.builtin.template: + src: "vm-types-dhcp-options.conf.j2" + dest: "/etc/cifmw-dnsmasq.d/vm-types-dhcp-options.conf" + mode: '0644' + validate: "/usr/sbin/dnsmasq -C %s --test" diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml index 1bf06d309a..c464a0867e 100644 --- a/roles/libvirt_manager/tasks/generate_networking_data.yml +++ b/roles/libvirt_manager/tasks/generate_networking_data.yml @@ -303,6 +303,9 @@ - name: Reserve IPs in DHCP and create DNS entries ansible.builtin.import_tasks: create_dns_records.yml +- name: Create DHCP options for VM types + ansible.builtin.import_tasks: create_dhcp_options.yml + # This task might also be done via the 
reproducer/prepare_networking.yml # but, depending on how we call the libvirt_manager, we might not have it. # Using the same filename/permissions/content, we can ensure it's there diff --git a/roles/libvirt_manager/tasks/reserve_dnsmasq_ips.yml b/roles/libvirt_manager/tasks/reserve_dnsmasq_ips.yml index b56ebde9ef..5fe2d3ff53 100644 --- a/roles/libvirt_manager/tasks/reserve_dnsmasq_ips.yml +++ b/roles/libvirt_manager/tasks/reserve_dnsmasq_ips.yml @@ -37,6 +37,7 @@ (host_data.key is match('^ocp.*')) | ternary(_ocp_name, host_data.key) }} + _vm_type: "{{ hostvars[host_data.key].vm_type | default('') }}" _host: network: "{{ _translated_name }}" name: "{{ _hostname }}" @@ -49,6 +50,7 @@ _net_data.ip_v6 | default('') ] }} + tag: "{{ _vm_type }}" ansible.builtin.set_fact: _lm_dhcp_entries: "{{ _lm_dhcp_entries + [_host] }}" loop: "{{ cifmw_networking_env_definition.instances | dict2items }}" diff --git a/roles/libvirt_manager/templates/vm-types-dhcp-options.conf.j2 b/roles/libvirt_manager/templates/vm-types-dhcp-options.conf.j2 new file mode 100644 index 0000000000..905467f77a --- /dev/null +++ b/roles/libvirt_manager/templates/vm-types-dhcp-options.conf.j2 @@ -0,0 +1,8 @@ +# Managed by ci-framework/libvirt_manager +# DHCP options for VM types +{% for vm_type, options in _lm_dhcp_options.items() %} +# Options for {{ vm_type }} VMs +{% for option in options %} +dhcp-option=tag:{{ vm_type }},{{ option }} +{% endfor %} +{% endfor %} From 1f658d858bd6b17aef6e4e6f6c6e11c80d334764 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 16 Oct 2025 14:40:13 +0200 Subject: [PATCH 430/480] [Polarion] Merge files in predictable order MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When merging the files, we want to rely on alphabetical order on case of having `tempest_results.xml` and `tempest_retry.xml` in the same directory – the entries from retry shall override the original values from first results. 
Hence, this change adds explicit sorting of XML files processed. --- roles/polarion/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/polarion/tasks/main.yml b/roles/polarion/tasks/main.yml index d097e6fd0f..b391db3180 100644 --- a/roles/polarion/tasks/main.yml +++ b/roles/polarion/tasks/main.yml @@ -100,7 +100,7 @@ cmd: >- source "{{ cifmw_polarion_jump_repo_dir }}/jump-venv/bin/activate" && junitparser merge {{ item.path | dirname }}/*.xml {{item.path | dirname }}/results_merged.xml - loop: "{{ xml_files.files }}" + loop: "{{ xml_files.files | sort(attribute='path') }}" - name: Look for test result XML files in artifacts directory ansible.builtin.find: From ea8dfd1dc6562eaecc206f0416c134d667109926 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 15 Oct 2025 11:59:33 +0530 Subject: [PATCH 431/480] Add cifmw_project_dir and cifmw_project_dir_absolute vars This commit is one of the steps of replacing common used vars with group_vars to improve overall maintenance of variables in ci-framework Signed-off-by: Amartya Sinha --- group_vars/all.yml | 3 +++ roles/build_containers/molecule/default/converge.yml | 2 +- roles/build_openstack_packages/molecule/default/converge.yml | 2 +- roles/build_openstack_packages/molecule/default/prepare.yml | 2 +- roles/build_push_container/molecule/default/converge.yml | 4 ++-- roles/ci_dcn_site/defaults/main.yml | 2 +- roles/copy_container/molecule/default/converge.yml | 2 +- roles/dlrn_promote/molecule/check_criteria/converge.yml | 2 +- roles/reproducer/molecule/crc_layout/converge.yml | 2 +- roles/reproducer/tasks/configure_controller.yml | 4 ++-- roles/reproducer/templates/play.yml.j2 | 2 +- roles/reproducer/vars/main.yml | 2 +- 12 files changed, 16 insertions(+), 13 deletions(-) diff --git a/group_vars/all.yml b/group_vars/all.yml index eff219b575..981a942cce 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -2,3 +2,6 @@ # This file contains all repeating variables, that can be 
set # globaly instead of parse Zuul inventory file to get proper value. #### GLOBAL VARS #### +ansible_user_dir: "{{ lookup('env', 'HOME') }}" +cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework +cifmw_project_dir_absolute: "{{ ansible_user_dir }}/{{ cifmw_project_dir }}" diff --git a/roles/build_containers/molecule/default/converge.yml b/roles/build_containers/molecule/default/converge.yml index 9fa889476d..8011bf9678 100644 --- a/roles/build_containers/molecule/default/converge.yml +++ b/roles/build_containers/molecule/default/converge.yml @@ -19,6 +19,6 @@ hosts: all vars: cifmw_build_containers_cleanup: true - cifmw_build_containers_config_file: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/build_containers/files/containers.yaml" + cifmw_build_containers_config_file: "{{ cifmw_project_dir_absolute }}/roles/build_containers/files/containers.yaml" roles: - role: "build_containers" diff --git a/roles/build_openstack_packages/molecule/default/converge.yml b/roles/build_openstack_packages/molecule/default/converge.yml index a3e5e0d3e4..31c1356e18 100644 --- a/roles/build_openstack_packages/molecule/default/converge.yml +++ b/roles/build_openstack_packages/molecule/default/converge.yml @@ -19,7 +19,7 @@ hosts: all vars: ansible_user_dir: "{{ lookup('env', 'HOME') }}" - cifmw_basedir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + cifmw_basedir: "{{ cifmw_project_dir_absolute }}" cifmw_bop_openstack_release: master cifmw_bop_dlrn_baseurl: https://trunk.rdoproject.org/centos9-master cifmw_bop_yum_repos_dir: "{{ cifmw_basedir }}/artifacts/repositories/" diff --git a/roles/build_openstack_packages/molecule/default/prepare.yml b/roles/build_openstack_packages/molecule/default/prepare.yml index 246e0f5591..ba0e8f86f8 100644 --- a/roles/build_openstack_packages/molecule/default/prepare.yml +++ b/roles/build_openstack_packages/molecule/default/prepare.yml @@ -19,7 +19,7 @@ hosts: all 
vars: ansible_user_dir: "{{ lookup('env', 'HOME') }}" - cifmw_basedir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + cifmw_basedir: "{{ cifmw_project_dir_absolute }}" roles: - role: test_deps - role: repo_setup diff --git a/roles/build_push_container/molecule/default/converge.yml b/roles/build_push_container/molecule/default/converge.yml index 5665507318..720ea1057d 100644 --- a/roles/build_push_container/molecule/default/converge.yml +++ b/roles/build_push_container/molecule/default/converge.yml @@ -32,7 +32,7 @@ cifmw_build_push_container_patch_number: 123 cifmw_build_push_container_name: test_container_multi_arch cifmw_build_push_container_containerfile_path: >- - {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile + "{{ cifmw_project_dir_absolute }}/roles/build_push_container/molecule/default/files/containerfile" cifmw_build_push_container_registry_name: 127.0.0.1:5001/cifmw-client/test_container_multi_arch cifmw_build_push_container_registry_tls_verify: false cifmw_build_push_container_supported_platform: [linux/amd64, linux/arm64] @@ -78,7 +78,7 @@ cifmw_build_push_container_patch_number: 123 cifmw_build_push_container_name: test_container_single_arch cifmw_build_push_container_containerfile_path: >- - {{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/build_push_container/molecule/default/files/containerfile + "{{ cifmw_project_dir_absolute }}/roles/build_push_container/molecule/default/files/containerfile" cifmw_build_push_container_registry_name: 127.0.0.1:5001/cifmw-client/test_container_single_arch cifmw_build_push_container_registry_tls_verify: false ansible.builtin.include_role: diff --git a/roles/ci_dcn_site/defaults/main.yml b/roles/ci_dcn_site/defaults/main.yml index 8e11237b27..c1056953ef 100644 --- a/roles/ci_dcn_site/defaults/main.yml +++ b/roles/ci_dcn_site/defaults/main.yml @@ -16,7 +16,7 @@ 
ci_dcn_site_arch_repo_path: "{{ cifmw_architecture_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/architecture') }}" ci_dcn_site_arch_path: "{{ ci_dcn_site_arch_repo_path }}/examples/dt/dcn" -ci_dcn_site_cifmw_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework +ci_dcn_site_cifmw_repo_path: "{{ cifmw_project_dir_absolute }}" ci_dcn_site_search_storage_network_names: - "storage" - "storagedcn1" diff --git a/roles/copy_container/molecule/default/converge.yml b/roles/copy_container/molecule/default/converge.yml index a80508c0bd..8eb79c9b11 100644 --- a/roles/copy_container/molecule/default/converge.yml +++ b/roles/copy_container/molecule/default/converge.yml @@ -52,7 +52,7 @@ --config /tmp/copy-quay-config.yaml --release antelopecentos9 copy args: - chdir: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/roles/copy_container/files/copy-quay" + chdir: "{{ cifmw_project_dir_absolute }}/roles/copy_container/files/copy-quay" - name: Curl local registry ansible.builtin.uri: diff --git a/roles/dlrn_promote/molecule/check_criteria/converge.yml b/roles/dlrn_promote/molecule/check_criteria/converge.yml index f4e22db285..7abf8c98aa 100644 --- a/roles/dlrn_promote/molecule/check_criteria/converge.yml +++ b/roles/dlrn_promote/molecule/check_criteria/converge.yml @@ -21,7 +21,7 @@ - periodic-podified-edpm-baremetal-antelope-ocp-crc - periodic-podified-edpm-deployment-antelope-ocp-crc-1cs9 - periodic-data-plane-adoption-github-rdo-centos-9-crc-single-node-antelope - cifmw_dlrn_promote_criteria_file: '~/src/github.com/openstack-k8s-operators/ci-framework/roles/dlrn_promote/files/centos9_antelope.yaml' + cifmw_dlrn_promote_criteria_file: '{{ cifmw_project_dir_absolute }}/roles/dlrn_promote/files/centos9_antelope.yaml' cifmw_dlrn_promote_promotion_target: current-podified tasks: - name: Check check_promotion_criteria playbook diff --git a/roles/reproducer/molecule/crc_layout/converge.yml 
b/roles/reproducer/molecule/crc_layout/converge.yml index ab6727d7db..bc0f23858d 100644 --- a/roles/reproducer/molecule/crc_layout/converge.yml +++ b/roles/reproducer/molecule/crc_layout/converge.yml @@ -37,7 +37,7 @@ mtu: 1500 cifmw_use_libvirt: true cifmw_reproducer_repositories: - - src: "{{ lookup('env', 'HOME') }}/src/github.com/openstack-k8s-operators/ci-framework" + - src: "{{ cifmw_project_dir_absolute }}" dest: "/home/zuul/src/github.com/openstack-k8s-operators/" cifmw_libvirt_manager_configuration: vms: diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index a8995ae1f5..d1d1404108 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -374,7 +374,7 @@ - name: Check if local common-requirements.txt exists delegate_to: localhost ansible.builtin.stat: - path: "{{ cifmw_reproducer_src_dir }}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + path: "{{ cifmw_project_dir_absolute }}/common-requirements.txt" register: _local_common_requirements_check run_once: true ignore_errors: true @@ -387,7 +387,7 @@ requirements: "{{ have_local | ternary(local, remote) }}" vars: have_local: "{{ _local_common_requirements_check.stat is defined and _local_common_requirements_check.stat.exists }}" - local: "{{ cifmw_reproducer_src_dir }}/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + local: "{{ cifmw_project_dir_absolute }}/common-requirements.txt" remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt - name: Inject most of the cifmw_ parameters passed to the reproducer run diff --git a/roles/reproducer/templates/play.yml.j2 b/roles/reproducer/templates/play.yml.j2 index 2863bc7d94..226b863dc6 100644 --- a/roles/reproducer/templates/play.yml.j2 +++ b/roles/reproducer/templates/play.yml.j2 @@ -22,7 +22,7 @@ when: - not _venv.stat.exists vars: - 
src_dir: "{{ zuul_vars.zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}" + src_dir: "{{ cifmw_project_dir_absolute }}" community.general.make: {%- raw %} chdir: "{{ ansible_user_dir }}/{{ src_dir }}" diff --git a/roles/reproducer/vars/main.yml b/roles/reproducer/vars/main.yml index b55fe1d469..e8a967bb76 100644 --- a/roles/reproducer/vars/main.yml +++ b/roles/reproducer/vars/main.yml @@ -2,7 +2,7 @@ # Default repositories we always want to have cifmw_reproducer_default_repositories: - src: "https://github.com/openstack-k8s-operators/ci-framework" - dest: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework" + dest: "{{ cifmw_project_dir_absolute }}" - src: "https://github.com/openstack-k8s-operators/install_yamls" dest: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" - src: "https://github.com/openstack-k8s-operators/architecture" From 83e9f0d2e4c4080ae9a46befd65616346febcb9a Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Fri, 17 Oct 2025 11:23:06 +0530 Subject: [PATCH 432/480] Create separate molecule config file for edpm-ansible New changes in the existing molecule config file is not compatible with edpm-ansible repo, and would require a lot of changes in there repo. It is better to create a separate config file for their molecule job with the older config. 
Signed-off-by: Amartya Sinha --- .config/molecule/config_edpm_ansible.yml | 47 ++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 .config/molecule/config_edpm_ansible.yml diff --git a/.config/molecule/config_edpm_ansible.yml b/.config/molecule/config_edpm_ansible.yml new file mode 100644 index 0000000000..c931941ba4 --- /dev/null +++ b/.config/molecule/config_edpm_ansible.yml @@ -0,0 +1,47 @@ +--- +driver: + name: delegated + options: + managed: false + ansible_connection_options: + ansible_connection: local + +log: true + +platforms: + - name: instance + environment: &env + http_proxy: "{{ lookup('env', 'http_proxy') }}" + https_proxy: "{{ lookup('env', 'https_proxy') }}" + +provisioner: + name: ansible + # Expose configuration to all jobs by default + # Useful when an fix requires to provide some + # CIFMW parameter to many roles, such as a broken + # CentOS image. + # inventory: + # group_vars: + # all: + # cifmw_discover_latest_image_qcow_prefix: "CentOS-Stream-GenericCloud-9-20240506" + + config_options: + defaults: + fact_caching: jsonfile + fact_caching_connection: /tmp/molecule/facts + remote_tmp: /tmp + log: true + env: + ANSIBLE_STDOUT_CALLBACK: yaml + ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH}:${HOME}/zuul-jobs/roles:${HOME}/ci-framework-data/artifacts/roles:${HOME}/src/github.com/openstack-k8s-operators/ci-framework/roles" + ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}:${HOME}/.ansible/plugins/modules:${HOME}/src/github.com/openstack-k8s-operators/ci-framework/plugins/modules" + ANSIBLE_ACTION_PLUGINS: "${ANSIBLE_ACTION_PLUGINS:-/usr/share/ansible/plugins/action}:${HOME}/.ansible/plugins/action:${HOME}/src/github.com/openstack-k8s-operators/ci-framework/plugins/action" + +scenario: + test_sequence: + - prepare + - converge + - cleanup + +verifier: + name: ansible From ea3287ce0727bee2f2c72d7f2a9d4b83e08b7e91 Mon Sep 17 00:00:00 2001 From: Roberto Alfieri Date: Wed, 8 Oct 2025 20:11:52 +0200 Subject: 
[PATCH 433/480] fix: improve Ceph container image patching for multiple keys MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the Ceph container image patching task in containers-prepare-parameters.yaml to use a loop with specific regular expressions for each relevant key. This approach ensures that all necessary Ceph-related image and tag fields—including ceph_grafana_image, ceph_image, ceph_tag, and others—are correctly updated for Ceph 7 (Reef) deployment. The previous implementation used a single, overly broad regexp that did not cover all required keys or value formats. Signed-off-by: Roberto Alfieri --- .../tasks/prepare_undercloud.yml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml index 14cf0b4b08..522fb5ed0a 100644 --- a/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml +++ b/roles/adoption_osp_deploy/tasks/prepare_undercloud.yml @@ -80,13 +80,23 @@ # Adoption requires Ceph 7 (Reef) as a requirement. 
Instead of performing a Ceph # upgrade from 6 (the default) to 7, let's try to deploy 7 in greenfield - - name: Patch containers-prepare-parameters Ceph container images + - name: Patch ceph-related container image tags and names in containers-prepare-parameters.yaml ansible.builtin.lineinfile: path: "{{ ansible_user_dir }}/containers-prepare-parameters.yaml" - regexp: '^(\s.*)+: rhceph-6-rhel9' - line: '\1: rhceph-7-rhel9' - backup: true + regexp: "{{ img_details.regexp }}" + line: "{{ img_details.line }}" backrefs: true + backup: true + loop: + - { regexp: '^(\s*ceph_alertmanager_tag:\s*)v4\.\d+', line: '\1v4.15' } + - { regexp: '^(\s*ceph_grafana_image:\s*).+', line: '\1grafana-rhel9' } + - { regexp: '^(\s*ceph_node_exporter_tag:\s*)v4\.\d+', line: '\1v4.15' } + - { regexp: '^(\s*ceph_prometheus_tag:\s*)v4\.\d+', line: '\1v4.15' } + - { regexp: '^(\s*ceph_tag:\s)latest', line: '\1 7' } + - { regexp: '^(\s*ceph_image:\s*)rhceph-6-rhel9', line: '\1rhceph-7-rhel9' } + loop_control: + label: "{{ img_details.regexp }}" + loop_var: img_details - name: Ensure os-net-config folder exists become: true From be5cdb0b1ec553c1da0340b2ba5f6e2555e95f84 Mon Sep 17 00:00:00 2001 From: Brian Date: Mon, 13 Oct 2025 13:46:02 +0100 Subject: [PATCH 434/480] OSPRH-19986: Replace hardcoded /home/zuul in: bgp scenarios replaced /home/zuul with {{ ansible_user_dir }} in three locations The goal of this change is to improve flexibility and consistency by replacing hardcoded home paths with variables. 
--- scenarios/reproducers/bgp-l3-xl.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scenarios/reproducers/bgp-l3-xl.yml b/scenarios/reproducers/bgp-l3-xl.yml index 16c108dd5d..d831d42029 100644 --- a/scenarios/reproducers/bgp-l3-xl.yml +++ b/scenarios/reproducers/bgp-l3-xl.yml @@ -754,7 +754,7 @@ cifmw_libvirt_manager_configuration: cifmw_devscripts_config_overrides: fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}" cluster_subnet_v4: "192.172.0.0/16" - network_config_folder: "/home/zuul/netconf" + network_config_folder: "{{ ansible_user_dir }}/netconf" # Required for egress traffic from pods to the osp_trunk network cifmw_devscripts_enable_ocp_nodes_host_routing: true @@ -763,7 +763,7 @@ cifmw_devscripts_enable_ocp_nodes_host_routing: true # controller-0 as-is and be consumed by the `deploy-va.sh` script. # Please note, all paths are on the controller-0, meaning managed by the # Framework. Please do not edit them! -_arch_repo: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" +_arch_repo: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/architecture" cifmw_architecture_scenario: bgp-l3-xl cifmw_kustomize_deploy_architecture_examples_path: "examples/dt/" cifmw_arch_automation_file: "bgp-l3-xl.yaml" @@ -791,7 +791,7 @@ cifmw_kustomize_deploy_metallb_source_files: >- pre_deploy: - name: BGP spines and leaves configuration type: playbook - source: "/home/zuul/src/github.com/openstack-k8s-operators/ci-framework/playbooks/bgp/prepare-bgp-spines-leaves.yaml" + source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/bgp/prepare-bgp-spines-leaves.yaml" extra_vars: num_racks: "{{ num_racks }}" router_bool: true From 39fba35fc042bf533298289bae6cbd1516d2e794 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 17 Oct 2025 12:05:22 +0200 Subject: [PATCH 435/480] Make mol_config_dir optional; update molecule development doc The molecule config parameter should be 
optional, just to give possibility to create molecule config file that contains all required parameters. Also update molecule development document, that would explain in better way how to start molecule job manually. Signed-off-by: Daniel Pawlik --- ci/playbooks/molecule-test.yml | 4 +++- docs/source/development/02_molecule.md | 30 +++++++++++++++++++------- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/ci/playbooks/molecule-test.yml b/ci/playbooks/molecule-test.yml index f7be41302a..08a70f1e6e 100644 --- a/ci/playbooks/molecule-test.yml +++ b/ci/playbooks/molecule-test.yml @@ -55,5 +55,7 @@ chdir: "{{ roles_dir }}" cmd: >- set -o pipefail; - molecule -c {{ mol_config_dir }} test --all | + molecule + {% if mol_config_dir is defined and mol_config_dir %} -c {{ mol_config_dir }} {% endif %} + test --all | tee {{ ansible_user_dir }}/ci-framework-data/logs/molecule-execution.log diff --git a/docs/source/development/02_molecule.md b/docs/source/development/02_molecule.md index 638cdecfd4..e5efa2809d 100644 --- a/docs/source/development/02_molecule.md +++ b/docs/source/development/02_molecule.md @@ -77,7 +77,6 @@ cifmw_artifacts_basedir: "{{ ansible_user_dir }}/ci-framework-data/artifacts " cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" nodepool: cloud: "" -roles_dir: /home/$(whoami)/src/github.com/openstack-k8s-operators/ci-framework/roles mol_config_dir: /home/$(whoami)/src/github.com/openstack-k8s-operators/ci-framework/.config/molecule/config_local.yml cifmw_zuul_target_host: localhost EOF @@ -87,6 +86,7 @@ ansible-galaxy install -r requirements.yml # Mock some roles, that are needed for Zuul CI, but not for local deployment mkdir -p roles/mirror-info-fork/tasks mkdir -p roles/prepare-workspace/tasks +mkdir -p group_vars # Execute Ansible to prepare molecule environment ansible-playbook -i inventory.yml \ @@ -97,21 +97,35 @@ ansible-playbook -i inventory.yml \ ### START MOLECULE JOB ### 
########################## -# Copy molecule job - example: crc_layout -mkdir -p roles/molecule/default/ -cp -a ./roles/reproducer/molecule/crc_layout/* roles/molecule/default/ +# Execute molecule job +## Example +## role: reproducer, scenario: crc_layout # It can be done using: -# - Ansible + +### - Ansible - recommended ### ansible-playbook -i inventory.yml \ + -e roles_dir="$(pwd)/roles/reproducer" \ -e@custom-vars.yaml \ ci/playbooks/molecule-test.yml -# - shell steps -ln -s roles/molecule . +#### - shell steps #### pip3 install -r test-requirements.txt -molecule -c .config/molecule/config_local.yml test --all +cd roles/reproducer + +# NOTE: Usually it is: config_local.yml. There is also config_podman.yml scenario +# NOTE: In some cases, when molecule provides all parameters, +# do not include config file (skip adding '-c' parameter) +molecule -c ../../.config/molecule/config_local.yml test --all + +# or just one scenario +molecule -c ../../.config/molecule/config_local.yml test -s crc_layout + +# Sometimes it is required to force recreate preparation. +# For example for crc_layout scenario +cd roles/reproducer +molecule prepare --force -s crc_layout ``` ### SSH to controller-0 - molecule VM From 722411e5e84134221516c666e2d6ea6629ee19b5 Mon Sep 17 00:00:00 2001 From: Jiri Macku Date: Fri, 17 Oct 2025 14:45:23 +0200 Subject: [PATCH 436/480] Network template for uni02beta-adoption scenario It is not possible to use the common template any moreIt is not possible to use the common template any more. 
--- .../templates/uni02beta-adoption/network-values/values.yaml.j2 | 1 + 1 file changed, 1 insertion(+) create mode 120000 roles/ci_gen_kustomize_values/templates/uni02beta-adoption/network-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/uni02beta-adoption/network-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/uni02beta-adoption/network-values/values.yaml.j2 new file mode 120000 index 0000000000..48e13bd3a8 --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/uni02beta-adoption/network-values/values.yaml.j2 @@ -0,0 +1 @@ +../../uni02beta/network-values/values.yaml.j2 \ No newline at end of file From 446f0526c3352ce5d9301c45e7e63bd3dd98978d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Ciecierski?= Date: Wed, 15 Oct 2025 16:13:44 +0200 Subject: [PATCH 437/480] Add extra condition for starting update_containers role Ensure update_containers role only runs for cifmw_update_containers_edpm_image_url varaible for osContainerImage when cifmw_update_containers_openstack is also true. 
--- roles/edpm_prepare/tasks/kustomize_and_deploy.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml index 37f238c10f..0a65a20397 100644 --- a/roles/edpm_prepare/tasks/kustomize_and_deploy.yml +++ b/roles/edpm_prepare/tasks/kustomize_and_deploy.yml @@ -30,12 +30,14 @@ - name: Prepare OpenStackVersion CR when: >- - (cifmw_update_containers_edpm_image_url is defined) or + (cifmw_update_containers_edpm_image_url is defined and + cifmw_update_containers_openstack is defined and + cifmw_update_containers_openstack | bool) or (cifmw_update_containers_ansibleee_image_url is defined) or - ((cifmw_update_containers_openstack is defined and - cifmw_update_containers_openstack | bool)) or - ((cifmw_update_containers_watcher is defined and - cifmw_update_containers_watcher | bool)) + (cifmw_update_containers_openstack is defined and + cifmw_update_containers_openstack | bool) or + (cifmw_update_containers_watcher is defined and + cifmw_update_containers_watcher | bool) vars: cifmw_update_containers_metadata: "{{ _ctlplane_name }}" ansible.builtin.include_role: From e81274d7d1e8ae82a01264218a4edcddb58314a9 Mon Sep 17 00:00:00 2001 From: Miguel Angel Nieto Jimenez Date: Thu, 16 Oct 2025 17:56:02 +0200 Subject: [PATCH 438/480] [NFV] Configure provisioning operator to watch all namespaces Set provisioning.watch_all_namespaces to true in the operator's configuration during deployment. Our CI was failing because the current process of patching the provisioning configuration live was causing race conditions or instability. Setting watch_all_namespaces to true from the start allows the provisioning operator to correctly find and manage provisioning CRs across all namespaces, which is required by NFV. This avoids the need for the unstable runtime patch. 
Jira: OSPRH-20407 --- roles/openshift_setup/README.md | 1 - roles/openshift_setup/defaults/main.yml | 1 - roles/openshift_setup/tasks/main.yml | 4 ---- roles/openshift_setup/tasks/metal3_config.yml | 21 ------------------- .../dt-nfv-ovs-dpdk-sriov-2nodesets.yml | 3 ++- .../reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml | 3 ++- .../dt-nfv-ovs-dpdk-sriov-networker.yml | 3 ++- .../reproducers/va-nfv-ovs-dpdk-sriov.yml | 3 ++- scenarios/reproducers/va-nfv-ovs-dpdk.yml | 3 ++- scenarios/reproducers/va-nfv-ovs-sriov.yml | 3 ++- 10 files changed, 12 insertions(+), 33 deletions(-) delete mode 100644 roles/openshift_setup/tasks/metal3_config.yml diff --git a/roles/openshift_setup/README.md b/roles/openshift_setup/README.md index c8b0b4c3ef..e748274afb 100644 --- a/roles/openshift_setup/README.md +++ b/roles/openshift_setup/README.md @@ -26,5 +26,4 @@ effect if `cifmw_openshift_setup_ca_registry_to_add` is set. mirrors: - mirror.quay.rdoproject.org ``` -* `cifmw_openshift_setup_metal3_watch_all_ns`: (Boolean) Tells Metal3 BMO to watch resources out of its namespace. Defaults to `false`. * `cifmw_openshift_setup_apply_marketplace_fix`: (Boolean) Apply openshift-marketplace workaround which is recreating all pods in the namespace. NOTE: same step is done in `base` job. 
diff --git a/roles/openshift_setup/defaults/main.yml b/roles/openshift_setup/defaults/main.yml index 3f514ae50b..2133089aeb 100644 --- a/roles/openshift_setup/defaults/main.yml +++ b/roles/openshift_setup/defaults/main.yml @@ -24,7 +24,6 @@ cifmw_openshift_setup_skip_internal_registry: false cifmw_openshift_setup_skip_internal_registry_tls_verify: false cifmw_openshift_setup_ca_bundle_path: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" cifmw_openshift_setup_digest_mirrors: [] -cifmw_openshift_setup_metal3_watch_all_ns: false cifmw_openshift_setup_operator_override_catalog_name: "redhat-operators-4.17" cifmw_openshift_setup_operator_override_catalog_namespace: "openshift-marketplace" cifmw_openshift_setup_operator_override_catalog_image: "registry.redhat.io/redhat/redhat-operator-index:v4.17" diff --git a/roles/openshift_setup/tasks/main.yml b/roles/openshift_setup/tasks/main.yml index 8b19bb697d..3e48b3e70f 100644 --- a/roles/openshift_setup/tasks/main.yml +++ b/roles/openshift_setup/tasks/main.yml @@ -213,10 +213,6 @@ spec: repositoryDigestMirrors: "{{ cifmw_openshift_setup_digest_mirrors }}" -- name: Metal3 tweaks - when: not cifmw_openshift_setup_dry_run - ansible.builtin.include_tasks: metal3_config.yml - - name: Patch network operator when using OVNKubernetes backend ansible.builtin.import_tasks: patch_network_operator.yml diff --git a/roles/openshift_setup/tasks/metal3_config.yml b/roles/openshift_setup/tasks/metal3_config.yml deleted file mode 100644 index 0653914538..0000000000 --- a/roles/openshift_setup/tasks/metal3_config.yml +++ /dev/null @@ -1,21 +0,0 @@ -- name: Make Metal3 watch all namespaces - when: - - cifmw_openshift_setup_metal3_watch_all_ns | bool - environment: - KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" - PATH: "{{ cifmw_path }}" - block: - - name: Fetch Metal3 configuration name - ansible.builtin.command: - cmd: "oc get Provisioning -o name" - register: _cifmw_openshift_setup_provisioning_name - changed_when: false - - - name: 
Apply the patch to Metal3 Provisioning - ansible.builtin.command: - cmd: >- - oc patch {{ _cifmw_openshift_setup_provisioning_name.stdout }} - --type='json' - -p='[{"op": "replace", "path": "/spec/watchAllNamespaces", "value": true}]' - register: _cifmw_openshift_setup_provisioning_ns_patch_out - changed_when: "'no change' not in _cifmw_openshift_setup_provisioning_ns_patch_out.stdout" diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets.yml index f491145b4c..f83e476f68 100644 --- a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets.yml +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-2nodesets.yml @@ -104,7 +104,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml index 4aa27ae06d..c20e433037 100644 --- a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-hci.yml @@ -104,7 +104,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. 
Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" diff --git a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml index 5c12324eb4..2158e72b6b 100644 --- a/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml +++ b/scenarios/reproducers/dt-nfv-ovs-dpdk-sriov-networker.yml @@ -104,7 +104,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" diff --git a/scenarios/reproducers/va-nfv-ovs-dpdk-sriov.yml b/scenarios/reproducers/va-nfv-ovs-dpdk-sriov.yml index 965edd9310..11d50ee59c 100644 --- a/scenarios/reproducers/va-nfv-ovs-dpdk-sriov.yml +++ b/scenarios/reproducers/va-nfv-ovs-dpdk-sriov.yml @@ -104,7 +104,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. 
Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" diff --git a/scenarios/reproducers/va-nfv-ovs-dpdk.yml b/scenarios/reproducers/va-nfv-ovs-dpdk.yml index 8b895d0f26..7808626270 100644 --- a/scenarios/reproducers/va-nfv-ovs-dpdk.yml +++ b/scenarios/reproducers/va-nfv-ovs-dpdk.yml @@ -104,7 +104,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" diff --git a/scenarios/reproducers/va-nfv-ovs-sriov.yml b/scenarios/reproducers/va-nfv-ovs-sriov.yml index 73ab35f5cb..7e4d2d2f20 100644 --- a/scenarios/reproducers/va-nfv-ovs-sriov.yml +++ b/scenarios/reproducers/va-nfv-ovs-sriov.yml @@ -102,7 +102,8 @@ cifmw_config_bmh: true # BMH are deployed in a differnt NS than the secret OSP BMO # references in each BMH. 
Metal3 requires the referenced # secrets to be in the same NS or be allowed to access them -cifmw_openshift_setup_metal3_watch_all_ns: true +cifmw_devscripts_config_overrides_patch_bmo_watch_all_namespaces: + bmo_watch_all_namespaces: true # Use EDPM image for computes cifmw_update_containers_edpm_image_url: "{{ cifmw_update_containers_registry }}/{{ cifmw_update_containers_org }}/edpm-hardened-uefi:{{ cifmw_update_containers_tag }}" From 266ec4ce56c1c0c23f0577f53d01d3bb5bef5b2e Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 17 Oct 2025 16:44:21 +0200 Subject: [PATCH 439/480] adoption, container login: consider also the variables already used There are already some variables used for to specify the container registry credentials - just use them. As fallback for now, they may become the only variables considered. (Those variables should become a list of dictionaries, not just direct values, but that's a different story.) --- .../tasks/login_registries.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/roles/adoption_osp_deploy/tasks/login_registries.yml b/roles/adoption_osp_deploy/tasks/login_registries.yml index 2eba3d5496..6eae8824b8 100644 --- a/roles/adoption_osp_deploy/tasks/login_registries.yml +++ b/roles/adoption_osp_deploy/tasks/login_registries.yml @@ -41,10 +41,14 @@ msg: "{{ _current_rh_release.stdout }}" - name: Login in container registry + vars: + _container_user: "{{ cifmw_adoption_osp_deploy_container_user|default(cifmw_registry_token.credentials.username, True) }}" + _container_password: "{{ cifmw_adoption_osp_deploy_container_password|default(cifmw_registry_token.credentials.password, True) }}" + _container_registry: "{{ cifmw_adoption_osp_deploy_container_registry|default(cifmw_registry_token_registry_url, True) }}" when: - - cifmw_adoption_osp_deploy_container_user is defined - - cifmw_adoption_osp_deploy_container_password is defined - - cifmw_adoption_osp_deploy_container_registry is defined + - 
_container_user is defined + - _container_password is defined + - _container_registry is defined block: - name: Install podman for container registry login become: true @@ -57,9 +61,9 @@ no_log: true ansible.builtin.command: > podman login - --username "{{ cifmw_adoption_osp_deploy_container_user }}" - --password "{{ cifmw_adoption_osp_deploy_container_password }}" - {{ cifmw_adoption_osp_deploy_container_registry }} + --username "{{ _container_user }}" + --password "{{ _container_password }}" + {{ _container_registry }} loop: - zuul - root From 221015dc1adb3fa76abf04264d187b677eae3402 Mon Sep 17 00:00:00 2001 From: Vito Castellano Date: Fri, 10 Oct 2025 00:07:06 +0200 Subject: [PATCH 440/480] refactor(ci_dcn_site): replace hardcoded /home/zuul paths Replace hardcoded '/home/zuul' paths with ansible_user_dir variable to support non-zuul users in ci_cdn_role # Conflicts: # roles/ci_dcn_site/defaults/main.yml --- roles/ci_dcn_site/tasks/ceph.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml index e59bb66c22..4478ca8692 100644 --- a/roles/ci_dcn_site/tasks/ceph.yml +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -88,7 +88,7 @@ - name: Deploy Ceph cifmw.general.ci_script: - output_dir: "/home/zuul/ci-framework-data/artifacts" + output_dir: "{{ ansible_user_dir }}/ci-framework-data/artifacts" chdir: "{{ ci_dcn_site_cifmw_repo_path }}" script: >- ansible-playbook From e97a3526d93c9f64790e3600a0520433c71768bb Mon Sep 17 00:00:00 2001 From: Brian Date: Fri, 17 Oct 2025 13:16:42 +0100 Subject: [PATCH 441/480] feat(OSPRH-19974) cleanup: Replace hardcoded /home/zuul in cleanup_openstack role Improve flexibility and consistency by replacing a hardcoded home path with variables. 
--- roles/cleanup_openstack/tasks/main.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/cleanup_openstack/tasks/main.yaml b/roles/cleanup_openstack/tasks/main.yaml index 7a1241629a..b8e194df94 100644 --- a/roles/cleanup_openstack/tasks/main.yaml +++ b/roles/cleanup_openstack/tasks/main.yaml @@ -70,8 +70,8 @@ | unique }} _external_dns_crs: - - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml - - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml + - "{{ cifmw_basedir }}/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml" + - "{{ cifmw_basedir }}/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml" _operators_crs: - "{{ cifmw_kustomize_deploy_nmstate_dest_file }}" - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" @@ -117,6 +117,6 @@ path: "{{ item }}" state: absent loop: - - "/home/zuul/ci-framework-data/logs" - - "/home/zuul/ci-framework-data/tests" + - "{{ cifmw_basedir }}/logs" + - "{{ cifmw_basedir }}/tests" become: true From 662c0caf5955c2a14ad51e384f88b2f6548bd973 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 14 Oct 2025 23:18:22 +0200 Subject: [PATCH 442/480] libvirt_manager: Add boot_order configuration support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for configuring VM boot device priority via boot_order parameter. Accepts list of 'hd'/'disk' and 'network' in desired boot order. Boot order is applied after device attachment. Example: boot_order: ['hd', 'network'] # Try disk first, then PXE Includes molecule test validating domain XML boot order attributes. 
Assisted-By: Claude Code/claude-sonnet-4 Signed-off-by: Harald Jensås --- roles/libvirt_manager/README.md | 4 + .../molecule/boot_order/cleanup.yml | 20 ++ .../molecule/boot_order/converge.yml | 238 ++++++++++++++++++ .../molecule/boot_order/molecule.yml | 6 + .../molecule/boot_order/prepare.yml | 19 ++ .../molecule/boot_order/vars/input.yml | 50 ++++ .../molecule/boot_order/vars/net-def.yml | 23 ++ .../tasks/configure_boot_order.yml | 112 +++++++++ roles/libvirt_manager/tasks/create_vms.yml | 8 + 9 files changed, 480 insertions(+) create mode 100644 roles/libvirt_manager/molecule/boot_order/cleanup.yml create mode 100644 roles/libvirt_manager/molecule/boot_order/converge.yml create mode 100644 roles/libvirt_manager/molecule/boot_order/molecule.yml create mode 100644 roles/libvirt_manager/molecule/boot_order/prepare.yml create mode 100644 roles/libvirt_manager/molecule/boot_order/vars/input.yml create mode 100644 roles/libvirt_manager/molecule/boot_order/vars/net-def.yml create mode 100644 roles/libvirt_manager/tasks/configure_boot_order.yml diff --git a/roles/libvirt_manager/README.md b/roles/libvirt_manager/README.md index 6e6e86a7fe..e5aa0e5308 100644 --- a/roles/libvirt_manager/README.md +++ b/roles/libvirt_manager/README.md @@ -95,6 +95,7 @@ cifmw_libvirt_manager_configuration: target: (Hypervisor hostname you want to deploy the family on. Optional) uefi: (boolean, toggle UEFI boot. Optional, defaults to false) bootmenu_enable: (string, toggle bootmenu. Optional, defaults to "no") + boot_order: (list, optional. Ordered list of boot devices. Valid values are 'hd' or 'disk' for disk boot, and 'network' for network boot. Example: ['hd', 'network'] will attempt disk boot first, then network boot. The boot order is applied after all devices are attached to the VM.) 
networkconfig: (dict or list[dict], [network-config](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html#network-config-v2) v2 config, needed if a static ip address should be defined at boot time in absence of a dhcp server in special scenarios. Optional) devices: (dict, optional, defaults to {}. The keys are the VMs of that type that needs devices to be attached, and the values are lists of strings, where each string must contain a valid libvirt XML element that will be passed to virsh attach-device) dhcp_options: (list, optional, defaults to []. List of DHCP options to apply to all VMs of this type. Format: ["option_number,value", ...]) @@ -167,6 +168,9 @@ cifmw_libvirt_manager_configuration: memory: 8 cpus: 4 bootmenu_enable: "yes" + boot_order: + - hd + - network nets: - public networks: diff --git a/roles/libvirt_manager/molecule/boot_order/cleanup.yml b/roles/libvirt_manager/molecule/boot_order/cleanup.yml new file mode 100644 index 0000000000..84af64c85f --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/cleanup.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Cleanup + vars: + molecule_scenario: boot_order + ansible.builtin.import_playbook: ../deploy_layout/cleanup.yml diff --git a/roles/libvirt_manager/molecule/boot_order/converge.yml b/roles/libvirt_manager/molecule/boot_order/converge.yml new file mode 100644 index 0000000000..4b3d7b3aab --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/converge.yml @@ -0,0 +1,238 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Test boot_order configuration + hosts: instance + gather_facts: true + vars_files: + - vars/net-def.yml + vars: + ansible_user_dir: "{{ lookup('env', 'HOME') }}" + cifmw_basedir: "/opt/basedir" + cifmw_libvirt_manager_configuration: + vms: + # Test VM with disk first, then network boot + disk_first: + amount: 1 + disksize: 10 + memory: 1 + cpus: 1 + disk_file_name: 'blank' + boot_order: + - hd + - network + nets: + - public + - osp_trunk + # Test VM with network first, then disk boot + net_first: + amount: 1 + disksize: 10 + memory: 1 + cpus: 1 + disk_file_name: 'blank' + boot_order: + - network + - disk + nets: + - public + - osp_trunk + # Test VM with only network boot + net_only: + amount: 1 + disksize: 10 + memory: 1 + cpus: 1 + disk_file_name: 'blank' + boot_order: + - network + nets: + - public + # Test VM without boot_order (should not have boot order attributes) + no_boot_order: + amount: 1 + disksize: 10 + memory: 1 + cpus: 1 + disk_file_name: 'blank' + nets: + - public + networks: + public: |- + + public + + + + + + + osp_trunk: |- + + osp_trunk + + + + + + + tasks: + - name: Load networking definition + ansible.builtin.include_vars: + file: input.yml + name: cifmw_networking_definition + + - name: Deploy layout with boot_order configurations + ansible.builtin.import_role: + name: libvirt_manager + tasks_from: deploy_layout + + - name: Verify boot_order configurations + block: + # Test 1: Verify disk-first VM has correct boot order + - name: Get disk_first VM XML + register: _disk_first_xml + community.libvirt.virt: + command: "get_xml" + name: "cifmw-disk-first-0" + uri: "qemu:///system" + + - name: Check disk boot order in disk-first VM + register: _disk_first_disk_boot + community.general.xml: + xmlstring: "{{ _disk_first_xml.get_xml }}" + xpath: "/domain/devices/disk[@device='disk']/boot" + content: "attribute" + + - name: Check interface boot order in disk-first VM + register: _disk_first_net_boot + community.general.xml: + xmlstring: "{{ 
_disk_first_xml.get_xml }}" + xpath: "/domain/devices/interface[1]/boot" + content: "attribute" + + - name: Assert disk-first VM has correct boot order + ansible.builtin.assert: + that: + - _disk_first_disk_boot.matches[0].boot.order == "1" + - _disk_first_net_boot.matches[0].boot.order == "2" + quiet: true + msg: >- + Expected disk boot order=1 and network boot order=2, + got disk={{ _disk_first_disk_boot.matches[0].boot.order }} + and network={{ _disk_first_net_boot.matches[0].boot.order }} + + # Test 2: Verify network-first VM has correct boot order + - name: Get net_first VM XML + register: _net_first_xml + community.libvirt.virt: + command: "get_xml" + name: "cifmw-net-first-0" + uri: "qemu:///system" + + - name: Check disk boot order in network-first VM + register: _net_first_disk_boot + community.general.xml: + xmlstring: "{{ _net_first_xml.get_xml }}" + xpath: "/domain/devices/disk[@device='disk']/boot" + content: "attribute" + + - name: Check interface boot order in network-first VM + register: _net_first_net_boot + community.general.xml: + xmlstring: "{{ _net_first_xml.get_xml }}" + xpath: "/domain/devices/interface[1]/boot" + content: "attribute" + + - name: Assert network-first VM has correct boot order + ansible.builtin.assert: + that: + - _net_first_net_boot.matches[0].boot.order == "1" + - _net_first_disk_boot.matches[0].boot.order == "2" + quiet: true + msg: >- + Expected network boot order=1 and disk boot order=2, + got network={{ _net_first_net_boot.matches[0].boot.order }} + and disk={{ _net_first_disk_boot.matches[0].boot.order }} + + # Test 3: Verify network-only VM has only network boot + - name: Get net_only VM XML + register: _net_only_xml + community.libvirt.virt: + command: "get_xml" + name: "cifmw-net-only-0" + uri: "qemu:///system" + + - name: Check interface boot order in network-only VM + register: _net_only_net_boot + community.general.xml: + xmlstring: "{{ _net_only_xml.get_xml }}" + xpath: "/domain/devices/interface[1]/boot" + 
content: "attribute" + + - name: Check disk boot order in network-only VM (should not exist) + register: _net_only_disk_boot + failed_when: false + community.general.xml: + xmlstring: "{{ _net_only_xml.get_xml }}" + xpath: "/domain/devices/disk[@device='disk']/boot" + content: "attribute" + + - name: Assert network-only VM has correct boot order + ansible.builtin.assert: + that: + - _net_only_net_boot.matches[0].boot.order == "1" + - _net_only_disk_boot.matches | default([]) | length == 0 + quiet: true + msg: >- + Expected only network boot with order=1, + got network={{ _net_only_net_boot.matches[0].boot.order }} + and disk boot count={{ _net_only_disk_boot.matches | default([]) | length }} + + # Test 4: Verify VM without boot_order has no boot order attributes + - name: Get no_boot_order VM XML + register: _no_boot_order_xml + community.libvirt.virt: + command: "get_xml" + name: "cifmw-no-boot-order-0" + uri: "qemu:///system" + + - name: Check for any boot order attributes in no-boot-order VM + register: _no_boot_order_check + failed_when: false + community.general.xml: + xmlstring: "{{ _no_boot_order_xml.get_xml }}" + xpath: "/domain/devices//boot" + content: "attribute" + + - name: Assert no-boot-order VM has no boot order attributes + ansible.builtin.assert: + that: + - _no_boot_order_check.matches | default([]) | length == 0 + quiet: true + msg: >- + Expected no boot order attributes, + but found {{ _no_boot_order_check.matches | default([]) | length }} boot elements + + - name: Output success message + ansible.builtin.debug: + msg: "All boot_order validations passed successfully!" 
diff --git a/roles/libvirt_manager/molecule/boot_order/molecule.yml b/roles/libvirt_manager/molecule/boot_order/molecule.yml new file mode 100644 index 0000000000..aeab077e2e --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/molecule.yml @@ -0,0 +1,6 @@ +--- +log: true + +provisioner: + name: ansible + log: true diff --git a/roles/libvirt_manager/molecule/boot_order/prepare.yml b/roles/libvirt_manager/molecule/boot_order/prepare.yml new file mode 100644 index 0000000000..3ef9484b10 --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/prepare.yml @@ -0,0 +1,19 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +- name: Prepare + ansible.builtin.import_playbook: ../deploy_layout/prepare.yml diff --git a/roles/libvirt_manager/molecule/boot_order/vars/input.yml b/roles/libvirt_manager/molecule/boot_order/vars/input.yml new file mode 100644 index 0000000000..3bd4e3275d --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/vars/input.yml @@ -0,0 +1,50 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +networks: + ctlplane: + network: "192.168.140.0/24" + gateway: "192.168.140.1" + mtu: 1500 +group-templates: + disk_firsts: + network-template: + range: + start: 10 + length: 1 + networks: + ctlplane: {} + net_firsts: + network-template: + range: + start: 20 + length: 1 + networks: + ctlplane: {} + net_onlys: + network-template: + range: + start: 30 + length: 1 + networks: + ctlplane: {} + no_boot_orders: + network-template: + range: + start: 40 + length: 1 + networks: + ctlplane: {} diff --git a/roles/libvirt_manager/molecule/boot_order/vars/net-def.yml b/roles/libvirt_manager/molecule/boot_order/vars/net-def.yml new file mode 100644 index 0000000000..00cec3cf39 --- /dev/null +++ b/roles/libvirt_manager/molecule/boot_order/vars/net-def.yml @@ -0,0 +1,23 @@ +--- +# Copyright 2025 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +_networks: + osp_trunk: + default: true + range: "192.168.140.0/24" + mtu: 1500 + public: + range: "192.168.110.0/24" diff --git a/roles/libvirt_manager/tasks/configure_boot_order.yml b/roles/libvirt_manager/tasks/configure_boot_order.yml new file mode 100644 index 0000000000..385b5e4f4d --- /dev/null +++ b/roles/libvirt_manager/tasks/configure_boot_order.yml @@ -0,0 +1,112 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This task requires: +# _vm_name Domain name for which boot order needs to be configured +# vm_data VM data containing boot_order configuration + +- name: "Get current domain XML for {{ _vm_name }}" + become: true + register: _domain_xml + community.libvirt.virt: + command: "get_xml" + name: "{{ _vm_name }}" + uri: "qemu:///system" + +- name: "Configure boot order for {{ _vm_name }}" + become: true + vars: + _workload: "{{ cifmw_libvirt_manager_basedir }}/workload" + block: + - name: Create temporary file for domain XML + ansible.builtin.tempfile: + state: file + suffix: _domain.xml + register: _temp_domain_file + + - name: Write current domain XML to temporary file + ansible.builtin.copy: + content: "{{ _domain_xml.get_xml }}" + dest: "{{ _temp_domain_file.path }}" + mode: '0600' + + - name: Remove existing boot elements from os section + community.general.xml: + path: "{{ _temp_domain_file.path }}" + xpath: "/domain/os/boot" + state: absent + + - name: Add boot order to disk device + 
when: "'hd' in vm_data.boot_order or 'disk' in vm_data.boot_order" + vars: + _boot_index: >- + {{ + vm_data.boot_order.index('hd') + 1 + if 'hd' in vm_data.boot_order + else vm_data.boot_order.index('disk') + 1 + }} + community.general.xml: + path: "{{ _temp_domain_file.path }}" + xpath: "/domain/devices/disk[@device='disk']" + add_children: + - boot: + order: "{{ _boot_index }}" + + - name: Get interface count for boot order + when: "'network' in vm_data.boot_order" + register: _iface_count + community.general.xml: + path: "{{ _temp_domain_file.path }}" + xpath: "/domain/devices/interface" + count: true + + - name: Add boot order to network interfaces + when: + - "'network' in vm_data.boot_order" + - _iface_count.count | default(0) | int > 0 + vars: + _boot_index: "{{ vm_data.boot_order.index('network') + 1 }}" + community.general.xml: + path: "{{ _temp_domain_file.path }}" + xpath: "/domain/devices/interface[1]" + add_children: + - boot: + order: "{{ _boot_index }}" + + - name: Read updated domain XML + ansible.builtin.slurp: + src: "{{ _temp_domain_file.path }}" + register: _updated_domain_xml + + - name: Redefine domain with updated boot order + vars: + _xml_content: "{{ _updated_domain_xml.content | b64decode }}" + # Remove XML declaration if present to avoid encoding issues + _clean_xml: >- + {{ + _xml_content | regex_replace('^<\?xml[^?]*\?>\s*', '') + }} + community.libvirt.virt: + command: define + xml: "{{ _clean_xml }}" + uri: "qemu:///system" + + always: + - name: Clean up temporary domain XML file + ansible.builtin.file: + path: "{{ _temp_domain_file.path }}" + state: absent + when: _temp_domain_file.path is defined diff --git a/roles/libvirt_manager/tasks/create_vms.yml b/roles/libvirt_manager/tasks/create_vms.yml index a05520e6b3..a9cca33190 100644 --- a/roles/libvirt_manager/tasks/create_vms.yml +++ b/roles/libvirt_manager/tasks/create_vms.yml @@ -202,3 +202,11 @@ loop: "{{ _vm_devices_content }}" loop_control: loop_var: _vm_device + +- name: 
"Configure boot order for {{ vm }}" + when: + - vm_data.boot_order is defined + - vm_data.boot_order | length > 0 + vars: + _vm_name: "cifmw-{{ vm }}" + ansible.builtin.include_tasks: configure_boot_order.yml From f66a47bac48183c38985146dc7a6ff69ac790cc2 Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Wed, 22 Oct 2025 15:25:59 +0530 Subject: [PATCH 443/480] Move cifmw_installyamls_repos var to group_vars This commit is one of the steps of replacing common used vars with group_vars to improve overall maintenance of variables in ci-framework Signed-off-by: Amartya Sinha --- docs/source/development/02_molecule.md | 1 - group_vars/all.yml | 3 +++ plugins/modules/generate_make_tasks.py | 2 +- roles/edpm_deploy/molecule/default/prepare.yml | 1 - roles/edpm_deploy_baremetal/molecule/default/prepare.yml | 1 - roles/edpm_prepare/molecule/default/prepare.yml | 1 - roles/install_yamls/README.md | 6 +++--- roles/install_yamls/molecule/default/converge.yml | 2 +- roles/reproducer/molecule/crc_layout/converge.yml | 2 +- roles/reproducer/tasks/main.yml | 2 +- roles/reproducer/tasks/reuse_main.yaml | 2 +- roles/reproducer/vars/main.yml | 2 +- roles/tempest/molecule/default/prepare.yml | 1 - roles/tofu/molecule/default/prepare.yml | 1 - roles/update/molecule/default/prepare.yml | 1 - scenarios/centos-9/ci.yml | 1 - scenarios/centos-9/content_provider.yml | 1 - scenarios/centos-9/edpm_baremetal_deployment_ci.yml | 1 - scenarios/centos-9/edpm_ci.yml | 1 - scenarios/centos-9/install_yamls.yml | 1 - scenarios/centos-9/meta_content_provider.yml | 1 - scenarios/centos-9/podified_common.yml | 1 - scenarios/centos-9/tcib.yml | 1 - scenarios/reproducers/3-nodes.yml | 1 - 24 files changed, 12 insertions(+), 25 deletions(-) diff --git a/docs/source/development/02_molecule.md b/docs/source/development/02_molecule.md index e5efa2809d..7d94fa86dd 100644 --- a/docs/source/development/02_molecule.md +++ b/docs/source/development/02_molecule.md @@ -74,7 +74,6 @@ cifmw_internal_registry_login: 
false cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_openshift_setup_skip_internal_registry: true cifmw_artifacts_basedir: "{{ ansible_user_dir }}/ci-framework-data/artifacts " -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" nodepool: cloud: "" mol_config_dir: /home/$(whoami)/src/github.com/openstack-k8s-operators/ci-framework/.config/molecule/config_local.yml diff --git a/group_vars/all.yml b/group_vars/all.yml index 981a942cce..3a84ffffbc 100644 --- a/group_vars/all.yml +++ b/group_vars/all.yml @@ -5,3 +5,6 @@ ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_project_dir: src/github.com/openstack-k8s-operators/ci-framework cifmw_project_dir_absolute: "{{ ansible_user_dir }}/{{ cifmw_project_dir }}" +cifmw_installyamls_repos_relative: src/github.com/openstack-k8s-operators/install_yamls +# since cifmw_installyamls_repos var already exists, let's use that and move all definition here in single place instead of creating another variable. 
+cifmw_installyamls_repos: "{{ ansible_user_dir }}/{{ cifmw_installyamls_repos_relative }}" diff --git a/plugins/modules/generate_make_tasks.py b/plugins/modules/generate_make_tasks.py index ec6d0da03d..ff72a9026d 100644 --- a/plugins/modules/generate_make_tasks.py +++ b/plugins/modules/generate_make_tasks.py @@ -42,7 +42,7 @@ - name: Generate make tasks generate_make_tasks: - install_yamls_path: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls/" + install_yamls_path: "{{ cifmw_installyamls_repos }}" output_directory: "{{ ansible_user_dir }}/make_installyamls/tasks" """ # noqa diff --git a/roles/edpm_deploy/molecule/default/prepare.yml b/roles/edpm_deploy/molecule/default/prepare.yml index 9360e433f5..2a57c338aa 100644 --- a/roles/edpm_deploy/molecule/default/prepare.yml +++ b/roles/edpm_deploy/molecule/default/prepare.yml @@ -20,7 +20,6 @@ vars: ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_defaults: NAMESPACE: openstack roles: diff --git a/roles/edpm_deploy_baremetal/molecule/default/prepare.yml b/roles/edpm_deploy_baremetal/molecule/default/prepare.yml index d3c9a68493..39b3a811a3 100644 --- a/roles/edpm_deploy_baremetal/molecule/default/prepare.yml +++ b/roles/edpm_deploy_baremetal/molecule/default/prepare.yml @@ -21,7 +21,6 @@ ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" roles: - role: test_deps - role: ci_setup diff --git a/roles/edpm_prepare/molecule/default/prepare.yml b/roles/edpm_prepare/molecule/default/prepare.yml index 59b9ec5050..810486ac1a 
100644 --- a/roles/edpm_prepare/molecule/default/prepare.yml +++ b/roles/edpm_prepare/molecule/default/prepare.yml @@ -21,7 +21,6 @@ ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_defaults: NAMESPACE: openstack roles: diff --git a/roles/install_yamls/README.md b/roles/install_yamls/README.md index dc3dbae312..ef2c84756d 100644 --- a/roles/install_yamls/README.md +++ b/roles/install_yamls/README.md @@ -9,7 +9,7 @@ It contains a set of playbooks to deploy podified control plane. * `cifmw_install_yamls_envfile`: (String) Environment file containing all the Makefile overrides. Defaults to `install_yamls`. * `cifmw_install_yamls_out_dir`: (String) `install_yamls` output directory to store generated output. Defaults to `{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts"`. * `cifmw_install_yamls_vars`: (Dict) A dict containing Makefile overrides. -* `cifmw_install_yamls_repo`: (String) `install_yamls` repo path. Defaults to `{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls`. +* `cifmw_install_yamls_repo`: (String) `install_yamls` repo path. Defaults to `{{ cifmw_installyamls_repos | default(ansible_user_dir ~ '/src/github.com/openstack-k8s-operators/install_yamls')}}` * `cifmw_install_yamls_whitelisted_vars`: (List) Allowed variables in `cifmw_install_yamls_vars` that are not part of `install_yamls` Makefiles. * `cifmw_install_yamls_edpm_dir`: (String) Output directory for EDPM related artifacts (OUTPUT_BASEDIR). 
Defaults to `{{ cifmw_install_yamls_out_dir_basedir ~ '/artifacts/edpm' }}` * `cifmw_install_yamls_checkout_openstack_ref`: (String) Enable the checkout from openstack-operator references @@ -43,7 +43,7 @@ The created role directory contains multiple task files, similar to register: "make_crc_storage_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" - chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" + chdir: "{{ cifmw_install_yamls_repo }}" script: make crc_storage dry_run: "{{ make_crc_storage_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_crc_storage_env|default({})), **(make_crc_storage_params|default({}))) }}" @@ -121,7 +121,7 @@ Let's look at below example:- register: "make_ansibleee_cleanup_status" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/artifacts" - chdir: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" + chdir: "{{ cifmw_install_yamls_repo }}" script: "make ansibleee_cleanup" dry_run: "{{ make_ansibleee_cleanup_dryrun|default(false)|bool }}" extra_args: "{{ dict((make_ansibleee_cleanup_env|default({})), **(make_ansibleee_cleanup_params|default({}))) }}" diff --git a/roles/install_yamls/molecule/default/converge.yml b/roles/install_yamls/molecule/default/converge.yml index 78ba7be9d2..1835746df9 100644 --- a/roles/install_yamls/molecule/default/converge.yml +++ b/roles/install_yamls/molecule/default/converge.yml @@ -22,7 +22,7 @@ namespace: foobar openstack_ctlplane: controlplane-yaml-file.yaml ansible_user_dir: "{{ lookup('env', 'HOME') }}" - cifmw_install_yamls_repo: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" + cifmw_install_yamls_repo: "{{ cifmw_installyamls_repos }}" zuul: branch: main items: diff --git a/roles/reproducer/molecule/crc_layout/converge.yml b/roles/reproducer/molecule/crc_layout/converge.yml index 
bc0f23858d..320bb35056 100644 --- a/roles/reproducer/molecule/crc_layout/converge.yml +++ b/roles/reproducer/molecule/crc_layout/converge.yml @@ -26,7 +26,7 @@ - src: "/tmp/ipmi-things" dest: "/home/zuul/ipmi-things" cifmw_basedir: "/opt/basedir" - cifmw_install_yamls_repo: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" + cifmw_install_yamls_repo: "{{ cifmw_installyamls_repos }}" cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" _networks: public: diff --git a/roles/reproducer/tasks/main.yml b/roles/reproducer/tasks/main.yml index 01a681be48..4f1b81479b 100644 --- a/roles/reproducer/tasks/main.yml +++ b/roles/reproducer/tasks/main.yml @@ -300,7 +300,7 @@ _devsetup_path: >- {{ ( - cifmw_install_yamls_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'), + cifmw_installyamls_repos, 'devsetup' ) | ansible.builtin.path_join }} diff --git a/roles/reproducer/tasks/reuse_main.yaml b/roles/reproducer/tasks/reuse_main.yaml index d299115285..a0c4d92a51 100644 --- a/roles/reproducer/tasks/reuse_main.yaml +++ b/roles/reproducer/tasks/reuse_main.yaml @@ -136,7 +136,7 @@ _devsetup_path: >- {{ ( - cifmw_install_yamls_repo | default('/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'), + cifmw_installyamls_repos, 'devsetup' ) | ansible.builtin.path_join }} diff --git a/roles/reproducer/vars/main.yml b/roles/reproducer/vars/main.yml index e8a967bb76..64ebcbfe97 100644 --- a/roles/reproducer/vars/main.yml +++ b/roles/reproducer/vars/main.yml @@ -4,7 +4,7 @@ cifmw_reproducer_default_repositories: - src: "https://github.com/openstack-k8s-operators/ci-framework" dest: "{{ cifmw_project_dir_absolute }}" - src: "https://github.com/openstack-k8s-operators/install_yamls" - dest: "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls" + dest: "{{ cifmw_installyamls_repos }}" - src: 
"https://github.com/openstack-k8s-operators/architecture" dest: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" diff --git a/roles/tempest/molecule/default/prepare.yml b/roles/tempest/molecule/default/prepare.yml index 59b9ec5050..810486ac1a 100644 --- a/roles/tempest/molecule/default/prepare.yml +++ b/roles/tempest/molecule/default/prepare.yml @@ -21,7 +21,6 @@ ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_defaults: NAMESPACE: openstack roles: diff --git a/roles/tofu/molecule/default/prepare.yml b/roles/tofu/molecule/default/prepare.yml index 3c8642c5a2..d6287b5be6 100644 --- a/roles/tofu/molecule/default/prepare.yml +++ b/roles/tofu/molecule/default/prepare.yml @@ -21,7 +21,6 @@ ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_defaults: NAMESPACE: openstack roles: diff --git a/roles/update/molecule/default/prepare.yml b/roles/update/molecule/default/prepare.yml index 7899e26c1f..15e5eefb1b 100644 --- a/roles/update/molecule/default/prepare.yml +++ b/roles/update/molecule/default/prepare.yml @@ -20,7 +20,6 @@ vars: ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_install_yamls_tasks_out: "{{ ansible_user_dir }}/zuul-jobs/roles/install_yamls_makes/tasks" - cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_defaults: NAMESPACE: openstack roles: diff --git a/scenarios/centos-9/ci.yml b/scenarios/centos-9/ci.yml index 
77114ebcf1..a0340e03fc 100644 --- a/scenarios/centos-9/ci.yml +++ b/scenarios/centos-9/ci.yml @@ -1,6 +1,5 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_openshift_user: "kubeadmin" cifmw_openshift_password: "123456789" diff --git a/scenarios/centos-9/content_provider.yml b/scenarios/centos-9/content_provider.yml index 96b5bf4fbb..a51b7a9537 100644 --- a/scenarios/centos-9/content_provider.yml +++ b/scenarios/centos-9/content_provider.yml @@ -1,6 +1,5 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_operator_build_push_registry: "{{ cifmw_rp_registry_ip | default('localhost') }}:5001" cifmw_operator_build_push_org: "openstack-k8s-operators" cifmw_operator_build_org: "openstack-k8s-operators" diff --git a/scenarios/centos-9/edpm_baremetal_deployment_ci.yml b/scenarios/centos-9/edpm_baremetal_deployment_ci.yml index edf47c28c4..f2a859afea 100644 --- a/scenarios/centos-9/edpm_baremetal_deployment_ci.yml +++ b/scenarios/centos-9/edpm_baremetal_deployment_ci.yml @@ -1,6 +1,5 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_vars: DEPLOY_DIR: "{{ cifmw_basedir }}/artifacts/edpm_compute" # used during Baremetal deployment BMAAS_INSTANCE_MEMORY: 8192 diff --git a/scenarios/centos-9/edpm_ci.yml b/scenarios/centos-9/edpm_ci.yml index f803557f6a..dbf9ab6990 100644 --- a/scenarios/centos-9/edpm_ci.yml +++ b/scenarios/centos-9/edpm_ci.yml @@ -1,6 +1,5 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_install_yamls_vars: BMO_SETUP: false INSTALL_CERT_MANAGER: false diff --git 
a/scenarios/centos-9/install_yamls.yml b/scenarios/centos-9/install_yamls.yml index 290ab87556..177806b07d 100644 --- a/scenarios/centos-9/install_yamls.yml +++ b/scenarios/centos-9/install_yamls.yml @@ -1,3 +1,2 @@ --- cifmw_rhol_crc_use_installyamls: true -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" diff --git a/scenarios/centos-9/meta_content_provider.yml b/scenarios/centos-9/meta_content_provider.yml index 2904205a5a..a5317f7f22 100644 --- a/scenarios/centos-9/meta_content_provider.yml +++ b/scenarios/centos-9/meta_content_provider.yml @@ -1,6 +1,5 @@ --- ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" # build_operators vars cifmw_operator_build_push_registry: "{{ cifmw_rp_registry_ip }}:{{ cifmw_rp_registry_port }}" diff --git a/scenarios/centos-9/podified_common.yml b/scenarios/centos-9/podified_common.yml index bb0e135d66..c50db2a4b1 100644 --- a/scenarios/centos-9/podified_common.yml +++ b/scenarios/centos-9/podified_common.yml @@ -2,7 +2,6 @@ # It is the common scenario file for EDPM multinode podified deployment ansible_user_dir: "{{ lookup('env', 'HOME') }}" -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" cifmw_openshift_user: "kubeadmin" cifmw_openshift_password: "123456789" diff --git a/scenarios/centos-9/tcib.yml b/scenarios/centos-9/tcib.yml index 4cff417831..b28f9ccab1 100644 --- a/scenarios/centos-9/tcib.yml +++ b/scenarios/centos-9/tcib.yml @@ -1,5 +1,4 @@ --- -cifmw_installyamls_repos: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" ansible_user_dir: "{{ lookup('env', 'HOME') }}" cifmw_build_containers_tcib_src: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/tcib" cifmw_repo_setup_src: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/repo-setup" diff --git 
a/scenarios/reproducers/3-nodes.yml b/scenarios/reproducers/3-nodes.yml index 3796e14d9e..eb30c5243c 100644 --- a/scenarios/reproducers/3-nodes.yml +++ b/scenarios/reproducers/3-nodes.yml @@ -2,7 +2,6 @@ # This is local to your desktop/laptop. # We can't use ansible_user_dir here, unless you have the same user on the # hypervisor and locally. -cifmw_install_yamls_repo: "~/src/github.com/openstack-k8s-operators/install_yamls" # This will be created on the hypervisor. cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" cifmw_path: "{{ ansible_user_dir }}/.crc/bin:{{ ansible_user_dir }}/.crc/bin/oc:{{ ansible_user_dir }}/bin:{{ ansible_env.PATH }}" From 0a01b4738aa6ed78471c43d33b11d9a8b5ae2e57 Mon Sep 17 00:00:00 2001 From: mkatari Date: Fri, 3 Oct 2025 12:31:44 +0530 Subject: [PATCH 444/480] Add IPv6 external Ceph support with gathering parameter This patch adds support for deploying external Ceph in IPv6 environment and introduces a new 'gathering' parameter for hook playbooks to control Ansible fact gathering behavior. Changes: - Add run_hook 'gathering' parameter to set ANSIBLE_GATHERING env var This allows hooks to force fresh fact gathering (implicit) instead of using cached facts (smart), fixing issues where network interfaces are configured after initial fact gathering - Add setup_cephnodes_ipv6.yaml hook playbook to prepare Ceph nodes for IPv6 deployment (repo setup, CA configuration, network setup) - Add create_external_ceph_params.yml hook playbook and create_external_ceph_params.sh script to generate external Ceph parameters tht file from deployed Ceph cluster and copy configuration files to controller node - Update run_hook README with documentation for gathering parameter The gathering parameter is particularly useful when network interfaces are configured during playbook execution and fresh facts are needed to capture all IPv6 addresses. 
--- .../playbooks/create_external_ceph_params.yml | 36 ++++++ hooks/playbooks/setup_cephnodes_ipv6.yaml | 104 ++++++++++++++++++ roles/run_hook/README.md | 3 + roles/run_hook/tasks/playbook.yml | 24 +++- scripts/create_external_ceph_params.sh | 63 +++++++++++ 5 files changed, 224 insertions(+), 6 deletions(-) create mode 100644 hooks/playbooks/create_external_ceph_params.yml create mode 100644 hooks/playbooks/setup_cephnodes_ipv6.yaml create mode 100755 scripts/create_external_ceph_params.sh diff --git a/hooks/playbooks/create_external_ceph_params.yml b/hooks/playbooks/create_external_ceph_params.yml new file mode 100644 index 0000000000..d25c2f0c03 --- /dev/null +++ b/hooks/playbooks/create_external_ceph_params.yml @@ -0,0 +1,36 @@ +--- +# This Playbook runs the shell script that extracts Ceph credentials to create the tht parameter file and copy required ceph conf files on osp-controller + +- name: Create external Ceph parameters file and copy ceph client conf files + hosts: localhost + gather_facts: false + + tasks: + - name: Execute external Ceph parameters creation script + ansible.builtin.script: "{{ playbook_dir }}/../../scripts/create_external_ceph_params.sh {{ ceph_node }} {{ ceph_mon_host }}" + register: script_output + + - name: Display script output + ansible.builtin.debug: + var: script_output.stdout_lines + + - name: Display script errors if any + when: script_output.stderr_lines | length > 0 + ansible.builtin.debug: + msg: "Script stderr: {{ script_output.stderr_lines }}" + + - name: Verify external_ceph_params.yaml was created + delegate_to: osp-undercloud-0 + ansible.builtin.stat: + path: "{{ ansible_user_dir }}/external_ceph_params.yaml" + register: params_file_stat + + - name: Confirm file creation + ansible.builtin.debug: + msg: "Successfully created external_ceph_params.yaml on osp-undercloud-0" + when: params_file_stat.stat.exists + + - name: Fail if file wasn't created + ansible.builtin.fail: + msg: "Failed to create external_ceph_params.yaml 
on osp-undercloud-0" + when: not params_file_stat.stat.exists diff --git a/hooks/playbooks/setup_cephnodes_ipv6.yaml b/hooks/playbooks/setup_cephnodes_ipv6.yaml new file mode 100644 index 0000000000..31d10559bc --- /dev/null +++ b/hooks/playbooks/setup_cephnodes_ipv6.yaml @@ -0,0 +1,104 @@ +--- +- name: Setup repos, CA and networks on ceph nodes + hosts: "{{ cifmw_ceph_target | default('ceph') }}" + gather_facts: true + become: true + vars: + cifmw_adoption_osp_deploy_ntp_server: "pool.ntp.org" + cifmw_adoption_osp_deploy_repos: + - rhel-9-for-x86_64-baseos-eus-rpms + - rhel-9-for-x86_64-appstream-eus-rpms + - rhel-9-for-x86_64-highavailability-eus-rpms + - openstack-17.1-for-rhel-9-x86_64-rpms + - fast-datapath-for-rhel-9-x86_64-rpms + - rhceph-7-tools-for-rhel-9-x86_64-rpms + common_dns: ["2620:cf:cf:aaaa::1"] + base_config: "/etc/os-net-config" + tasks: + - name: Setup repositories via rhos-release if needed + ansible.builtin.import_role: + name: repo_setup + tasks_from: rhos_release.yml + + - name: Install custom CA if needed + ansible.builtin.import_role: + name: install_ca + - name: Ensure needed logins + ansible.builtin.import_role: + name: adoption_osp_deploy + tasks_from: login_registries.yml + + - name: Ensure repos are setup + become: true + community.general.rhsm_repository: + name: "{{ cifmw_adoption_osp_deploy_repos }}" + state: enabled + + - name: Ensure os-net-config folder exists in ceph nodes + become: true + ansible.builtin.file: + path: "/etc/os-net-config" + state: directory + mode: '0755' + + - name: Ensure os-net-config and openvswitch is installed in ceph nodes + become: true + ansible.builtin.dnf: + name: + - os-net-config + - openvswitch + state: present + + - name: Generate os-net-config YAML + ansible.builtin.copy: + dest: "{{ base_config }}/network-os-net-config.yaml" + mode: '0644' + content: | + network_config: + - type: ovs_bridge + name: br-ex + mtu: 1500 + use_dhcp: false + dns_servers: {{ common_dns }} + addresses: + - ip_netmask: 
"{{ hostvars[inventory_hostname]['bridge_ip'] }}" + routes: [] + members: + - type: interface + name: nic2 + mtu: 1500 + primary: true + addresses: + - ip_netmask: "{{ hostvars[inventory_hostname]['external_ip'] }}" + routes: [] + - type: vlan + vlan_id: 20 + addresses: + - ip_netmask: "{{ hostvars[inventory_hostname]['internalapi_ip'] }}" + routes: [] + - type: vlan + vlan_id: 21 + addresses: + - ip_netmask: "{{ hostvars[inventory_hostname]['storage_ip'] }}" + routes: [] + - type: vlan + vlan_id: 23 + addresses: + - ip_netmask: "{{ hostvars[inventory_hostname]['storagemgmt_ip'] }}" + routes: [] + - type: vlan + vlan_id: 22 + addresses: + - ip_netmask: "{{ hostvars[inventory_hostname]['tenant_ip'] }}" + routes: [] + + - name: Apply network configuration + ansible.builtin.command: > + os-net-config -c {{ base_config }}/network-os-net-config.yaml -v + changed_when: true + + - name: Set net.ipv6.ip_nonlocal_bind + ansible.posix.sysctl: + name: net.ipv6.ip_nonlocal_bind + value: '1' + state: present diff --git a/roles/run_hook/README.md b/roles/run_hook/README.md index 8f78044a07..1c8a1cd923 100644 --- a/roles/run_hook/README.md +++ b/roles/run_hook/README.md @@ -37,6 +37,7 @@ name: * `source`: (String) Source of the playbook. If it's a filename, the playbook is expected in `hooks/playbooks`. It can be an absolute path. * `type`: (String) Type of the hook. In this case, set it to `playbook`. * `extra_vars`: (Dict) Structure listing the extra variables you would like to pass down ([extra_vars explained](#extra_vars-explained)) +* `gathering`: (String) Set the ANSIBLE_GATHERING environment variable. Valid values: `implicit`, `explicit`, `smart`. Defaults to empty string (uses ansible.cfg setting). 
* `hook_retry` (Boolean) Set true, if the hook execution should be retried on failure ##### About OpenShift namespaces and install_yamls @@ -56,6 +57,7 @@ Since `install_yamls` might not be initialized, the `run_hook` is exposing two n * `source`: (String) Source of the playbook. If it's a filename, the playbook is expected in `hooks/playbooks`. It can be an absolute path. * `type`: (String) Type of the hook. In this case, set it to `playbook`. * `extra_vars`: (Dict) Structure listing the extra variables you would like to pass down ([extra_vars explained](#extra_vars-explained)) +* `gathering`: (String) Set the ANSIBLE_GATHERING environment variable. Valid values: `implicit`, `explicit`, `smart`. Defaults to empty string (uses ansible.cfg setting). * `hook_retry` (Boolean) Set true, if the hook execution should be retried on failure #### Hook callback @@ -133,6 +135,7 @@ pre_deploy: - name: My hook source: ceph-deploy.yml type: playbook + gathering: implicit extra_vars: UUID: file: "ceph_env.yml" diff --git a/roles/run_hook/tasks/playbook.yml b/roles/run_hook/tasks/playbook.yml index 4cb6a1004c..5f6e79afd8 100644 --- a/roles/run_hook/tasks/playbook.yml +++ b/roles/run_hook/tasks/playbook.yml @@ -94,9 +94,15 @@ no_log: "{{ cifmw_nolog | default(true) | bool }}" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir }}/artifacts" - extra_args: - ANSIBLE_CONFIG: "{{ hook.config_file | default(ansible_config_file) }}" - ANSIBLE_LOG_PATH: "{{ log_path }}" + extra_args: >- + {{ + { + 'ANSIBLE_CONFIG': hook.config_file | default(ansible_config_file), + 'ANSIBLE_LOG_PATH': log_path + } | combine( + {'ANSIBLE_GATHERING': hook.gathering} if hook.gathering is defined else {} + ) + }} creates: "{{ hook.creates | default(omit) }}" script: >- ansible-playbook -i {{ hook.inventory | default(inventory_file) }} @@ -115,9 +121,15 @@ no_log: "{{ cifmw_nolog | default(true) | bool }}" cifmw.general.ci_script: output_dir: "{{ cifmw_basedir }}/artifacts" - extra_args: - ANSIBLE_CONFIG: 
"{{ hook.config_file | default(ansible_config_file) }}" - ANSIBLE_LOG_PATH: "{{ log_path }}" + extra_args: >- + {{ + { + 'ANSIBLE_CONFIG': hook.config_file | default(ansible_config_file), + 'ANSIBLE_LOG_PATH': log_path + } | combine( + {'ANSIBLE_GATHERING': hook.gathering} if hook.gathering is defined else {} + ) + }} creates: "{{ hook.creates | default(omit) }}" script: >- ansible-playbook -i {{ hook.inventory | default(inventory_file) }} diff --git a/scripts/create_external_ceph_params.sh b/scripts/create_external_ceph_params.sh new file mode 100755 index 0000000000..af928e6f50 --- /dev/null +++ b/scripts/create_external_ceph_params.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Create tht external_ceph_params.yaml on undercloud and update ceph_conf files in osp-controller + +set -e # Exit on any error + +# Parameters - only Ceph-specific values +CEPH_NODE=${1} +CEPH_MON_HOST=${2} + +# Validate required parameters +if [ -z "$CEPH_NODE" ] || [ -z "$CEPH_MON_HOST" ]; then + echo "ERROR: Missing required parameters" + echo "Usage: $0 " + echo " ceph_node: Name of the Ceph node (e.g., osp-ext-ceph-uni04delta-ipv6-0)" + echo " ceph_mon_host: Comma-separated list of Ceph monitor IPs (e.g., 2620:cf:cf:cccc::6a,2620:cf:cf:cccc::6b,2620:cf:cf:cccc::6c)" + exit 1 +fi + +echo "Creating external Ceph parameters file..." +echo "Using Ceph node: $CEPH_NODE" +echo "Using Ceph monitor hosts: $CEPH_MON_HOST" + +# Extract Ceph credentials +echo "Fetching Ceph credentials from $CEPH_NODE..." 
+CEPH_OUTPUT=$(ssh "$CEPH_NODE" cat /etc/ceph/ceph.conf /etc/ceph/ceph.client.openstack.keyring) + +FSID=$(echo "$CEPH_OUTPUT" | awk '/fsid =/ {print $3}') +KEY=$(echo "$CEPH_OUTPUT" | awk '/key =/ {print $3}' | tr -d '"') + +if [ -z "$FSID" ] || [ -z "$KEY" ]; then + echo "ERROR: Failed to extract FSID or KEY from Ceph configuration" + exit 1 +fi + +echo "Found FSID: $FSID" +echo "Found Key: $KEY" + +# Create the parameter file on undercloud +echo "Creating ~/external_ceph_params.yaml on osp-undercloud-0..." +ssh osp-undercloud-0 "cat > ~/external_ceph_params.yaml" < $HOME/ceph_client/ceph.conf" +ssh "$CEPH_NODE" sudo cat /etc/ceph/ceph.client.admin.keyring | ssh osp-controller-0 "cat > $HOME/ceph_client/ceph.client.admin.keyring" + +echo " Done! Files copied to osp-controller-0:$HOME/ceph_client/" From 9462904ccd77d7223650d9414172b57d93579179 Mon Sep 17 00:00:00 2001 From: David Rosenfeld Date: Tue, 21 Oct 2025 11:13:41 -0400 Subject: [PATCH 445/480] Add a new custom service validation The validation verifies that an edpm custom service may be created. After creation it is verified that deployments containing ansibleTags, ansibleSkipTags, ansibleLimit, and ansibleExtraVars are processed correctly. --- roles/validations/defaults/main.yml | 4 +- .../validations/tasks/edpm/custom_service.yml | 253 ++++++++++++++++++ 2 files changed, 256 insertions(+), 1 deletion(-) create mode 100644 roles/validations/tasks/edpm/custom_service.yml diff --git a/roles/validations/defaults/main.yml b/roles/validations/defaults/main.yml index f33253a419..fbc9d2cfdd 100644 --- a/roles/validations/defaults/main.yml +++ b/roles/validations/defaults/main.yml @@ -35,8 +35,10 @@ cifmw_validations_default_path: "{{ role_path }}/tasks" # cifmw_validations_edpm_check_node is the node that we will validate for edpm jobs. We # achieve this by delegating_to the check node and executing the required commands to -# validate that our desired state change has been achieved. 
+# validate that our desired state change has been achieved. A second check node is also +# available. cifmw_validations_edpm_check_node: compute-0 +cifmw_validations_edpm_second_check_node: compute-1 cifmw_validations_basedir: "{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}" diff --git a/roles/validations/tasks/edpm/custom_service.yml b/roles/validations/tasks/edpm/custom_service.yml new file mode 100644 index 0000000000..7f6bab8c90 --- /dev/null +++ b/roles/validations/tasks/edpm/custom_service.yml @@ -0,0 +1,253 @@ +- name: Determine name of deployed NodeSet + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc get -n {{ cifmw_validations_namespace }} osdpns --no-headers -o custom-columns=":metadata.name" + register: deployed_nodeset_name + +# Define a custom service named hello-world. The service has tasks with tags helloworld +# and byeworld. Subsequent tests will use this service to verify that only tasks with +# the proper label are executed. 
+- name: Create hello-world custom service + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + echo Hello {{ target }} + tags: helloworld + - name: Bye {{ target }} + ansible.builtin.shell: + cmd: >- + echo Bye {{ target }} + tags: byeworld + {% endraw %} + EOF + +# Create a deployment that uses custom service hello-world and only executes +# ansible tasks with tags helloworld +- name: Create openstackdataplanedeployment for ansible tag test + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment hello-world-ansible-tag + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + --timeout={{ cifmw_validations_timeout }}s + +- name: Get the ansible tag test log + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc logs --namespace={{ cifmw_validations_namespace }} job.batch/hello-world-hello-world-ansible-tag-openstack-edpm + register: ansible_tag_test_log + +# Need failure msg for xml results file +- name: Verify the ansible tag test log + ansible.builtin.fail: + msg: "Bye World in ansible tag test log or Hello World not in ansible tag test log" + when: "'Bye World' in ansible_tag_test_log.stdout or 'Hello World' not in ansible_tag_test_log.stdout" + +# Create a deployment that uses custom service hello-world and skips +# ansible tasks with tags helloworld +- name: Create openstackdataplanedeployment for ansible skip tags test + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ 
cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment hello-world-skip-tag + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + --timeout={{ cifmw_validations_timeout }}m + +- name: Get the ansible skip tag test log + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc logs --namespace={{ cifmw_validations_namespace }} job.batch/hello-world-hello-world-skip-tag-openstack-edpm + register: ansible_skip_tag_test_log + +# Need failure msg for xml results file +- name: Verify the ansible skip tag test log + ansible.builtin.fail: + msg: "Hello World in ansible skip tag test log or Bye World not in ansible skip tag test log" + when: "'Hello World' in ansible_skip_tag_test_log.stdout or 'Bye World' not in ansible_skip_tag_test_log.stdout" + +# Create a deployment that uses custom service hello-world and limits +# ansible task execution to a single compute node +- name: Create openstackdataplanedeployment for ansible limit test + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment hello-world-ansible-limit + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + --timeout={{ cifmw_validations_timeout }}m + +- name: Get the ansible limit test log + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc logs --namespace={{ cifmw_validations_namespace }} job.batch/hello-world-hello-world-ansible-limit-openstack-edpm + register: ansible_limit_test_log + +# Need failure msg for xml results file +- name: Verify the ansible 
limit test log + ansible.builtin.fail: + msg: "{{ cifmw_validations_edpm_second_check_node }} in ansible limit test log or {{ cifmw_validations_edpm_check_node }} not in ansible skip tag test log" + when: 'cifmw_validations_edpm_second_check_node in ansible_limit_test_log.stdout or cifmw_validations_edpm_check_node not in ansible_limit_test_log.stdout' + +# Create a deployment that uses custom service hello-world and uses +# ansibleExtraVars when the service executes +- name: Create openstackdataplanedeployment for ansible extra vars test + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: | + oc apply -f - <- + oc wait openstackdataplanedeployment hello-world-extra-vars + --namespace={{ cifmw_validations_namespace }} + --for=condition=ready + --timeout={{ cifmw_validations_timeout }}m + +- name: Get the ansibleExtraVars test log + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + cifmw.general.ci_script: + output_dir: "{{ cifmw_validations_basedir }}/artifacts" + script: >- + oc logs --namespace={{ cifmw_validations_namespace }} job.batch/hello-world-hello-world-extra-vars-openstack-edpm + register: ansible_extra_vars_test_log + +# Need failure msg for xml results file +- name: Verify the ansibleExtraVars test log + ansible.builtin.fail: + msg: "World in ansibleExtraVars test log or Mars not in ansibleExtraVars test log" + when: "'World' in ansible_extra_vars_test_log.stdout or 'Mars' not in ansible_extra_vars_test_log.stdout" From 0ec3565cf7e7e97e2b7856b9089ba3acb7e33596 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 21 Oct 2025 14:45:36 +0200 Subject: [PATCH 446/480] Add edpm-ansible-molecule-edpm_kernel for being tested when molecule change After doing improvements in molecule CI job process [1][2], the edpm_kernel CI job should be working now. 
[1] https://github.com/openstack-k8s-operators/ci-framework/pull/3421 [2] https://github.com/openstack-k8s-operators/edpm-ansible/pull/1048 Signed-off-by: Daniel Pawlik --- scripts/create_role_molecule.py | 1 + zuul.d/projects.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/scripts/create_role_molecule.py b/scripts/create_role_molecule.py index a40078edb2..8259541a28 100755 --- a/scripts/create_role_molecule.py +++ b/scripts/create_role_molecule.py @@ -23,6 +23,7 @@ additional_molecule_jobs = [ "edpm-ansible-molecule-edpm_podman", "edpm-ansible-molecule-edpm_ovs", + "edpm-ansible-molecule-edpm_kernel", ] diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 182be6866a..0dd476ac34 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -106,6 +106,7 @@ - cifmw-molecule-virtualbmc - edpm-ansible-molecule-edpm_podman - edpm-ansible-molecule-edpm_ovs + - edpm-ansible-molecule-edpm_kernel github-post: jobs: - build-push-container-cifmw-client-post From fa902d4f45dff037d39c649a5d3e15072ea37eb1 Mon Sep 17 00:00:00 2001 From: bshewale Date: Thu, 9 Oct 2025 18:52:03 +0530 Subject: [PATCH 447/480] Replace hardcoded /home/zuul paths in kustomize_deploy & ci_gen_kustomize_values Replace hardcoded /home/zuul/ paths with ansible_user_dir variable in kustomize_deploy & ci_gen_kustomize_values role to support different user environments and improve consistency with configurable user variables pattern. 
--- roles/ci_gen_kustomize_values/molecule/default/prepare.yml | 5 +++-- roles/kustomize_deploy/tasks/cleanup.yml | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/roles/ci_gen_kustomize_values/molecule/default/prepare.yml b/roles/ci_gen_kustomize_values/molecule/default/prepare.yml index 34cbe58dd5..b11327d265 100644 --- a/roles/ci_gen_kustomize_values/molecule/default/prepare.yml +++ b/roles/ci_gen_kustomize_values/molecule/default/prepare.yml @@ -20,7 +20,7 @@ vars: cifmw_ci_gen_kustomize_values_src_dir: >- {{ - (lookup('env', 'HOME', '/home/zuul'), + (lookup('env', 'HOME'), 'ci-framework-data', 'artifacts', 'ci_k8s_snippets') | path_join }} @@ -47,5 +47,6 @@ - name: Download tools for later testing and validations ansible.builtin.import_playbook: >- - {{ ('/home/zuul/src/github.com/openstack-k8s-operators', + {{ (lookup('env', 'HOME'), + 'src/github.com/openstack-k8s-operators', 'install_yamls/devsetup/download_tools.yaml') | path_join }} diff --git a/roles/kustomize_deploy/tasks/cleanup.yml b/roles/kustomize_deploy/tasks/cleanup.yml index b42abc0b09..89f179eb17 100644 --- a/roles/kustomize_deploy/tasks/cleanup.yml +++ b/roles/kustomize_deploy/tasks/cleanup.yml @@ -50,8 +50,8 @@ - "{{ cifmw_kustomize_deploy_kustomizations_dest_dir }}/openstack.yaml" - "{{ cifmw_kustomize_deploy_olm_dest_file }}" _external_dns_crs: - - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml - - /home/zuul/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml + - "{{ ansible_user_dir }}/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml" + - "{{ ansible_user_dir }}/ci-framework-data/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml" register: _cifmw_kustomize_files ansible.builtin.set_fact: cifmw_kustomize_deploy_crs_to_delete: >- From 0080aa2f8220421be9cdb2688900e081b81f6840 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Thu, 16 Oct 2025 03:33:56 -0400 Subject: 
[PATCH 448/480] Remove deprecated tempest hooks The pre_tempest and post_tempest variables are now deprecated and the pre_tests and post_tests variables should be used now. This patch removes all the instances of these old variables. --- post-deployment.yml | 3 --- update-edpm.yml | 2 -- 2 files changed, 5 deletions(-) diff --git a/post-deployment.yml b/post-deployment.yml index a1bd51425e..b0e66a41ce 100644 --- a/post-deployment.yml +++ b/post-deployment.yml @@ -10,9 +10,6 @@ - admin-setup - name: Run Test - vars: - pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" - post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" ansible.builtin.import_role: name: cifmw_setup tasks_from: run_tests.yml diff --git a/update-edpm.yml b/update-edpm.yml index fff0d65cf5..c610262474 100644 --- a/update-edpm.yml +++ b/update-edpm.yml @@ -26,8 +26,6 @@ tasks: - name: Run Test vars: - pre_tests: "{{ (lookup('vars', 'pre_tempest', default=[])) }}" - post_tests: "{{ (lookup('vars', 'post_tempest', default=[])) }}" cifmw_test_operator_artifacts_basedir: "{{ cifmw_basedir|default(ansible_user_dir ~ '/ci-framework-data') }}/tests/test_operator_update" cifmw_test_operator_tempest_name: "post-update-tempest-tests" ansible.builtin.import_role: From a2d26b6b7ea109c37264fedc86b36cf541df9815 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 8 Sep 2025 22:30:37 +0200 Subject: [PATCH 449/480] Add pre and post logs hooks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds an option to define pre and post hooks for the logs gathering phase – it will allow us to define extra actions to be performed at the end of each job no matter what was the last stage and status. 
--- roles/cifmw_setup/tasks/run_logs.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/roles/cifmw_setup/tasks/run_logs.yml b/roles/cifmw_setup/tasks/run_logs.yml index c35a1d3327..895fc6a1de 100644 --- a/roles/cifmw_setup/tasks/run_logs.yml +++ b/roles/cifmw_setup/tasks/run_logs.yml @@ -1,3 +1,10 @@ +--- +- name: Run pre_logs hooks + vars: + step: pre_logs + ansible.builtin.import_role: + name: run_hook + - name: Ensure cifmw_basedir param is set when: - cifmw_basedir is not defined @@ -96,3 +103,9 @@ }} mode: "0777" remote_src: true + +- name: Run post_logs hooks + vars: + step: post_logs + ansible.builtin.import_role: + name: run_hook From 978cae43c9448533fc47137509f1380c7b9b9581 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Thu, 23 Oct 2025 22:21:27 +0200 Subject: [PATCH 450/480] Add symlink to CIFMW roles in hooks directory Because of recent rework related to logs collection, the specified playbook in pre- and post-logs steps are not executed inside CI-Framework main directory. Hence, to make the CIFMW roles available in hooks, the local symlink is added. Co-Authored-By: Daniel Pawlik --- hooks/playbooks/roles | 1 + 1 file changed, 1 insertion(+) create mode 120000 hooks/playbooks/roles diff --git a/hooks/playbooks/roles b/hooks/playbooks/roles new file mode 120000 index 0000000000..b741aa3dbc --- /dev/null +++ b/hooks/playbooks/roles @@ -0,0 +1 @@ +../../roles \ No newline at end of file From fae58e6c0bdff1d6a6b1611cc62633542b95c730 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 25 Aug 2025 11:01:06 +0200 Subject: [PATCH 451/480] [PCP] Add role for managing Performance Co-Pilot This role manages Performance Co-Pilot (PCP) toolkit for monitoring and analyzing the historical details of system performance. It would be useful for tracking the resources utilization in the end-to-end testing jobs. 
--- docs/dictionary/en-custom.txt | 1 + roles/pcp_metrics/README.md | 61 ++++++++++++++++++++++++++++ roles/pcp_metrics/defaults/main.yaml | 15 +++++++ roles/pcp_metrics/tasks/gather.yaml | 60 +++++++++++++++++++++++++++ roles/pcp_metrics/tasks/main.yaml | 12 ++++++ roles/pcp_metrics/tasks/setup.yaml | 15 +++++++ zuul.d/molecule.yaml | 9 ++++ zuul.d/projects.yaml | 1 + 8 files changed, 174 insertions(+) create mode 100644 roles/pcp_metrics/README.md create mode 100644 roles/pcp_metrics/defaults/main.yaml create mode 100644 roles/pcp_metrics/tasks/gather.yaml create mode 100644 roles/pcp_metrics/tasks/main.yaml create mode 100644 roles/pcp_metrics/tasks/setup.yaml diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 4706600ccf..170a695d37 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -244,6 +244,7 @@ igmp igogicbjyxbzig ihbyb img +IMVHO ingressvips ini init diff --git a/roles/pcp_metrics/README.md b/roles/pcp_metrics/README.md new file mode 100644 index 0000000000..38290b6c95 --- /dev/null +++ b/roles/pcp_metrics/README.md @@ -0,0 +1,61 @@ +PCP Metrics +=========== + +This role manages Performance Co-Pilot (PCP) toolkit [^1] on the target host +for monitoring and analyzing the historical details of system performance [^2]. + +**Note**: The PCP toolkit is not to be confused with GitHub Copilot [^3], +which is an AI coding assistant – and not a concern of this role here. 
+ + +Usage +----- + +To setup and enable PCP on the target host, include the role with setup tasks: + +``` +- name: Setup PCP + include_role: + name: pcp_metrics + tasks_from: setup +``` + +To collect current metrics from the host, include the role with gather tasks: + +``` +- name: Gather metrics + include_role: + name: pcp_metrics + tasks_from: gather +``` + +Alternatively, `pcp_metrics_setup` and `pcp_metrics_gather` boolean variables +can be used to control which actions to perform when including the role with +just main tasks file (the default one). For example: + +``` +- name: Setup PCP + include_role: + name: pcp_metrics + vars: + pcp_metrics_setup: true +``` + + +Impact +------ + +According to my brief checks, enabling PCP causes negligible difference +in the system load. The metrics in default configuration took about 5 MB +of disk space per hour (although it can be reduced by over 90% with `xz`, +IMVHO it is not worth the additional CPU usage). + + +References +---------- + +[^1]: https://pcp.io + +[^2]: https://pcp.readthedocs.io/ + +[^3]: https://github.com/features/copilot diff --git a/roles/pcp_metrics/defaults/main.yaml b/roles/pcp_metrics/defaults/main.yaml new file mode 100644 index 0000000000..d8434ad96c --- /dev/null +++ b/roles/pcp_metrics/defaults/main.yaml @@ -0,0 +1,15 @@ +--- +# Variables to control which parts of main playbook should be executed +pcp_metrics_setup: false +pcp_metrics_gather: false + +# Setup-related variables +pcp_metrics_packages: + - pcp # for pmlogger + - pcp-system-tools # for pmrep + +# Gather-specific variables +pcp_metrics_archive: "/var/log/pcp/pmlogger/{{ ansible_nodename }}" +pcp_metrics_interval: 10 # seconds +pcp_metrics_metricspec: ':collectl-sc :collectl-sm :collectl-sd :collectl-sn' +pcp_metrics_output_dir: /tmp/pcp-metrics diff --git a/roles/pcp_metrics/tasks/gather.yaml b/roles/pcp_metrics/tasks/gather.yaml new file mode 100644 index 0000000000..4e1543ae21 --- /dev/null +++ 
b/roles/pcp_metrics/tasks/gather.yaml @@ -0,0 +1,60 @@ +--- +- name: Populate service facts + ansible.builtin.service_facts: + +- name: Fetch needed facts + ansible.builtin.setup: + gather_subset: + - min + filter: + - ansible_hostname + - ansible_nodename + when: + - ansible_hostname is not defined + or ansible_nodename is not defined + +- name: Check if pmrep is installed + ansible.builtin.command: >- + pmrep --version + ignore_errors: true + changed_when: false + register: _pmrep + +- name: Check if archive exists + stat: + path: "{{ pcp_metrics_archive }}" + register: _pcp_archive + +- name: Collect metrics when the pmlogger service is running + when: + - ansible_facts.services['pmlogger.service'] is defined + - ansible_facts.services['pmlogger.service']['state'] == 'running' + - _pmrep.rc == 0 + - _pcp_archive.stat.readable | default(false) + block: + - name: Collect the metrics from host + ansible.builtin.command: >- + pmrep + --archive "{{ pcp_metrics_archive }}" + --interval "{{ pcp_metrics_interval }}" + --timestamps + --timestamp-format '%Y-%m-%d_%H:%M:%S' + --timezone UTC + --output csv + --delimiter ',' + {{ pcp_metrics_metricspec }} + register: _pcp_metrics_pmrep + changed_when: false + + - name: Ensure the output directory exist + file: + path: "{{ pcp_metrics_output_dir }}" + state: directory + mode: '0755' + delegate_to: localhost + + - name: Save the collected metrics to a local file + copy: + content: "{{ _pcp_metrics_pmrep.stdout }}" + dest: "{{ pcp_metrics_output_dir }}/{{ ansible_hostname }}.csv" + delegate_to: localhost diff --git a/roles/pcp_metrics/tasks/main.yaml b/roles/pcp_metrics/tasks/main.yaml new file mode 100644 index 0000000000..5082e2001f --- /dev/null +++ b/roles/pcp_metrics/tasks/main.yaml @@ -0,0 +1,12 @@ +--- +- name: Setup PCP + ansible.builtin.include_role: + name: "{{ role_name }}" + tasks_from: setup + when: pcp_metrics_setup | bool + +- name: Gather metrics + ansible.builtin.include_role: + name: "{{ role_name }}" + 
tasks_from: gather + when: pcp_metrics_gather | bool diff --git a/roles/pcp_metrics/tasks/setup.yaml b/roles/pcp_metrics/tasks/setup.yaml new file mode 100644 index 0000000000..2d2c0f7117 --- /dev/null +++ b/roles/pcp_metrics/tasks/setup.yaml @@ -0,0 +1,15 @@ +--- +- name: Install and enable PCP + become: true + block: + - name: Install PCP + ansible.builtin.package: + name: "{{ pcp_metrics_packages }}" + state: present + use: ansible.builtin.dnf + + - name: Enable pmlogger service + ansible.builtin.systemd_service: + name: pmlogger.service + state: started + enabled: true diff --git a/zuul.d/molecule.yaml b/zuul.d/molecule.yaml index 8ed6149217..10decae4bd 100644 --- a/zuul.d/molecule.yaml +++ b/zuul.d/molecule.yaml @@ -981,6 +981,15 @@ - ^.config/molecule/.* name: cifmw-molecule-ovirt parent: cifmw-molecule-noop +- job: + files: + - ^common-requirements.txt + - ^test-requirements.txt + - ^roles/pcp_metrics/.* + - ^ci/playbooks/molecule.* + - ^.config/molecule/.* + name: cifmw-molecule-pcp_metrics + parent: cifmw-molecule-noop - job: files: - ^common-requirements.txt diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 0dd476ac34..f68e2c3351 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -81,6 +81,7 @@ - cifmw-molecule-os_must_gather - cifmw-molecule-os_net_setup - cifmw-molecule-ovirt + - cifmw-molecule-pcp_metrics - cifmw-molecule-pkg_build - cifmw-molecule-podman - cifmw-molecule-polarion From 9b47dc618c19298b20e15e816cfe61295f4e5989 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Fri, 24 Oct 2025 13:28:10 +0200 Subject: [PATCH 452/480] [PCP] Add CoreOS-specific tasks This commit introduces new playbook that allows patching the CoreOS environment, used e.g. in our CRC jobs, to make the setup of Performance Co-Pilot possible. 
--- roles/pcp_metrics/defaults/main.yaml | 1 + roles/pcp_metrics/tasks/coreos.yaml | 41 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 roles/pcp_metrics/tasks/coreos.yaml diff --git a/roles/pcp_metrics/defaults/main.yaml b/roles/pcp_metrics/defaults/main.yaml index d8434ad96c..26dbe386c8 100644 --- a/roles/pcp_metrics/defaults/main.yaml +++ b/roles/pcp_metrics/defaults/main.yaml @@ -7,6 +7,7 @@ pcp_metrics_gather: false pcp_metrics_packages: - pcp # for pmlogger - pcp-system-tools # for pmrep +pcp_repo_url: https://mirror.stream.centos.org/9-stream/ # Gather-specific variables pcp_metrics_archive: "/var/log/pcp/pmlogger/{{ ansible_nodename }}" diff --git a/roles/pcp_metrics/tasks/coreos.yaml b/roles/pcp_metrics/tasks/coreos.yaml new file mode 100644 index 0000000000..2710016fca --- /dev/null +++ b/roles/pcp_metrics/tasks/coreos.yaml @@ -0,0 +1,41 @@ +--- +# +# NOTE(sdatko): The OCP nodes we use have no yum repositories, so I add some. +# In CoreOS, things typically should be deployed as containers +# or in the layered filesystem via the rpm-ostree package tool. +# However, this requires either a lot of space or system reboot +# while all I need is to install a small service and enable it. +# So, this play allows to setup the PCP easily in our CI jobs, +# even though it may not be the way advised for real-world env. 
+# +- name: Set repositories + become: true + block: + - name: Set repositories (BaseOS) + ansible.builtin.yum_repository: + file: pcp-coreos-hack + name: baseos + description: BaseOS repository + baseurl: "{{ pcp_repo_url }}/BaseOS/$basearch/os/" + gpgcheck: no + + - name: Set repositories (AppStream) + ansible.builtin.yum_repository: + file: pcp-coreos-hack + name: appstream + description: AppStream repository + baseurl: "{{ pcp_repo_url }}/AppStream/$basearch/os/" + gpgcheck: no + +- name: Make /usr writable + become: true + ansible.posix.mount: + path: /usr + state: remounted + opts: rw + +- name: Create required directory + become: true + ansible.builtin.file: + path: /var/lib/rpm-state + state: directory From cf17b6af162491a0dd8929afaf8b92477d813267 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Wed, 17 Sep 2025 16:48:11 +0200 Subject: [PATCH 453/480] [PCP] Add custom pmrep metric specifications --- roles/pcp_metrics/defaults/main.yaml | 2 +- roles/pcp_metrics/files/pmrep-metricspec.conf | 63 +++++++++++++++++++ roles/pcp_metrics/tasks/setup.yaml | 8 +++ 3 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 roles/pcp_metrics/files/pmrep-metricspec.conf diff --git a/roles/pcp_metrics/defaults/main.yaml b/roles/pcp_metrics/defaults/main.yaml index 26dbe386c8..0d6aa3ebae 100644 --- a/roles/pcp_metrics/defaults/main.yaml +++ b/roles/pcp_metrics/defaults/main.yaml @@ -12,5 +12,5 @@ pcp_repo_url: https://mirror.stream.centos.org/9-stream/ # Gather-specific variables pcp_metrics_archive: "/var/log/pcp/pmlogger/{{ ansible_nodename }}" pcp_metrics_interval: 10 # seconds -pcp_metrics_metricspec: ':collectl-sc :collectl-sm :collectl-sd :collectl-sn' +pcp_metrics_metricspec: ':cifmw' pcp_metrics_output_dir: /tmp/pcp-metrics diff --git a/roles/pcp_metrics/files/pmrep-metricspec.conf b/roles/pcp_metrics/files/pmrep-metricspec.conf new file mode 100644 index 0000000000..b22c32da3c --- /dev/null +++ b/roles/pcp_metrics/files/pmrep-metricspec.conf @@ -0,0 
+1,63 @@ +# +# pmrep configuration file +# +# Based on default supplied PCP config that mimics collectl reports. +# +# NOTE: The metric specifications are of form: +# pcp.metric.name = label,instances,unit/scale,type,width,precision,limit + +[cifmw] +# +# General settings +# +header = yes +unitinfo = no +globals = no +timestamp = yes +precision = 2 +delimiter = " " + +# +# CPU usage +# +cpu = %%cpu,,,,5 +cpu.label = cpu +cpu.formula = 100 * (kernel.all.cpu.user + kernel.all.cpu.nice) / hinv.ncpu +cpu.unit = s +sys = %%sys,,,,5 +sys.label = sys +sys.formula = 100 * rate(kernel.all.cpu.sys) / hinv.ncpu +kernel.all.intr = intr/s,,,,7 +kernel.all.pswitch = cswch/s,,,,8 + +# +# RAM usage +# +mem.physmem = Total,,GB,,5 +mem.freemem = Free,,GB,,5 +mem.util.available = Avail,,GB,,5 +mem.util.bufmem = Buff,,GB,,5 +mem.util.cached = Cach,,GB,,5 +mem.util.inactive = Inac,,GB,,5 +mem.util.slab = Slab,,GB,,5 +mem.util.mapped = Map,,GB,,5 + +# +# disk usage +# +disk.all.read_bytes = KBRead,,KB,,6 +disk.all.read = Reads,,,,6 +disk.all.write_bytes = KBWrite,,KB,,6 +disk.all.write = Writes,,,,6 + +# +# network usage +# +kbin = KBIn,,KB,,6 +kbin.formula = sum(network.interface.in.bytes) +pktin = PktIn,,,,6 +pktin.formula = sum(network.interface.in.packets) +kbout = KBOut,,KB,,6 +kbout.formula = sum(network.interface.out.bytes) +pktout = PktOut,,,,6 +pktout.formula = sum(network.interface.out.packets) diff --git a/roles/pcp_metrics/tasks/setup.yaml b/roles/pcp_metrics/tasks/setup.yaml index 2d2c0f7117..a73d607907 100644 --- a/roles/pcp_metrics/tasks/setup.yaml +++ b/roles/pcp_metrics/tasks/setup.yaml @@ -8,6 +8,14 @@ state: present use: ansible.builtin.dnf + - name: Copy custom pmrep metric specifications + ansible.builtin.copy: + src: pmrep-metricspec.conf + dest: /etc/pcp/pmrep/cifmw.conf + owner: root + group: root + mode: '0644' + - name: Enable pmlogger service ansible.builtin.systemd_service: name: pmlogger.service From 7437d9c2b81bfde718be602fad37a80bfcdfefc3 Mon Sep 17 
00:00:00 2001 From: Szymon Datko Date: Fri, 5 Sep 2025 10:20:44 +0200 Subject: [PATCH 454/480] [PCP] Add solution to plot metrics This commit adds a script to produce figures from the collected metrics files, using matplotlib and pandas libraries. Additionally the corresponding Ansible tasks to invoke the script are also provided in the `pcp_metrics` role. --- roles/pcp_metrics/defaults/main.yaml | 4 + roles/pcp_metrics/files/plot.py | 518 ++++++++++++++++++ roles/pcp_metrics/files/plot.requirements.txt | 2 + roles/pcp_metrics/tasks/main.yaml | 6 + roles/pcp_metrics/tasks/plot.yaml | 28 + 5 files changed, 558 insertions(+) create mode 100755 roles/pcp_metrics/files/plot.py create mode 100644 roles/pcp_metrics/files/plot.requirements.txt create mode 100644 roles/pcp_metrics/tasks/plot.yaml diff --git a/roles/pcp_metrics/defaults/main.yaml b/roles/pcp_metrics/defaults/main.yaml index 0d6aa3ebae..1a9b616676 100644 --- a/roles/pcp_metrics/defaults/main.yaml +++ b/roles/pcp_metrics/defaults/main.yaml @@ -2,6 +2,7 @@ # Variables to control which parts of main playbook should be executed pcp_metrics_setup: false pcp_metrics_gather: false +pcp_metrics_plot: false # Setup-related variables pcp_metrics_packages: @@ -14,3 +15,6 @@ pcp_metrics_archive: "/var/log/pcp/pmlogger/{{ ansible_nodename }}" pcp_metrics_interval: 10 # seconds pcp_metrics_metricspec: ':cifmw' pcp_metrics_output_dir: /tmp/pcp-metrics + +# Plot-related variables +pcp_metrics_venv_dir: /tmp/pcp-metrics-venv diff --git a/roles/pcp_metrics/files/plot.py b/roles/pcp_metrics/files/plot.py new file mode 100755 index 0000000000..4759187288 --- /dev/null +++ b/roles/pcp_metrics/files/plot.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python3 + +import csv +from glob import glob +from os import getenv +import os.path +import sys +from time import asctime +from typing import Iterable +from typing import Iterator + +from matplotlib import pyplot as plt +import matplotlib.dates as mdates +import matplotlib.ticker as tck 
+import pandas as pd + + +# +# Parameters +# +METRICS_SRC = (sys.argv[1:] if len(sys.argv) > 1 + else [getenv('METRICS_SRC', 'metrics/*.csv')]) +OUTPUT_DIR = getenv('OUTPUT_DIR', 'metrics/') + +FIG_WIDTH = int(getenv('FIG_WIDTH', 8)) +FIG_HEIGHT = int(getenv('FIG_HEIGHT', 10)) + +PLOT_OPTIONS = { + 'dpi': 300, + 'bbox_inches': 'tight', +} + + +# +# Helper functions and classes +# +class ColorCycler(Iterable): + '''Cyclic generator of values, with method to restore original state.''' + def __init__(self) -> None: + self.values = [ + 'tab:blue', + 'tab:orange', + 'tab:green', + 'tab:red', + 'tab:purple', + 'tab:brown', + 'tab:pink', + 'tab:gray', + 'tab:olive', + 'tab:cyan', + ] + self.initial = self.values.copy() + + def __iter__(self) -> Iterator[str]: + return next(self) + + def __next__(self) -> str: + value = self.values[0] + self.values = self.values[1:] + self.values[:1] + return value + + def reset(self) -> None: + self.values = self.initial.copy() + + +class MyLogFormatter(tck.LogFormatter): + '''Custom formatter for logarithmic axis. + + Displays all values as `10^{exponent}`, except for `10^0` and `10^1`, + which instead are simply displayed as `1` and `10` respectively. + ''' + def _num_to_string(self, v, vmin, vmax): + num = str(v) + exponent = len(num) - 3 + + if exponent == 0: + return '1' + elif exponent == 1: + return '10' + else: + return f'$10^{exponent}$' + + +def is_csv(path: str) -> bool: + '''Checks whether a given path specifies a CSV file. + + Parameters + ---------- + path : str + A path to check whether it points to a CSV file. + + Returns + ------- + result : bool + True when the file exists and was recognized as a valid CSV file + by the csv.Sniffer class; False otherwise. 
+ ''' + with open(path, 'rt') as handle: + try: + csv.Sniffer().sniff(handle.read(1024)) + return True + + except Exception: + return False + + +def find_sources(*search_paths: str) -> list[str]: + '''Returns list of paths to CSV files found under specified search paths. + + Parameters + ---------- + *search_paths : str + A direct path or a glob pattern specifying where to look for CSV files. + When the path points to an existing directory, all regular files within + that directory are checked. The function accepts any number parameters. + + Returns + ------- + sources : list[str] + A sorted list of paths to discovered CSV files. + ''' + sources = [] + + for path in search_paths: + if os.path.isfile(path): + sources.append(path) + + elif os.path.isdir(path): + sources.extend(os.path.join(path, file) + for file in os.listdir(path) + if os.path.isfile(os.path.join(path, file))) + + else: # try glob expansion + sources.extend(candidate for candidate in glob(path) + if os.path.isfile(candidate)) + + # Filter-out non-csv files + sources = sorted(path for path in sources if is_csv(path)) + + if not sources: + print('ERROR No sources found in specified paths:', *search_paths) + sys.exit(1) + + return sources + + +def load_csv(path: str, source: str | None = None) -> pd.DataFrame: + '''Reads a CSV file from given path and loads into a DataFrame object. + + Parameters + ---------- + path : str + A path to CSV file to read into the DataFrame format. + source : str | None + Additional label to annotate the data (default: None). + + Returns + ------- + df : pd.DataFrame + The DataFrame object produced from the source CSV file. 
+ ''' + df = pd.read_csv(path, delimiter=r',\s*', engine='python') + df = df.rename(columns=lambda x: x.strip('"')) # strip quotes from headers + df['Time'] = pd.to_datetime(df['Time'], format='%Y-%m-%d_%H:%M:%S') + + if source: + df.insert(1, 'source', source) + + return df + + +def draw(ax: plt.Axes, + x: Iterable[float], + y: Iterable[float], + z: Iterable[float] | None = None, + color: str | None = None, + label: str | None = None, + ) -> None: + '''Plots a line chart in the given subplot object. + + Parameters + ---------- + ax : plt.Axes + A subplot object in which the data should be plotted. + x : Iterable[float] + The values (coordinates) along X-axis to be plotted. + y : Iterable[float] + The values (coordinates) along Y-axis to be plotted; + must have the same number of elements as parameter `x`. + z : Iterable[float] + Additional values (coordinates) along Y-axis to be plotted; + must have the same number of elements as parameter `x`. + color : str | None + The color name or hex RGB string to be used in the plot, + or None for the matplotlib to automatically pick one (default: None). + label : str | None + Additional label to annotate the data in plot (default: None). + ''' + if z is not None: + ax.fill_between(x, z, color=color, alpha=0.25) + + ax.fill_between(x, y, color=color, alpha=0.25) + ax.plot(x, y, color=color, alpha=1.0, label=label) + + +def set_xaxis(axs: Iterable[plt.Axes]) -> None: + '''Configures one common X-axis for the defined subplot objects. + + Parameters + ---------- + axs : Iterable[plt.Axes] + A collection of subplot objects in which the data were plotted. 
+ ''' + for ax in axs: + ax.label_outer() + + ax = axs[-1] # Just for clarity: all calls below refer to the last axis + + my_fmt = mdates.DateFormatter(r'%b %d, $\mathbf{%H:%M:%S}$') + ax.xaxis.set_major_formatter(my_fmt) + + my_fmt = mdates.DateFormatter(r'$%H:%M:%S$') + ax.xaxis.set_minor_formatter(my_fmt) + ax.xaxis.set_minor_locator(tck.AutoMinorLocator()) + + # NOTE: tick_params() is nicer to use, but only set_xticks() allows setting + # the horizontal alignment of labels (which is good for long labels) + for arg in ({'minor': False}, {'minor': True}): + ax.set_xticks(ax.get_xticks(**arg), ax.get_xticklabels(**arg), **arg, + size=8, rotation=45, ha='right', rotation_mode='anchor') + + +def set_yaxis(ax: plt.Axes, + ylabel: str = 'value []', + ylim_bottom: int | None = 0, + ylim_top: int | None = None, + yscale: str = 'linear', + ) -> None: + '''Configures the Y-axis in the given subplot object. + + Parameters + ---------- + ax : plt.Axes + A subplot object in which the data should be plotted. + ylabel : str + A text label to be displayed next to Y-axis (default: 'value []'). + ylim_bottom : int | None + A lower-bound value to be set on the Y-axis, + or None for the matplotlib to automatically pick one (default: 0). + ylim_top : int | None + An upper-bound value to be set on the Y-axis, + or None for the matplotlib to automatically pick one (default: None). + yscale : str + A name of matplotlib axis scale type to apply (default: 'linear'). 
+ ''' + if yscale == 'log' and ylim_bottom == 0: + ylim_bottom = 1 + if yscale == 'log' and not ylim_top: + ymax = int(ax.get_ylim()[1]) + ylim_top = max(1000, 10 ** len(str(ymax))) + + ax.set_ylabel(ylabel) + ax.set_ylim(bottom=ylim_bottom, top=ylim_top) + ax.set_yscale(yscale) + + if yscale == 'log': + ax.yaxis.set_major_formatter(MyLogFormatter(labelOnlyBase=True)) + ax.yaxis.set_minor_formatter(tck.NullFormatter()) + ax.yaxis.set_major_locator(tck.LogLocator(base=10, numticks=10)) + ax.yaxis.set_minor_locator(tck.LogLocator(base=10, + subs=(0.25, 0.5, 0.75), + numticks=10)) + else: + ax.yaxis.set_minor_locator(tck.AutoMinorLocator()) + + ax.grid(which='both', axis='both', linewidth=0.5, linestyle='dotted') + + +def set_legend(fig: plt.Figure, axs: Iterable[plt.Axes]) -> None: + '''Configures the legend to be displayed in the figure. + + Parameters + ---------- + fig : plt.Figure + A figure object in which the legend should be displayed. + axs : Iterable[plt.Axes] + A collection of subplot objects in which the data were plotted. + ''' + handles, labels = axs[-1].get_legend_handles_labels() + if handles and labels: + fig.legend(handles, labels, + loc='outside upper center', + mode='expand', ncol=4) + + +def subplot(ax: plt.Axes, + df: pd.DataFrame, + x: str = 'Time', + y: str = 'cpu', + z: str | None = None, + loop: str | None = None, + color: str | ColorCycler | None = None, + reset: bool = False, + ylabel: str = 'value []', + ylim_bottom: int | None = 0, + ylim_top: int | None = None, + yscale: str = 'linear', + ) -> None: + '''Generates a complete chart from one or multiple data series. + + Parameters + ---------- + ax : plt.Axes + A subplot object in which the data should be plotted. + df : pd.DataFrame + The DataFrame object containing all the data to be plotted. + x : str + The column name in the DataFrame object (given as the `df` parameter) + containing the values for X-axis (default: 'Time'). 
+ y : str + The column name or expression to extract the desired values for Y-axis + from the DataFrame object given as the `df` parameter (default: 'cpu'). + z : str | None + The column name or expression to extract the additional values + for Y-axis from the given DataFrame object (default: None). + loop : str | None + The optional column name in the Dataframe object (the `df` parameter) + which contains entries that shall be used to split the output values + (from `y` parameter) and draw as the separate labelled data series; + if not specified, a single data series is assumed (default: None). + color : str | ColorCycler | None + The color name or hex RGB string to be used in the plot, + or the instance of ColorCycler specifying set of color names, + or None for the default ColorCycler (default: None). + reset : bool + Sets whether to reset the ColorCycler instance between subplots, + ignored if given `color` parameter is not ColorCycler (default: False). + ylabel : str + A text label to be displayed next to Y-axis (default: 'value []'). + ylim_bottom : int | None + A lower-bound value to be set on the Y-axis, + or None for the matplotlib to automatically pick one (default: 0). + ylim_top : int | None + An upper-bound value to be set on the Y-axis, + or None for the matplotlib to automatically pick one (default: None). + yscale : str + A name of matplotlib axis scale type to apply (default: 'linear'). 
+ ''' + if reset and isinstance(color, ColorCycler): + color.reset() + + if yscale == 'log': + y += '+ 0.001' # ensure non-zero values + + if loop: + for item in df[loop].unique(): + c = next(color) if isinstance(color, ColorCycler) else color + draw(ax=ax, + x=df.query(f'{loop} == "{item}"')[x], + y=df.query(f'{loop} == "{item}"').eval(y), + z=df.query(f'{loop} == "{item}"').eval(z) if z else None, + color=c, + label=item) + else: + draw(ax=ax, + x=df[x], + y=df.eval(y), + z=df.eval(z) if z else None, + color=next(color) if isinstance(color, ColorCycler) else color) + + set_yaxis(ax=ax, + ylabel=ylabel, + ylim_bottom=ylim_bottom, + ylim_top=ylim_top, + yscale=yscale) + + +def plot(df: pd.DataFrame, + output: str, + title: str | None = None, + loop: str | None = None, + color: str | ColorCycler | None = None, + reset: bool = False, + ) -> None: + '''Produces the figure and saves it as PDF file under a given output path. + + Parameters + ---------- + df : pd.DataFrame + The DataFrame object containing all the data to be plotted. + output : str + The path where the generated plot should be saved as PDF file. + title : str | None + The text to be displayed on top of the produced figure (default: None). + loop : str | None + The optional column name in the Dataframe object (the `df` parameter) + which contains entries that shall be used to split the output values + (from `y` parameter) and draw as the separate labelled data series; + if not specified, a single data series is assumed (default: None). + color : str | ColorCycler | None + The color name or hex RGB string to be used in the plot, + or the instance of ColorCycler specifying set of color names, + or None for the default ColorCycler (default: None). + reset : bool + Sets whether to reset the ColorCycler instance between subplots, + ignored if given `color` parameter is not ColorCycler (default: False). 
+ ''' + plt.rcdefaults() + + fig, axs = plt.subplots(nrows=6, sharex=True, layout='constrained') + + if not color: + color = ColorCycler() + + if title: + fig.suptitle(title, fontsize=16) + + # NOTE: the argument of Pandas query() & eval() is expected to be a valid + # Python expression, which does not allow any special characters + # (e.g. dots); the backticks can be used to specify column names + # with such characters, so below instead of 'mem.used' we need + # to specify '`mem.used`' for some of the `y` parameters. + subplot(axs[0], df, + y='cpu + sys', + z='sys', + loop=loop, + color=color, + reset=reset, + ylabel='CPU [%]', + ylim_top=100) + + subplot(axs[1], df, + y='100 * (1 - `mem.freemem` / `mem.physmem`)', + z='100 * (1 - `mem.util.available` / `mem.physmem`)', + loop=loop, + color=color, + reset=reset, + ylabel='RAM [%]', + ylim_top=100) + + subplot(axs[2], df, + y='`disk.all.read_bytes`', + loop=loop, + color=color, + reset=reset, + ylabel='Read [kB/s]', + ylim_top=10**6, + yscale='log') + + subplot(axs[3], df, + y='`disk.all.write_bytes`', + loop=loop, + color=color, + reset=reset, + ylabel='Write [kB/s]', + ylim_top=10**6, + yscale='log') + + subplot(axs[4], df, + y='kbin', + loop=loop, + color=color, + reset=reset, + ylabel='Net in [kB/s]', + ylim_top=10**6, + yscale='log') + + subplot(axs[5], df, + y='kbout', + loop=loop, + color=color, + reset=reset, + ylabel='Net out [kB/s]', + ylim_top=10**6, + yscale='log') + + set_xaxis(axs) + set_legend(fig, axs) + + fig.set_figwidth(FIG_WIDTH) + fig.set_figheight(FIG_HEIGHT) + fig.savefig(output, format='pdf', **PLOT_OPTIONS) + + +# +# Main section +# +if __name__ == '__main__': + try: + paths = find_sources(*METRICS_SRC) + dfs = [] + + for path in paths: + print(asctime(), 'Loading:', path) + hostname = os.path.splitext(os.path.basename(path))[0] + dfs.append(load_csv(path, hostname)) + + df = pd.concat(dfs) + del dfs + + for hostname in sorted(df['source'].unique()): + path = os.path.join(OUTPUT_DIR, 
f'{hostname}.pdf') + print(asctime(), 'Generating:', path) + plot(df.query(f'source == "{hostname}"'), + output=path, + title=hostname) + + path = os.path.join(OUTPUT_DIR, 'all.pdf') + print(asctime(), 'Generating:', path) + plot(df, + output=path, + loop='source', + reset=True) + + print(asctime(), 'Done!') + + except KeyboardInterrupt: + print(flush=True) diff --git a/roles/pcp_metrics/files/plot.requirements.txt b/roles/pcp_metrics/files/plot.requirements.txt new file mode 100644 index 0000000000..babdd14a51 --- /dev/null +++ b/roles/pcp_metrics/files/plot.requirements.txt @@ -0,0 +1,2 @@ +matplotlib +pandas diff --git a/roles/pcp_metrics/tasks/main.yaml b/roles/pcp_metrics/tasks/main.yaml index 5082e2001f..f23ef4fd4c 100644 --- a/roles/pcp_metrics/tasks/main.yaml +++ b/roles/pcp_metrics/tasks/main.yaml @@ -10,3 +10,9 @@ name: "{{ role_name }}" tasks_from: gather when: pcp_metrics_gather | bool + +- name: Generate figures + ansible.builtin.include_role: + name: "{{ role_name }}" + tasks_from: plot + when: pcp_metrics_plot | bool diff --git a/roles/pcp_metrics/tasks/plot.yaml b/roles/pcp_metrics/tasks/plot.yaml new file mode 100644 index 0000000000..8ac4f4986b --- /dev/null +++ b/roles/pcp_metrics/tasks/plot.yaml @@ -0,0 +1,28 @@ +--- +- name: Ensure required system package + become: true + ansible.builtin.package: + name: python3-pip + state: present + +- name: Install dependencies in virtualenv + ansible.builtin.pip: + virtualenv_command: python3 -m venv + virtualenv: "{{ pcp_metrics_venv_dir }}" + requirements: "{{ role_path }}/files/plot.requirements.txt" + +- name: Copy plot script + ansible.builtin.copy: + src: plot.py + dest: "{{ pcp_metrics_venv_dir }}/plot.py" + mode: '0755' + +- name: Draw the figures + ansible.builtin.shell: >- + . 
"{{ pcp_metrics_venv_dir }}/bin/activate" + && export OUTPUT_DIR="{{ pcp_metrics_output_dir }}" + && python3 "{{ pcp_metrics_venv_dir }}/plot.py" "{{ pcp_metrics_output_dir }}/*.csv" + register: _pcp_shell + failed_when: + - _pcp_shell.rc != 0 + - '"ERROR No sources found in specified paths" not in _pcp_shell.stdout' From d80c49f342e28833b1c2be9663990bdd88db7a01 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Tue, 16 Sep 2025 22:19:39 +0200 Subject: [PATCH 455/480] [PCP] Make plot script compatible with Python 3.9 The `type | None` notation is possible from Python 3.10; for Python 3.9 it is necessary to either use `Optional[]` or `Union[]` specifier in typing. --- roles/pcp_metrics/files/plot.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/roles/pcp_metrics/files/plot.py b/roles/pcp_metrics/files/plot.py index 4759187288..5861a3d253 100755 --- a/roles/pcp_metrics/files/plot.py +++ b/roles/pcp_metrics/files/plot.py @@ -8,6 +8,7 @@ from time import asctime from typing import Iterable from typing import Iterator +from typing import Union from matplotlib import pyplot as plt import matplotlib.dates as mdates @@ -144,7 +145,7 @@ def find_sources(*search_paths: str) -> list[str]: return sources -def load_csv(path: str, source: str | None = None) -> pd.DataFrame: +def load_csv(path: str, source: Union[str, None] = None) -> pd.DataFrame: '''Reads a CSV file from given path and loads into a DataFrame object. Parameters @@ -172,9 +173,9 @@ def load_csv(path: str, source: str | None = None) -> pd.DataFrame: def draw(ax: plt.Axes, x: Iterable[float], y: Iterable[float], - z: Iterable[float] | None = None, - color: str | None = None, - label: str | None = None, + z: Union[Iterable[float], None] = None, + color: Union[str, None] = None, + label: Union[str, None] = None, ) -> None: '''Plots a line chart in the given subplot object. 
@@ -232,8 +233,8 @@ def set_xaxis(axs: Iterable[plt.Axes]) -> None: def set_yaxis(ax: plt.Axes, ylabel: str = 'value []', - ylim_bottom: int | None = 0, - ylim_top: int | None = None, + ylim_bottom: Union[int, None] = 0, + ylim_top: Union[int, None] = None, yscale: str = 'linear', ) -> None: '''Configures the Y-axis in the given subplot object. @@ -297,13 +298,13 @@ def subplot(ax: plt.Axes, df: pd.DataFrame, x: str = 'Time', y: str = 'cpu', - z: str | None = None, - loop: str | None = None, - color: str | ColorCycler | None = None, + z: Union[str, None] = None, + loop: Union[str, None] = None, + color: Union[str, ColorCycler, None] = None, reset: bool = False, ylabel: str = 'value []', - ylim_bottom: int | None = 0, - ylim_top: int | None = None, + ylim_bottom: Union[int, None] = 0, + ylim_top: Union[int, None] = None, yscale: str = 'linear', ) -> None: '''Generates a complete chart from one or multiple data series. @@ -377,9 +378,9 @@ def subplot(ax: plt.Axes, def plot(df: pd.DataFrame, output: str, - title: str | None = None, - loop: str | None = None, - color: str | ColorCycler | None = None, + title: Union[str, None] = None, + loop: Union[str, None] = None, + color: Union[str, ColorCycler, None] = None, reset: bool = False, ) -> None: '''Produces the figure and saves it as PDF file under a given output path. From 6ad7ddf09eb7b761128e924148f0ea23afcab68c Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Fri, 5 Sep 2025 14:27:32 +0200 Subject: [PATCH 456/480] [PCP] Add hooks to enable the metrics collection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Those playbooks are intented entry-points for the prepared `pcp_metrics` role – to be used in CI jobs e.g. at post_infra and pre_logs stages. 
--- hooks/playbooks/pcp-metrics-post.yml | 31 ++++++++++++++++++++++++++++ hooks/playbooks/pcp-metrics-pre.yml | 24 +++++++++++++++++++++ roles/pcp_metrics/README.md | 4 ++++ 3 files changed, 59 insertions(+) create mode 100644 hooks/playbooks/pcp-metrics-post.yml create mode 100644 hooks/playbooks/pcp-metrics-pre.yml diff --git a/hooks/playbooks/pcp-metrics-post.yml b/hooks/playbooks/pcp-metrics-post.yml new file mode 100644 index 0000000000..a04504715a --- /dev/null +++ b/hooks/playbooks/pcp-metrics-post.yml @@ -0,0 +1,31 @@ +--- +# +# Playbook that utilizes the Performance Co-Pilot toolkit +# to collect system metrics and generate figures for analysis. +# Relies on the corresponding `pcp_metrics` Ansible role. +# +# The best place to call this hook is under post_tests actions. +# +- name: Collect performance metrics + hosts: all,!localhost + gather_facts: false + tasks: + - name: Gather metrics + ansible.builtin.include_role: + name: pcp_metrics + tasks_from: gather + + +- name: Process metrics + hosts: localhost + tasks: + - name: Generate figures + ansible.builtin.include_role: + name: pcp_metrics + tasks_from: plot + + - name: Copy results to ci-framework-data + ansible.builtin.copy: + src: /tmp/pcp-metrics/ + dest: "{{ ansible_user_dir }}/ci-framework-data/logs/metrics" + mode: preserve diff --git a/hooks/playbooks/pcp-metrics-pre.yml b/hooks/playbooks/pcp-metrics-pre.yml new file mode 100644 index 0000000000..44e4770df5 --- /dev/null +++ b/hooks/playbooks/pcp-metrics-pre.yml @@ -0,0 +1,24 @@ +--- +# +# Playbook that setups the Performance Co-Pilot toolkit on infra. +# Relies on the corresponding `pcp_metrics` Ansible role. +# +# The best place to call this hook is under post_infra actions. 
+# +- name: Patch CoreOS + hosts: crc,ocps,ocp_workers + gather_facts: false + tasks: + - name: Patch CoreOS + ansible.builtin.include_role: + name: pcp_metrics + tasks_from: coreos + +- name: Start collecting performance metrics + hosts: all,!localhost + gather_facts: false + tasks: + - name: Setup PCP + ansible.builtin.include_role: + name: pcp_metrics + tasks_from: setup diff --git a/roles/pcp_metrics/README.md b/roles/pcp_metrics/README.md index 38290b6c95..4c7fd830b5 100644 --- a/roles/pcp_metrics/README.md +++ b/roles/pcp_metrics/README.md @@ -11,6 +11,10 @@ which is an AI coding assistant – and not a concern of this role here. Usage ----- +Please check the hooks provided in this repository for real-world examples: +- [pcp-metrics-pre.yml](/hooks/playbooks/pcp-metrics-pre.yml) +- [pcp-metrics-post.yml](/hooks/playbooks/pcp-metrics-post.yml) + To setup and enable PCP on the target host, include the role with setup tasks: ``` From de1c012da80ff24c50cccb24326213bc98f99e59 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Wed, 17 Sep 2025 22:42:03 +0200 Subject: [PATCH 457/480] [PCP] Add annotations to plots This commit introduces a feature that explores the Ansible log files from within the ci-framework-data directory and annotates the plots generated from PCP metrics with details, such as when some EDPM stages were executed and at what time there were testing frameworks launched. 
--- hooks/playbooks/pcp-metrics-post.yml | 5 +++ roles/pcp_metrics/files/plot.py | 39 ++++++++++++++++++++++++ roles/pcp_metrics/tasks/annotations.yaml | 32 +++++++++++++++++++ roles/pcp_metrics/tasks/plot.yaml | 1 + 4 files changed, 77 insertions(+) create mode 100644 roles/pcp_metrics/tasks/annotations.yaml diff --git a/hooks/playbooks/pcp-metrics-post.yml b/hooks/playbooks/pcp-metrics-post.yml index a04504715a..5423fa4062 100644 --- a/hooks/playbooks/pcp-metrics-post.yml +++ b/hooks/playbooks/pcp-metrics-post.yml @@ -19,6 +19,11 @@ - name: Process metrics hosts: localhost tasks: + - name: Gather annotations + ansible.builtin.include_role: + name: pcp_metrics + tasks_from: annotations + - name: Generate figures ansible.builtin.include_role: name: pcp_metrics diff --git a/roles/pcp_metrics/files/plot.py b/roles/pcp_metrics/files/plot.py index 5861a3d253..8fe47d8369 100755 --- a/roles/pcp_metrics/files/plot.py +++ b/roles/pcp_metrics/files/plot.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import csv +from datetime import datetime from glob import glob from os import getenv import os.path @@ -19,6 +20,7 @@ # # Parameters # +ANNOTATIONS_FILE = getenv('ANNOTATIONS_FILE', 'metrics/annotations.txt') METRICS_SRC = (sys.argv[1:] if len(sys.argv) > 1 else [getenv('METRICS_SRC', 'metrics/*.csv')]) OUTPUT_DIR = getenv('OUTPUT_DIR', 'metrics/') @@ -376,6 +378,41 @@ def subplot(ax: plt.Axes, yscale=yscale) +def annotate(axs: Iterable[plt.Axes]) -> None: + '''Draws vertical annotation lines on the interesting time marks. + + Parameters + ---------- + axs : Iterable[plt.Axes] + A collection of subplot objects in which the data were plotted. 
+ ''' + if not os.path.isfile(ANNOTATIONS_FILE): + print('WARNING No annotations dafa found in file:', ANNOTATIONS_FILE) + return + + with open(ANNOTATIONS_FILE) as file: + data = file.read().strip().split('\n') + + for annotation in data: + time, details = annotation.split(' | ', maxsplit=1) + time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S,%f') + + if details.startswith('PLAY'): + color = 'darkred' + + elif details.startswith('TASK [kustomize_deploy'): + color = 'navy' + + elif details.startswith('TASK [test_operator'): + color = 'darkgreen' + + else: # generic + color = 'grey' + + for ax in axs: + ax.axvline(time, color=color, ls='--', alpha=0.5) + + def plot(df: pd.DataFrame, output: str, title: Union[str, None] = None, @@ -416,6 +453,8 @@ def plot(df: pd.DataFrame, if title: fig.suptitle(title, fontsize=16) + annotate(axs) + # NOTE: the argument of Pandas query() & eval() is expected to be a valid # Python expression, which does not allow any special characters # (e.g. dots); the backticks can be used to specify column names diff --git a/roles/pcp_metrics/tasks/annotations.yaml b/roles/pcp_metrics/tasks/annotations.yaml new file mode 100644 index 0000000000..fb99f2db28 --- /dev/null +++ b/roles/pcp_metrics/tasks/annotations.yaml @@ -0,0 +1,32 @@ +--- +- name: Extract annotations + ansible.builtin.shell: >- + grep + --no-filename + --regexp 'PLAY \[' + --regexp 'TASK \[kustomize_deploy : Apply generated content for' + --regexp 'TASK \[test_operator : Run' + $( find "{{ ansible_user_dir }}" -iname 'ansible*.log' 2>/dev/null ) + | sed + --regexp-extended + --expression 's#p=[0-9]+ u=[^ ]+ n=[^ ]+ |##g' + --expression 's# (_raw_params|msg|chdir)=.*#]#g' + --expression 's#[ *]*$##g' + | sort + --numeric-sort + --key=1,2 + | uniq + ignore_errors: true + changed_when: false + register: _annotations_shell + +- name: Ensure the output directory exist + file: + path: "{{ pcp_metrics_output_dir }}" + state: directory + mode: '0755' + +- name: Save annotations + copy: + 
content: "{{ _annotations_shell.stdout }}" + dest: "{{ pcp_metrics_output_dir }}/annotations.txt" diff --git a/roles/pcp_metrics/tasks/plot.yaml b/roles/pcp_metrics/tasks/plot.yaml index 8ac4f4986b..52a81f1086 100644 --- a/roles/pcp_metrics/tasks/plot.yaml +++ b/roles/pcp_metrics/tasks/plot.yaml @@ -21,6 +21,7 @@ ansible.builtin.shell: >- . "{{ pcp_metrics_venv_dir }}/bin/activate" && export OUTPUT_DIR="{{ pcp_metrics_output_dir }}" + && export ANNOTATIONS_FILE="{{ pcp_metrics_output_dir }}/annotations.txt" && python3 "{{ pcp_metrics_venv_dir }}/plot.py" "{{ pcp_metrics_output_dir }}/*.csv" register: _pcp_shell failed_when: From 4f17aab626c738747a708993719905f3d6a6eff4 Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Mon, 29 Sep 2025 10:43:43 +0200 Subject: [PATCH 458/480] [PCP] Figure auto-width feature For long-running jobs, this commit causes the produced plots to be wider, so the performance statistics are always properly visible for analysis. --- roles/pcp_metrics/files/plot.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/pcp_metrics/files/plot.py b/roles/pcp_metrics/files/plot.py index 8fe47d8369..dc6fbe92e9 100755 --- a/roles/pcp_metrics/files/plot.py +++ b/roles/pcp_metrics/files/plot.py @@ -517,7 +517,9 @@ def plot(df: pd.DataFrame, set_xaxis(axs) set_legend(fig, axs) - fig.set_figwidth(FIG_WIDTH) + fig.set_figwidth(max(FIG_WIDTH, + 2 * df['Time'].agg(['min', 'max']).diff().dropna() + .iloc[0].ceil('h').components.hours)) fig.set_figheight(FIG_HEIGHT) fig.savefig(output, format='pdf', **PLOT_OPTIONS) From 8c75c503a54c0c8ed8b9f393d4e40f664e0e81fb Mon Sep 17 00:00:00 2001 From: Szymon Datko Date: Wed, 29 Oct 2025 21:27:40 +0100 Subject: [PATCH 459/480] [minor] Paint it black I see the red job and I want it painted black; no linter errors anymore, I want pre-commit pass! (i.e. 
the linter knows better, so this commit is just to satisfy it) --- roles/pcp_metrics/files/plot.py | 462 +++++++++++++++++--------------- 1 file changed, 251 insertions(+), 211 deletions(-) diff --git a/roles/pcp_metrics/files/plot.py b/roles/pcp_metrics/files/plot.py index dc6fbe92e9..39cb1780f1 100755 --- a/roles/pcp_metrics/files/plot.py +++ b/roles/pcp_metrics/files/plot.py @@ -20,17 +20,18 @@ # # Parameters # -ANNOTATIONS_FILE = getenv('ANNOTATIONS_FILE', 'metrics/annotations.txt') -METRICS_SRC = (sys.argv[1:] if len(sys.argv) > 1 - else [getenv('METRICS_SRC', 'metrics/*.csv')]) -OUTPUT_DIR = getenv('OUTPUT_DIR', 'metrics/') +ANNOTATIONS_FILE = getenv("ANNOTATIONS_FILE", "metrics/annotations.txt") +METRICS_SRC = ( + sys.argv[1:] if len(sys.argv) > 1 else [getenv("METRICS_SRC", "metrics/*.csv")] +) +OUTPUT_DIR = getenv("OUTPUT_DIR", "metrics/") -FIG_WIDTH = int(getenv('FIG_WIDTH', 8)) -FIG_HEIGHT = int(getenv('FIG_HEIGHT', 10)) +FIG_WIDTH = int(getenv("FIG_WIDTH", 8)) +FIG_HEIGHT = int(getenv("FIG_HEIGHT", 10)) PLOT_OPTIONS = { - 'dpi': 300, - 'bbox_inches': 'tight', + "dpi": 300, + "bbox_inches": "tight", } @@ -38,19 +39,20 @@ # Helper functions and classes # class ColorCycler(Iterable): - '''Cyclic generator of values, with method to restore original state.''' + """Cyclic generator of values, with method to restore original state.""" + def __init__(self) -> None: self.values = [ - 'tab:blue', - 'tab:orange', - 'tab:green', - 'tab:red', - 'tab:purple', - 'tab:brown', - 'tab:pink', - 'tab:gray', - 'tab:olive', - 'tab:cyan', + "tab:blue", + "tab:orange", + "tab:green", + "tab:red", + "tab:purple", + "tab:brown", + "tab:pink", + "tab:gray", + "tab:olive", + "tab:cyan", ] self.initial = self.values.copy() @@ -67,25 +69,26 @@ def reset(self) -> None: class MyLogFormatter(tck.LogFormatter): - '''Custom formatter for logarithmic axis. + """Custom formatter for logarithmic axis. 
Displays all values as `10^{exponent}`, except for `10^0` and `10^1`, which instead are simply displayed as `1` and `10` respectively. - ''' + """ + def _num_to_string(self, v, vmin, vmax): num = str(v) exponent = len(num) - 3 if exponent == 0: - return '1' + return "1" elif exponent == 1: - return '10' + return "10" else: - return f'$10^{exponent}$' + return f"$10^{exponent}$" def is_csv(path: str) -> bool: - '''Checks whether a given path specifies a CSV file. + """Checks whether a given path specifies a CSV file. Parameters ---------- @@ -97,8 +100,8 @@ def is_csv(path: str) -> bool: result : bool True when the file exists and was recognized as a valid CSV file by the csv.Sniffer class; False otherwise. - ''' - with open(path, 'rt') as handle: + """ + with open(path, "rt") as handle: try: csv.Sniffer().sniff(handle.read(1024)) return True @@ -108,7 +111,7 @@ def is_csv(path: str) -> bool: def find_sources(*search_paths: str) -> list[str]: - '''Returns list of paths to CSV files found under specified search paths. + """Returns list of paths to CSV files found under specified search paths. Parameters ---------- @@ -121,7 +124,7 @@ def find_sources(*search_paths: str) -> list[str]: ------- sources : list[str] A sorted list of paths to discovered CSV files. 
- ''' + """ sources = [] for path in search_paths: @@ -129,26 +132,29 @@ def find_sources(*search_paths: str) -> list[str]: sources.append(path) elif os.path.isdir(path): - sources.extend(os.path.join(path, file) - for file in os.listdir(path) - if os.path.isfile(os.path.join(path, file))) + sources.extend( + os.path.join(path, file) + for file in os.listdir(path) + if os.path.isfile(os.path.join(path, file)) + ) else: # try glob expansion - sources.extend(candidate for candidate in glob(path) - if os.path.isfile(candidate)) + sources.extend( + candidate for candidate in glob(path) if os.path.isfile(candidate) + ) # Filter-out non-csv files sources = sorted(path for path in sources if is_csv(path)) if not sources: - print('ERROR No sources found in specified paths:', *search_paths) + print("ERROR No sources found in specified paths:", *search_paths) sys.exit(1) return sources def load_csv(path: str, source: Union[str, None] = None) -> pd.DataFrame: - '''Reads a CSV file from given path and loads into a DataFrame object. + """Reads a CSV file from given path and loads into a DataFrame object. Parameters ---------- @@ -161,25 +167,26 @@ def load_csv(path: str, source: Union[str, None] = None) -> pd.DataFrame: ------- df : pd.DataFrame The DataFrame object produced from the source CSV file. - ''' - df = pd.read_csv(path, delimiter=r',\s*', engine='python') + """ + df = pd.read_csv(path, delimiter=r",\s*", engine="python") df = df.rename(columns=lambda x: x.strip('"')) # strip quotes from headers - df['Time'] = pd.to_datetime(df['Time'], format='%Y-%m-%d_%H:%M:%S') + df["Time"] = pd.to_datetime(df["Time"], format="%Y-%m-%d_%H:%M:%S") if source: - df.insert(1, 'source', source) + df.insert(1, "source", source) return df -def draw(ax: plt.Axes, - x: Iterable[float], - y: Iterable[float], - z: Union[Iterable[float], None] = None, - color: Union[str, None] = None, - label: Union[str, None] = None, - ) -> None: - '''Plots a line chart in the given subplot object. 
+def draw( + ax: plt.Axes, + x: Iterable[float], + y: Iterable[float], + z: Union[Iterable[float], None] = None, + color: Union[str, None] = None, + label: Union[str, None] = None, +) -> None: + """Plots a line chart in the given subplot object. Parameters ---------- @@ -198,7 +205,7 @@ def draw(ax: plt.Axes, or None for the matplotlib to automatically pick one (default: None). label : str | None Additional label to annotate the data in plot (default: None). - ''' + """ if z is not None: ax.fill_between(x, z, color=color, alpha=0.25) @@ -207,39 +214,47 @@ def draw(ax: plt.Axes, def set_xaxis(axs: Iterable[plt.Axes]) -> None: - '''Configures one common X-axis for the defined subplot objects. + """Configures one common X-axis for the defined subplot objects. Parameters ---------- axs : Iterable[plt.Axes] A collection of subplot objects in which the data were plotted. - ''' + """ for ax in axs: ax.label_outer() ax = axs[-1] # Just for clarity: all calls below refer to the last axis - my_fmt = mdates.DateFormatter(r'%b %d, $\mathbf{%H:%M:%S}$') + my_fmt = mdates.DateFormatter(r"%b %d, $\mathbf{%H:%M:%S}$") ax.xaxis.set_major_formatter(my_fmt) - my_fmt = mdates.DateFormatter(r'$%H:%M:%S$') + my_fmt = mdates.DateFormatter(r"$%H:%M:%S$") ax.xaxis.set_minor_formatter(my_fmt) ax.xaxis.set_minor_locator(tck.AutoMinorLocator()) # NOTE: tick_params() is nicer to use, but only set_xticks() allows setting # the horizontal alignment of labels (which is good for long labels) - for arg in ({'minor': False}, {'minor': True}): - ax.set_xticks(ax.get_xticks(**arg), ax.get_xticklabels(**arg), **arg, - size=8, rotation=45, ha='right', rotation_mode='anchor') - - -def set_yaxis(ax: plt.Axes, - ylabel: str = 'value []', - ylim_bottom: Union[int, None] = 0, - ylim_top: Union[int, None] = None, - yscale: str = 'linear', - ) -> None: - '''Configures the Y-axis in the given subplot object. 
+ for arg in ({"minor": False}, {"minor": True}): + ax.set_xticks( + ax.get_xticks(**arg), + ax.get_xticklabels(**arg), + **arg, + size=8, + rotation=45, + ha="right", + rotation_mode="anchor", + ) + + +def set_yaxis( + ax: plt.Axes, + ylabel: str = "value []", + ylim_bottom: Union[int, None] = 0, + ylim_top: Union[int, None] = None, + yscale: str = "linear", +) -> None: + """Configures the Y-axis in the given subplot object. Parameters ---------- @@ -255,10 +270,10 @@ def set_yaxis(ax: plt.Axes, or None for the matplotlib to automatically pick one (default: None). yscale : str A name of matplotlib axis scale type to apply (default: 'linear'). - ''' - if yscale == 'log' and ylim_bottom == 0: + """ + if yscale == "log" and ylim_bottom == 0: ylim_bottom = 1 - if yscale == 'log' and not ylim_top: + if yscale == "log" and not ylim_top: ymax = int(ax.get_ylim()[1]) ylim_top = max(1000, 10 ** len(str(ymax))) @@ -266,21 +281,21 @@ def set_yaxis(ax: plt.Axes, ax.set_ylim(bottom=ylim_bottom, top=ylim_top) ax.set_yscale(yscale) - if yscale == 'log': + if yscale == "log": ax.yaxis.set_major_formatter(MyLogFormatter(labelOnlyBase=True)) ax.yaxis.set_minor_formatter(tck.NullFormatter()) ax.yaxis.set_major_locator(tck.LogLocator(base=10, numticks=10)) - ax.yaxis.set_minor_locator(tck.LogLocator(base=10, - subs=(0.25, 0.5, 0.75), - numticks=10)) + ax.yaxis.set_minor_locator( + tck.LogLocator(base=10, subs=(0.25, 0.5, 0.75), numticks=10) + ) else: ax.yaxis.set_minor_locator(tck.AutoMinorLocator()) - ax.grid(which='both', axis='both', linewidth=0.5, linestyle='dotted') + ax.grid(which="both", axis="both", linewidth=0.5, linestyle="dotted") def set_legend(fig: plt.Figure, axs: Iterable[plt.Axes]) -> None: - '''Configures the legend to be displayed in the figure. + """Configures the legend to be displayed in the figure. Parameters ---------- @@ -288,28 +303,27 @@ def set_legend(fig: plt.Figure, axs: Iterable[plt.Axes]) -> None: A figure object in which the legend should be displayed. 
axs : Iterable[plt.Axes] A collection of subplot objects in which the data were plotted. - ''' + """ handles, labels = axs[-1].get_legend_handles_labels() if handles and labels: - fig.legend(handles, labels, - loc='outside upper center', - mode='expand', ncol=4) - - -def subplot(ax: plt.Axes, - df: pd.DataFrame, - x: str = 'Time', - y: str = 'cpu', - z: Union[str, None] = None, - loop: Union[str, None] = None, - color: Union[str, ColorCycler, None] = None, - reset: bool = False, - ylabel: str = 'value []', - ylim_bottom: Union[int, None] = 0, - ylim_top: Union[int, None] = None, - yscale: str = 'linear', - ) -> None: - '''Generates a complete chart from one or multiple data series. + fig.legend(handles, labels, loc="outside upper center", mode="expand", ncol=4) + + +def subplot( + ax: plt.Axes, + df: pd.DataFrame, + x: str = "Time", + y: str = "cpu", + z: Union[str, None] = None, + loop: Union[str, None] = None, + color: Union[str, ColorCycler, None] = None, + reset: bool = False, + ylabel: str = "value []", + ylim_bottom: Union[int, None] = 0, + ylim_top: Union[int, None] = None, + yscale: str = "linear", +) -> None: + """Generates a complete chart from one or multiple data series. Parameters ---------- @@ -348,79 +362,82 @@ def subplot(ax: plt.Axes, or None for the matplotlib to automatically pick one (default: None). yscale : str A name of matplotlib axis scale type to apply (default: 'linear'). 
- ''' + """ if reset and isinstance(color, ColorCycler): color.reset() - if yscale == 'log': - y += '+ 0.001' # ensure non-zero values + if yscale == "log": + y += "+ 0.001" # ensure non-zero values if loop: for item in df[loop].unique(): c = next(color) if isinstance(color, ColorCycler) else color - draw(ax=ax, - x=df.query(f'{loop} == "{item}"')[x], - y=df.query(f'{loop} == "{item}"').eval(y), - z=df.query(f'{loop} == "{item}"').eval(z) if z else None, - color=c, - label=item) + draw( + ax=ax, + x=df.query(f'{loop} == "{item}"')[x], + y=df.query(f'{loop} == "{item}"').eval(y), + z=df.query(f'{loop} == "{item}"').eval(z) if z else None, + color=c, + label=item, + ) else: - draw(ax=ax, - x=df[x], - y=df.eval(y), - z=df.eval(z) if z else None, - color=next(color) if isinstance(color, ColorCycler) else color) + draw( + ax=ax, + x=df[x], + y=df.eval(y), + z=df.eval(z) if z else None, + color=next(color) if isinstance(color, ColorCycler) else color, + ) - set_yaxis(ax=ax, - ylabel=ylabel, - ylim_bottom=ylim_bottom, - ylim_top=ylim_top, - yscale=yscale) + set_yaxis( + ax=ax, ylabel=ylabel, ylim_bottom=ylim_bottom, ylim_top=ylim_top, yscale=yscale + ) def annotate(axs: Iterable[plt.Axes]) -> None: - '''Draws vertical annotation lines on the interesting time marks. + """Draws vertical annotation lines on the interesting time marks. Parameters ---------- axs : Iterable[plt.Axes] A collection of subplot objects in which the data were plotted. 
- ''' + """ if not os.path.isfile(ANNOTATIONS_FILE): - print('WARNING No annotations dafa found in file:', ANNOTATIONS_FILE) + print("WARNING No annotations dafa found in file:", ANNOTATIONS_FILE) return with open(ANNOTATIONS_FILE) as file: - data = file.read().strip().split('\n') + data = file.read().strip().split("\n") for annotation in data: - time, details = annotation.split(' | ', maxsplit=1) - time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S,%f') + time, details = annotation.split(" | ", maxsplit=1) + time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S,%f") - if details.startswith('PLAY'): - color = 'darkred' + if details.startswith("PLAY"): + color = "darkred" - elif details.startswith('TASK [kustomize_deploy'): - color = 'navy' + elif details.startswith("TASK [kustomize_deploy"): + color = "navy" - elif details.startswith('TASK [test_operator'): - color = 'darkgreen' + elif details.startswith("TASK [test_operator"): + color = "darkgreen" else: # generic - color = 'grey' + color = "grey" for ax in axs: - ax.axvline(time, color=color, ls='--', alpha=0.5) + ax.axvline(time, color=color, ls="--", alpha=0.5) -def plot(df: pd.DataFrame, - output: str, - title: Union[str, None] = None, - loop: Union[str, None] = None, - color: Union[str, ColorCycler, None] = None, - reset: bool = False, - ) -> None: - '''Produces the figure and saves it as PDF file under a given output path. +def plot( + df: pd.DataFrame, + output: str, + title: Union[str, None] = None, + loop: Union[str, None] = None, + color: Union[str, ColorCycler, None] = None, + reset: bool = False, +) -> None: + """Produces the figure and saves it as PDF file under a given output path. Parameters ---------- @@ -442,10 +459,10 @@ def plot(df: pd.DataFrame, reset : bool Sets whether to reset the ColorCycler instance between subplots, ignored if given `color` parameter is not ColorCycler (default: False). 
- ''' + """ plt.rcdefaults() - fig, axs = plt.subplots(nrows=6, sharex=True, layout='constrained') + fig, axs = plt.subplots(nrows=6, sharex=True, layout="constrained") if not color: color = ColorCycler() @@ -460,101 +477,124 @@ def plot(df: pd.DataFrame, # (e.g. dots); the backticks can be used to specify column names # with such characters, so below instead of 'mem.used' we need # to specify '`mem.used`' for some of the `y` parameters. - subplot(axs[0], df, - y='cpu + sys', - z='sys', - loop=loop, - color=color, - reset=reset, - ylabel='CPU [%]', - ylim_top=100) - - subplot(axs[1], df, - y='100 * (1 - `mem.freemem` / `mem.physmem`)', - z='100 * (1 - `mem.util.available` / `mem.physmem`)', - loop=loop, - color=color, - reset=reset, - ylabel='RAM [%]', - ylim_top=100) - - subplot(axs[2], df, - y='`disk.all.read_bytes`', - loop=loop, - color=color, - reset=reset, - ylabel='Read [kB/s]', - ylim_top=10**6, - yscale='log') - - subplot(axs[3], df, - y='`disk.all.write_bytes`', - loop=loop, - color=color, - reset=reset, - ylabel='Write [kB/s]', - ylim_top=10**6, - yscale='log') - - subplot(axs[4], df, - y='kbin', - loop=loop, - color=color, - reset=reset, - ylabel='Net in [kB/s]', - ylim_top=10**6, - yscale='log') - - subplot(axs[5], df, - y='kbout', - loop=loop, - color=color, - reset=reset, - ylabel='Net out [kB/s]', - ylim_top=10**6, - yscale='log') + subplot( + axs[0], + df, + y="cpu + sys", + z="sys", + loop=loop, + color=color, + reset=reset, + ylabel="CPU [%]", + ylim_top=100, + ) + + subplot( + axs[1], + df, + y="100 * (1 - `mem.freemem` / `mem.physmem`)", + z="100 * (1 - `mem.util.available` / `mem.physmem`)", + loop=loop, + color=color, + reset=reset, + ylabel="RAM [%]", + ylim_top=100, + ) + + subplot( + axs[2], + df, + y="`disk.all.read_bytes`", + loop=loop, + color=color, + reset=reset, + ylabel="Read [kB/s]", + ylim_top=10**6, + yscale="log", + ) + + subplot( + axs[3], + df, + y="`disk.all.write_bytes`", + loop=loop, + color=color, + reset=reset, + 
ylabel="Write [kB/s]", + ylim_top=10**6, + yscale="log", + ) + + subplot( + axs[4], + df, + y="kbin", + loop=loop, + color=color, + reset=reset, + ylabel="Net in [kB/s]", + ylim_top=10**6, + yscale="log", + ) + + subplot( + axs[5], + df, + y="kbout", + loop=loop, + color=color, + reset=reset, + ylabel="Net out [kB/s]", + ylim_top=10**6, + yscale="log", + ) set_xaxis(axs) set_legend(fig, axs) - fig.set_figwidth(max(FIG_WIDTH, - 2 * df['Time'].agg(['min', 'max']).diff().dropna() - .iloc[0].ceil('h').components.hours)) + fig.set_figwidth( + max( + FIG_WIDTH, + 2 + * df["Time"] + .agg(["min", "max"]) + .diff() + .dropna() + .iloc[0] + .ceil("h") + .components.hours, + ) + ) fig.set_figheight(FIG_HEIGHT) - fig.savefig(output, format='pdf', **PLOT_OPTIONS) + fig.savefig(output, format="pdf", **PLOT_OPTIONS) # # Main section # -if __name__ == '__main__': +if __name__ == "__main__": try: paths = find_sources(*METRICS_SRC) dfs = [] for path in paths: - print(asctime(), 'Loading:', path) + print(asctime(), "Loading:", path) hostname = os.path.splitext(os.path.basename(path))[0] dfs.append(load_csv(path, hostname)) df = pd.concat(dfs) del dfs - for hostname in sorted(df['source'].unique()): - path = os.path.join(OUTPUT_DIR, f'{hostname}.pdf') - print(asctime(), 'Generating:', path) - plot(df.query(f'source == "{hostname}"'), - output=path, - title=hostname) - - path = os.path.join(OUTPUT_DIR, 'all.pdf') - print(asctime(), 'Generating:', path) - plot(df, - output=path, - loop='source', - reset=True) - - print(asctime(), 'Done!') + for hostname in sorted(df["source"].unique()): + path = os.path.join(OUTPUT_DIR, f"{hostname}.pdf") + print(asctime(), "Generating:", path) + plot(df.query(f'source == "{hostname}"'), output=path, title=hostname) + + path = os.path.join(OUTPUT_DIR, "all.pdf") + print(asctime(), "Generating:", path) + plot(df, output=path, loop="source", reset=True) + + print(asctime(), "Done!") except KeyboardInterrupt: print(flush=True) From 
b3778e5cf95bf65c9b58e3db1f07187f48abdfb6 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 28 Oct 2025 17:30:46 +0100 Subject: [PATCH 460/480] Read group_vars/all.yml in each nested Ansible execution in ci playbooks Some of the CI jobs, even when ansible-playbook execution is done from ci-framework dir, does not read group_vars/all.yml variable file. That's because the playbook execution require to have symlink to the group_vars in the playbook dir. Until we still have nested ansible execution available in ci-framework, we should read the all.yml file using ansible extra vars parameters instead of relay that there should be symlink done in this dir. Signed-off-by: Daniel Pawlik --- ci/playbooks/architecture/run.yml | 1 + ci/playbooks/architecture/validate-architecture.yml | 1 + ci/playbooks/bootstrap-networking-mapper.yml | 1 + ci/playbooks/build_push_container_runner.yml | 1 + ci/playbooks/content_provider/run.yml | 1 + ci/playbooks/e2e-run.yml | 3 +++ ci/playbooks/edpm/run.yml | 2 ++ ci/playbooks/edpm/update.yml | 1 + ci/playbooks/edpm_baremetal_deployment/run.yml | 1 + .../edpm_build_images_content_provider_run.yaml | 1 + ci/playbooks/edpm_build_images/run.yml | 1 + ci/playbooks/kuttl/kuttl-from-operator-deps.yaml | 1 + ci/playbooks/kuttl/kuttl-from-operator-run.yaml | 1 + ci/playbooks/kuttl/run.yml | 1 + ci/playbooks/meta_content_provider/run.yml | 1 + ci/playbooks/tcib/run.yml | 1 + ci/playbooks/test-base-job/test-run.yml | 1 + 17 files changed, 20 insertions(+) diff --git a/ci/playbooks/architecture/run.yml b/ci/playbooks/architecture/run.yml index 0b45fd5cdb..2b4f968ff4 100644 --- a/ci/playbooks/architecture/run.yml +++ b/ci/playbooks/architecture/run.yml @@ -16,4 +16,5 @@ cmd: >- ansible-playbook -i localhost, -c local ci/playbooks/architecture/validate-architecture.yml + -e @group_vars/all.yml -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" diff --git a/ci/playbooks/architecture/validate-architecture.yml 
b/ci/playbooks/architecture/validate-architecture.yml index 283fab317a..3429a600f7 100644 --- a/ci/playbooks/architecture/validate-architecture.yml +++ b/ci/playbooks/architecture/validate-architecture.yml @@ -2,6 +2,7 @@ # Usage and expected parameters # $ ansible-playbook -i localhost, -c local \ # validate-architecture.yml \ +# -e @group_vars/all.yml \ # -e cifmw_architecture_repo=$HOME/architecture \ # -e cifmw_architecture_scenario=hci \ # -e cifmw_networking_mapper_networking_env_def_path=$HOME/net-env.yml diff --git a/ci/playbooks/bootstrap-networking-mapper.yml b/ci/playbooks/bootstrap-networking-mapper.yml index 5d91e085a8..dd5672d498 100644 --- a/ci/playbooks/bootstrap-networking-mapper.yml +++ b/ci/playbooks/bootstrap-networking-mapper.yml @@ -21,6 +21,7 @@ cmd: >- ~/test-python/bin/ansible-playbook {{ ansible_user_dir }}/networking_mapper.yml -i {{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" -e cifmw_networking_mapper_ifaces_info_path=/etc/ci/env/interfaces-info.yml diff --git a/ci/playbooks/build_push_container_runner.yml b/ci/playbooks/build_push_container_runner.yml index 957c24808a..adbea8fdb5 100644 --- a/ci/playbooks/build_push_container_runner.yml +++ b/ci/playbooks/build_push_container_runner.yml @@ -10,4 +10,5 @@ cmd: >- ~/test-python/bin/ansible-playbook ci/playbooks/build_push_container.yml -i {{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml + -e @group_vars/all.yml -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" diff --git a/ci/playbooks/content_provider/run.yml b/ci/playbooks/content_provider/run.yml index 81788f7e03..f73fc24bd0 100644 --- a/ci/playbooks/content_provider/run.yml +++ b/ci/playbooks/content_provider/run.yml @@ -18,6 +18,7 @@ cmd: >- ansible-playbook -i localhost, -c local 
ci/playbooks/content_provider/content_provider.yml + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/content_provider.yml -e "@{{ ansible_user_dir }}/ci-framework-data/artifacts/parameters/zuul-params.yml" diff --git a/ci/playbooks/e2e-run.yml b/ci/playbooks/e2e-run.yml index ebcc50d85d..5b9ef494c1 100644 --- a/ci/playbooks/e2e-run.yml +++ b/ci/playbooks/e2e-run.yml @@ -11,6 +11,7 @@ cmd: >- ansible-playbook deploy-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/install_yamls.yml {%- if cifmw_extras is defined %} @@ -32,6 +33,7 @@ cmd: >- ansible-playbook deploy-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/install_yamls.yml {%- if cifmw_extras is defined %} @@ -54,6 +56,7 @@ cmd: >- ansible-playbook deploy-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/install_yamls.yml {%- if cifmw_extras is defined %} diff --git a/ci/playbooks/edpm/run.yml b/ci/playbooks/edpm/run.yml index e49364271d..29d14ed7aa 100644 --- a/ci/playbooks/edpm/run.yml +++ b/ci/playbooks/edpm/run.yml @@ -28,6 +28,7 @@ cmd: >- ansible-playbook deploy-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/edpm_ci.yml {%- if edpm_file.stat.exists %} @@ -46,6 +47,7 @@ cmd: >- ansible-playbook post-deployment.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/edpm_ci.yml {%- if edpm_file.stat.exists %} diff --git a/ci/playbooks/edpm/update.yml b/ci/playbooks/edpm/update.yml index cc663f7ccc..c2e5e501cd 100644 --- 
a/ci/playbooks/edpm/update.yml +++ b/ci/playbooks/edpm/update.yml @@ -21,6 +21,7 @@ cmd: >- ansible-playbook update-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/edpm_ci.yml {%- if edpm_file.stat.exists %} diff --git a/ci/playbooks/edpm_baremetal_deployment/run.yml b/ci/playbooks/edpm_baremetal_deployment/run.yml index ed388ed081..a6e2902a78 100644 --- a/ci/playbooks/edpm_baremetal_deployment/run.yml +++ b/ci/playbooks/edpm_baremetal_deployment/run.yml @@ -59,6 +59,7 @@ cmd: >- ansible-playbook deploy-edpm.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/edpm_baremetal_deployment_ci.yml {%- if edpm_file.stat.exists %} diff --git a/ci/playbooks/edpm_build_images/edpm_build_images_content_provider_run.yaml b/ci/playbooks/edpm_build_images/edpm_build_images_content_provider_run.yaml index 0d6f2e62bb..df85fed949 100644 --- a/ci/playbooks/edpm_build_images/edpm_build_images_content_provider_run.yaml +++ b/ci/playbooks/edpm_build_images/edpm_build_images_content_provider_run.yaml @@ -18,6 +18,7 @@ cmd: >- ansible-playbook -i localhost, -c local ci/playbooks/edpm_build_images/edpm_build_images_content_provider.yaml + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml {%- if cifmw_extras is defined %} {%- for extra_vars in cifmw_extras %} diff --git a/ci/playbooks/edpm_build_images/run.yml b/ci/playbooks/edpm_build_images/run.yml index 2d1b0aef74..a526aca4f6 100644 --- a/ci/playbooks/edpm_build_images/run.yml +++ b/ci/playbooks/edpm_build_images/run.yml @@ -18,6 +18,7 @@ cmd: >- ansible-playbook -i {{ cifmw_zuul_target_host }}, -c local ci/playbooks/edpm_build_images/edpm_image_builder.yml + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml {%- if cifmw_extras is defined %} {%- for extra_vars in cifmw_extras %} diff --git 
a/ci/playbooks/kuttl/kuttl-from-operator-deps.yaml b/ci/playbooks/kuttl/kuttl-from-operator-deps.yaml index e34c79062e..a39300228d 100644 --- a/ci/playbooks/kuttl/kuttl-from-operator-deps.yaml +++ b/ci/playbooks/kuttl/kuttl-from-operator-deps.yaml @@ -8,6 +8,7 @@ cmd: >- ansible-playbook ci/playbooks/kuttl/deploy-deps.yaml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/ci.yml {%- if cifmw_extras is defined %} diff --git a/ci/playbooks/kuttl/kuttl-from-operator-run.yaml b/ci/playbooks/kuttl/kuttl-from-operator-run.yaml index dc5d376d87..f1dacb01bc 100644 --- a/ci/playbooks/kuttl/kuttl-from-operator-run.yaml +++ b/ci/playbooks/kuttl/kuttl-from-operator-run.yaml @@ -8,6 +8,7 @@ cmd: >- ansible-playbook ci/playbooks/kuttl/run-kuttl-from-operator-targets.yaml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/ci.yml {%- if cifmw_extras is defined %} diff --git a/ci/playbooks/kuttl/run.yml b/ci/playbooks/kuttl/run.yml index bc13b6ab56..8bc271e428 100644 --- a/ci/playbooks/kuttl/run.yml +++ b/ci/playbooks/kuttl/run.yml @@ -8,6 +8,7 @@ cmd: >- ansible-playbook ci/playbooks/kuttl/e2e-kuttl.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/ci.yml -e @scenarios/centos-9/kuttl.yml diff --git a/ci/playbooks/meta_content_provider/run.yml b/ci/playbooks/meta_content_provider/run.yml index 965a447c4b..71b44d2c3d 100644 --- a/ci/playbooks/meta_content_provider/run.yml +++ b/ci/playbooks/meta_content_provider/run.yml @@ -18,6 +18,7 @@ cmd: >- ansible-playbook ci/playbooks/meta_content_provider/meta_content_provider.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e 
@scenarios/centos-9/meta_content_provider.yml -e "cifmw_rp_registry_ip={{ cifmw_rp_registry_ip }}" diff --git a/ci/playbooks/tcib/run.yml b/ci/playbooks/tcib/run.yml index 0d97f918a4..d4ddc6008f 100644 --- a/ci/playbooks/tcib/run.yml +++ b/ci/playbooks/tcib/run.yml @@ -23,6 +23,7 @@ cmd: >- ansible-playbook ci/playbooks/tcib/tcib.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml -e @scenarios/centos-9/base.yml -e @scenarios/centos-9/tcib.yml -e "cifmw_rp_registry_ip={{ node_ip }}" diff --git a/ci/playbooks/test-base-job/test-run.yml b/ci/playbooks/test-base-job/test-run.yml index a314546458..1deab468db 100644 --- a/ci/playbooks/test-base-job/test-run.yml +++ b/ci/playbooks/test-base-job/test-run.yml @@ -18,6 +18,7 @@ cmd: >- ansible-playbook ci/playbooks/test-base-job/nested-run.yml -i "{{ ansible_user_dir }}/ci-framework-data/artifacts/zuul_inventory.yml" + -e @group_vars/all.yml {%- if cifmw_extras is defined %} {%- for extra_var in cifmw_extras %} -e "{{ extra_var }}" From 1ea6b9cd0a4f8a6185d12b94616f1796dcf0bad0 Mon Sep 17 00:00:00 2001 From: Katarina Strenkova Date: Tue, 30 Sep 2025 07:29:04 -0400 Subject: [PATCH 461/480] Add building Tempest timing data url There is a relatively new parameter added to test-operator, called TimingDataUrl. We are trying to make the parameter work with jobs, but the issue is that there is no variable that stores the final Tempest pod name that will be created. Without it, we cannot build the timing data url, as we are saving the data under job/test-name folder. This change adds a way to build the final url for both tests with and without workflow. 
--- roles/test_operator/tasks/tempest-tests.yml | 68 +++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/roles/test_operator/tasks/tempest-tests.yml b/roles/test_operator/tasks/tempest-tests.yml index c1b4b7a6f8..31f5aa49a5 100644 --- a/roles/test_operator/tasks/tempest-tests.yml +++ b/roles/test_operator/tasks/tempest-tests.yml @@ -210,3 +210,71 @@ stage_vars_dict | combine({'cifmw_test_operator_tempest_workflow': no_resources_workflow}) }} + +- name: Build tempest timing data URL (no workflow) + when: + - not cifmw_test_operator_dry_run | bool + - stage_vars_dict.cifmw_test_operator_tempest_timing_data_url is defined + - stage_vars_dict.cifmw_test_operator_tempest_timing_data_url + - stage_vars_dict.cifmw_test_operator_tempest_workflow | length == 0 + vars: + final_timing_data_url: >- + {{ + stage_vars_dict.cifmw_test_operator_tempest_timing_data_url + + test_operator_instance_name + '/stestr.tar.gz' + }} + ansible.builtin.set_fact: + test_operator_cr: >- + {{ + test_operator_cr | + combine({'spec': {'timingDataUrl': final_timing_data_url}}, recursive=true) + }} + stage_vars_dict: >- + {{ + stage_vars_dict | + combine({'cifmw_test_operator_tempest_timing_data_url': final_timing_data_url}) + }} + +- name: Build tempest timing data URL (workflow) + when: + - not cifmw_test_operator_dry_run | bool + - stage_vars_dict.cifmw_test_operator_tempest_workflow | length > 0 + block: + - name: Add the full timing data url to workflow steps + vars: + base_url: >- + {{ + item.timingDataUrl | default(stage_vars_dict.get('cifmw_test_operator_tempest_timing_data_url', '')) + }} + final_timing_data_url: >- + {{ + base_url + test_operator_instance_name + '-s' + + '%02d' | format(step_number) + '-' + item.stepName + '/stestr.tar.gz' + }} + _timing_data_url_workflow_step: >- + {{ + (base_url | length > 0) + | ternary( + item | combine({'timingDataUrl': final_timing_data_url}, recursive=true), + item + ) + }} + ansible.builtin.set_fact: + timing_data_url_workflow: "{{ 
timing_data_url_workflow | default([]) + [_timing_data_url_workflow_step] }}" loop: "{{ stage_vars_dict.cifmw_test_operator_tempest_workflow | list }}" loop_control: index_var: step_number + + - name: Override the Tempest CR workflow + when: timing_data_url_workflow is defined + ansible.builtin.set_fact: + test_operator_cr: >- + {{ + test_operator_cr | + combine({'spec': {'workflow': timing_data_url_workflow}}, recursive=true) + }} + stage_vars_dict: >- + {{ + stage_vars_dict | + combine({'cifmw_test_operator_tempest_workflow': timing_data_url_workflow}) + }} From bb1ff8324ad801acfe9a10d9adf6e9d498f0e99f Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Thu, 30 Oct 2025 17:06:14 +0530 Subject: [PATCH 462/480] Exclude roles symlink from ansible-lint Some files in roles dir are excluded from lint check. Adding roles dir symlink in hooks dir is causing lint to check those files too, causing lint job to fail. Excluding that symlink should help. Signed-off-by: Amartya Sinha Co-Authored-By: Daniel Pawlik --- .ansible-lint | 1 + roles/pcp_metrics/tasks/annotations.yaml | 5 +++-- roles/pcp_metrics/tasks/coreos.yaml | 5 +++-- roles/pcp_metrics/tasks/gather.yaml | 7 ++++--- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 4c12fb7024..8ffdeaeb19 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -22,6 +22,7 @@ exclude_paths: - roles/kustomize_deploy/molecule/flexible_loop/prepare.yml # import_playbook - roles/*/molecule/*/side_effect.yml # syntax-check[empty-playbook] https://github.com/ansible/molecule/issues/3617 - roles/ci_multus/molecule/*/nads_output.yml # internal-error due to "---" characters + - hooks/playbooks/roles/ strict: true quiet: false verbosity: 1 diff --git a/roles/pcp_metrics/tasks/annotations.yaml b/roles/pcp_metrics/tasks/annotations.yaml index fb99f2db28..d6d2f6e983 100644 --- a/roles/pcp_metrics/tasks/annotations.yaml +++ b/roles/pcp_metrics/tasks/annotations.yaml @@ -21,12 +21,13 @@ register:
_annotations_shell - name: Ensure the output directory exist - file: + ansible.builtin.file: path: "{{ pcp_metrics_output_dir }}" state: directory mode: '0755' - name: Save annotations - copy: + ansible.builtin.copy: content: "{{ _annotations_shell.stdout }}" dest: "{{ pcp_metrics_output_dir }}/annotations.txt" + mode: "0644" diff --git a/roles/pcp_metrics/tasks/coreos.yaml b/roles/pcp_metrics/tasks/coreos.yaml index 2710016fca..39ed6224a0 100644 --- a/roles/pcp_metrics/tasks/coreos.yaml +++ b/roles/pcp_metrics/tasks/coreos.yaml @@ -17,7 +17,7 @@ name: baseos description: BaseOS repository baseurl: "{{ pcp_repo_url }}/BaseOS/$basearch/os/" - gpgcheck: no + gpgcheck: false - name: Set repositories (AppStream) ansible.builtin.yum_repository: @@ -25,7 +25,7 @@ name: appstream description: AppStream repository baseurl: "{{ pcp_repo_url }}/AppStream/$basearch/os/" - gpgcheck: no + gpgcheck: false - name: Make /usr writable become: true @@ -39,3 +39,4 @@ ansible.builtin.file: path: /var/lib/rpm-state state: directory + mode: "0750" diff --git a/roles/pcp_metrics/tasks/gather.yaml b/roles/pcp_metrics/tasks/gather.yaml index 4e1543ae21..0c89f1a483 100644 --- a/roles/pcp_metrics/tasks/gather.yaml +++ b/roles/pcp_metrics/tasks/gather.yaml @@ -21,7 +21,7 @@ register: _pmrep - name: Check if archive exists - stat: + ansible.builtin.stat: path: "{{ pcp_metrics_archive }}" register: _pcp_archive @@ -47,14 +47,15 @@ changed_when: false - name: Ensure the output directory exist - file: + ansible.builtin.file: path: "{{ pcp_metrics_output_dir }}" state: directory mode: '0755' delegate_to: localhost - name: Save the collected metrics to a local file - copy: + ansible.builtin.copy: content: "{{ _pcp_metrics_pmrep.stdout }}" dest: "{{ pcp_metrics_output_dir }}/{{ ansible_hostname }}.csv" + mode: "0644" delegate_to: localhost From 3ec08a4baaa46ce06591addb53d3b07842ef3631 Mon Sep 17 00:00:00 2001 From: Jon Uriarte Date: Wed, 29 Oct 2025 14:49:48 +0100 Subject: [PATCH 463/480] Adjust OCP 
and compute resources to serval The OCP master nodes and RHOSO compute nodes resources are adapted for osasinfra DT running on serval servers (500 GB RAM, 890 GB disk, 48 CPUs). --- scenarios/reproducers/dt-osasinfra.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/reproducers/dt-osasinfra.yml b/scenarios/reproducers/dt-osasinfra.yml index 680ec32865..a808b71644 100644 --- a/scenarios/reproducers/dt-osasinfra.yml +++ b/scenarios/reproducers/dt-osasinfra.yml @@ -45,7 +45,7 @@ cifmw_networking_mapper_definition_patches_01: # HCI requires bigger size to hold OCP on OSP disks cifmw_block_device_size: 100G -cifmw_libvirt_manager_compute_disksize: 200 +cifmw_libvirt_manager_compute_disksize: 160 cifmw_libvirt_manager_compute_memory: 50 cifmw_libvirt_manager_compute_cpus: 8 @@ -87,7 +87,7 @@ cifmw_libvirt_manager_configuration: disk_file_name: "ocp_master" disksize: "100" cpus: 16 - memory: 32 + memory: 64 root_part_id: 4 uefi: true nets: From 5c6c0df62a648f239bd2d14cba3db0d5e5d549ea Mon Sep 17 00:00:00 2001 From: sreekovili Date: Fri, 3 Oct 2025 16:33:23 -0400 Subject: [PATCH 464/480] Create a template for nova04delta --- .../edpm-nodeset-values/values.yaml.j2 | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 diff --git a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 new file mode 100644 index 0000000000..57dda73dcd --- /dev/null +++ b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 @@ -0,0 +1,87 @@ +--- +# source: nova04delta/edpm-nodeset-values/values.yaml.j2 +{% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} +{% set instances_names = [] %} +{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set 
_original_nodes = _original_nodeset.nodes | default({}) %} +{% set _original_services = _original_nodeset['services'] | default([]) %} +{% for _inst in cifmw_networking_env_definition.instances.keys() %} +{% if _inst.startswith('compute') %} +{% set _ = instances_names.append(_inst) %} +{% endif %} +{% endfor %} +{% set inst_stop_idx = (instances_names | length) // 2 %} +{% set nodeset_one_instances = instances_names[:inst_stop_idx] %} +data: + ssh_keys: + authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} + private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansibleVars: + edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" + timesync_ntp_servers: + - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" + edpm_network_config_os_net_config_mappings: +{% for instance in nodeset_one_instances %} + edpm-{{ instance }}: +{% if hostvars[instance] is defined %} + nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" +{% endif %} + nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" +{% endfor %} +{% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} + edpm_sshd_allowed_ranges: +{% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} + - "{{ range }}" +{% endfor %} +{% endif %} + nodes: +{% for instance in nodeset_one_instances %} + edpm-{{ instance }}: + ansible: + host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} + hostName: {{ instance }} + networks: +{% for net in 
cifmw_networking_env_definition.instances[instance].networks.keys() %} + - name: {{ net }} + subnetName: subnet1 + fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} +{% if net is match('ctlplane') %} + defaultRoute: true +{% endif %} +{% endfor %} +{% endfor %} +{% if ('repo-setup' not in _original_services) and + ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} + services: + - "repo-setup" +{% for svc in _original_services %} + - "{{ svc }}" +{% endfor %} +{% endif %} +{% for host in cifmw_networking_env_definition.instances.keys() if host is match('^compute.*') %} +{% if cifmw_run_id is defined %} +{% set _host = host | replace('-' + cifmw_run_id, '') %} +{% else %} +{% set _host = host %} +{% endif %} + {{ _host }}: + bmc: + address: {{ cifmw_baremetal_hosts[host].connection }} + credentialsName: {{ _host }}-bmc-secret + bootMACAddress: {{ cifmw_baremetal_hosts[host].nics[0].mac }} + labels: + app: openstack + nodeset: {{ host | split('-') | first }} + name: {{ host }} + rootDeviceHints: + deviceName: /dev/sda +{% endfor %} From 8ca944b974af16240afcb91162ed153f3ae05bc8 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Wed, 8 Oct 2025 10:47:58 +0200 Subject: [PATCH 465/480] Fix nova04delta template Signed-off-by: Bohdan Dobrelia --- .../edpm-nodeset-values/values.yaml.j2 | 116 ++++++++++-------- 1 file changed, 62 insertions(+), 54 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 index 57dda73dcd..a8e321cea2 100644 --- a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 @@ -2,26 +2,23 @@ # source: nova04delta/edpm-nodeset-values/values.yaml.j2 {% set _ipv = cifmw_ci_gen_kustomize_values_ip_version_var_mapping %} {% 
set instances_names = [] %} -{% set _original_nodeset = (original_content.data | default({})).nodeset | default({}) %} +{% set _original_nodeset = original_content.data.nodeset | default({}) %} {% set _original_nodes = _original_nodeset.nodes | default({}) %} {% set _original_services = _original_nodeset['services'] | default([]) %} -{% for _inst in cifmw_networking_env_definition.instances.keys() %} -{% if _inst.startswith('compute') %} -{% set _ = instances_names.append(_inst) %} -{% endif %} +{% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} +{% for _inst in cifmw_baremetal_hosts.keys() %} +{% if 'compute' in _inst %} +{% set _ = instances_names.append(_inst) %} +{% endif %} {% endfor %} -{% set inst_stop_idx = (instances_names | length) // 2 %} -{% set nodeset_one_instances = instances_names[:inst_stop_idx] %} +{% set nodeset_one_instances = [instances_names[0]] %} data: + baremetalSetTemplate: + provisioningInterface: null ssh_keys: authorized: {{ cifmw_ci_gen_kustomize_values_ssh_authorizedkeys | b64encode }} private: {{ cifmw_ci_gen_kustomize_values_ssh_private_key | b64encode }} public: {{ cifmw_ci_gen_kustomize_values_ssh_public_key | b64encode }} - nova: - migration: - ssh_keys: - private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} - public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} nodeset: ansible: ansibleUser: "zuul" @@ -29,36 +26,25 @@ data: edpm_fips_mode: "{{ 'enabled' if cifmw_fips_enabled|default(false)|bool else 'check' }}" timesync_ntp_servers: - hostname: "{{ cifmw_ci_gen_kustomize_values_ntp_srv | default('pool.ntp.org') }}" - edpm_network_config_os_net_config_mappings: -{% for instance in nodeset_one_instances %} - edpm-{{ instance }}: -{% if hostvars[instance] is defined %} - nic1: "{{ hostvars[instance][_ipv.ansible_default_ipvX].macaddress }}" -{% endif %} - nic2: "{{ cifmw_networking_env_definition.instances[instance].networks.ctlplane.mac_addr }}" -{% endfor %} + 
edpm_bootstrap_command: | + # root CA + pushd /etc/pki/ca-trust/source/anchors/ + curl -LOk {{ cifmw_install_ca_url }} + update-ca-trust + popd + + # install rhos-release repos + dnf --nogpgcheck install -y {{ cifmw_repo_setup_rhos_release_rpm }} + rhos-release {{ cifmw_repo_setup_rhos_release_args }} + + # see https://access.redhat.com/solutions/253273 + dnf -y install conntrack-tools {% if cifmw_ci_gen_kustomize_values_sshd_ranges | default([]) | length > 0 %} edpm_sshd_allowed_ranges: {% for range in cifmw_ci_gen_kustomize_values_sshd_ranges %} - "{{ range }}" {% endfor %} {% endif %} - nodes: -{% for instance in nodeset_one_instances %} - edpm-{{ instance }}: - ansible: - host: {{ cifmw_networking_env_definition.instances[instance].networks.ctlplane[_ipv.ip_vX] }} - hostName: {{ instance }} - networks: -{% for net in cifmw_networking_env_definition.instances[instance].networks.keys() %} - - name: {{ net }} - subnetName: subnet1 - fixedIP: {{ cifmw_networking_env_definition.instances[instance].networks[net][_ipv.ip_vX] }} -{% if net is match('ctlplane') %} - defaultRoute: true -{% endif %} -{% endfor %} -{% endfor %} {% if ('repo-setup' not in _original_services) and ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} services: @@ -67,21 +53,43 @@ data: - "{{ svc }}" {% endfor %} {% endif %} -{% for host in cifmw_networking_env_definition.instances.keys() if host is match('^compute.*') %} -{% if cifmw_run_id is defined %} -{% set _host = host | replace('-' + cifmw_run_id, '') %} -{% else %} -{% set _host = host %} -{% endif %} - {{ _host }}: - bmc: - address: {{ cifmw_baremetal_hosts[host].connection }} - credentialsName: {{ _host }}-bmc-secret - bootMACAddress: {{ cifmw_baremetal_hosts[host].nics[0].mac }} - labels: - app: openstack - nodeset: {{ host | split('-') | first }} - name: {{ host }} - rootDeviceHints: - deviceName: /dev/sda -{% endfor %} + + # source roles/deploy_bmh/template/bmh.yml.j2, but it patches kustomize built outputs + 
baremetalhosts: +{% for host, def in cifmw_baremetal_hosts.items() if 'compute' in host %} +{% set _host = host | replace('-' + cifmw_run_id, '') if cifmw_run_id is defined else host %} + {{ _host }}: + bmc: + address: {{ cifmw_baremetal_hosts[host].connection }} + credentialsName: {{ _host }}-bmc-secret + disableCertificateVerification: {{ cifmw_deploy_bmh_disable_certificate_validation | default(true) }} +{% for nic in (cifmw_baremetal_hosts[host]['nics'] | default([])) if nic['network'] == cifmw_deploy_bmh_boot_interface | default('provision') %} + bootMACAddress: {{ nic.mac }} +{% endfor %} + bootMode: {{ cifmw_baremetal_hosts[host].boot_mode }} + online: {{ 'true' if cifmw_baremetal_hosts[host].status | default("") == "running" else 'false' }} + labels: + app: openstack + nodeset: {{ host | split('-') | first }} + name: {{ host }} +{% if 'root_device_hint' in cifmw_baremetal_hosts[host] %} +{# Ensure integer values are rendered as integers and not as strings #} +{% set hint_value = cifmw_baremetal_hosts[host]['root_device_hint'] + if cifmw_baremetal_hosts[host]['root_device_hint'] | int != 0 else + '"' + cifmw_baremetal_hosts[host]['root_device_hint'] + '"' %} +{% set hint_field = cifmw_baremetal_hosts[host].root_device_hint_field | default(cifmw_deploy_bmh_root_device_hint_field | default('deviceName')) %} + rootDeviceHints: + {{ hint_field }}: {{ hint_value }} +{% endif %} +{% if 'nmstate' in cifmw_baremetal_hosts[host] %} + preprovisioningNetworkDataName: {{ _host }}-nmstate-secret +{% endif %} +{% endfor %} + +{% if 'compute' in _vm_type %} + nova: + migration: + ssh_keys: + private: {{ cifmw_ci_gen_kustomize_values_migration_priv_key | b64encode }} + public: {{ cifmw_ci_gen_kustomize_values_migration_pub_key | b64encode }} +{% endif %} From ad3ab801ffbc3ee22feb994aef721c4d6e6250a2 Mon Sep 17 00:00:00 2001 From: Michael Burke Date: Wed, 8 Oct 2025 10:40:22 -0400 Subject: [PATCH 466/480] feat(reproducer): Remove hardcoded `zuul` user and `/home/zuul` paths 
It was already technically possible to create a controller-0 vm with a non-zuul user. This commit enables configuring controller-0 with a non-zuul user by using controller-0's `ansible_ssh_user` from `hostvars` in place of hardcoded `'zuul'`. It is worth noting that we can't simply use vars like ansible_user_id or ansible_user_dir with tasks that are delgated to controller-0. If the host we are running the reproducer from has a different user from controller-0, these vars will try to use the user from the machine that is running the reproducer rather than controller-0's user. For example, let's say we are running the reproducer from a machine with the user 'exampleuser' and we have the user 'zuul' on controller-0. If we have some task that is delegated to controller-0 and tries to create some file in the controller-0 user's home directory, we can't simply do: ``` - name: Create some file delegate_to: controller-0 ansible.builtin.file: path: "{{ ansible_user_dir }}/some_file.txt" state: touch ``` `ansible_user_dir` resolves to `/home/exampleuser`. This path does not exist on controller-0, so this task would fail. 
--- roles/reproducer/README.md | 4 +- roles/reproducer/defaults/main.yml | 11 +-- roles/reproducer/tasks/ci_job.yml | 24 ++--- .../tasks/configure_architecture.yml | 8 +- roles/reproducer/tasks/configure_cleanup.yaml | 6 +- .../reproducer/tasks/configure_controller.yml | 88 ++++++++----------- .../tasks/configure_post_deployment.yml | 12 +-- roles/reproducer/tasks/libvirt_layout.yml | 2 +- roles/reproducer/tasks/main.yml | 75 ++++++++-------- roles/reproducer/tasks/push_code.yml | 5 +- roles/reproducer/tasks/reuse_main.yaml | 35 +++----- .../templates/reproducer_params.yml.j2 | 2 +- roles/reproducer/vars/main.yml | 6 +- 13 files changed, 127 insertions(+), 151 deletions(-) diff --git a/roles/reproducer/README.md b/roles/reproducer/README.md index 5dbf8fba86..ca2857e938 100644 --- a/roles/reproducer/README.md +++ b/roles/reproducer/README.md @@ -6,8 +6,10 @@ None ## Parameters -* `cifmw_reproducer_user`: (String) User used for reproducer role. Defaults to `zuul` * `cifmw_reproducer_basedir`: (String) Base directory. Defaults to `cifmw_basedir`, which defaults to `~/ci-framework-data`. +* `cifmw_reproducer_controller_user`: (String) User on controller-0. Defaults to `ansible_ssh_user` from controller-0's `hostvars` if available, otherwise defaults to `zuul`. +* `cifmw_reproducer_controller_user_dir`: (String) Controller-0 user's home dir. Defaults to `/home/{{ cifmw_reproducer_controller_user }}` +* `cifmw_reproducer_controller_basedir`: (String) Path to the `ci-framework-data` dir on controller-0. Defaults to `"{{ cifmw_reproducer_controller_user_dir }}/ci-framework-data"` * `cifmw_reproducer_compute_repos`: (List[mapping]) List of yum repository that must be deployed on the compute nodes during their creation. Defaults to `[]`. * `cifmw_reproducer_compute_set_repositories`: (Bool) Deploy repositories (rhos-release) on Compute nodes. Defaults to `true`. 
* `cifmw_reproducer_play_extravars`: (List[string]) List of extra-vars you want to pass down to the EDPM deployment playbooks. Defaults to `[]`. diff --git a/roles/reproducer/defaults/main.yml b/roles/reproducer/defaults/main.yml index a005e9d93f..ca312765df 100644 --- a/roles/reproducer/defaults/main.yml +++ b/roles/reproducer/defaults/main.yml @@ -17,7 +17,9 @@ # All variables intended for modification should be placed in this file. # All variables within this role should have a prefix of "cifmw_reproducer" -cifmw_reproducer_user: "{{ ansible_user | default('zuul') }}" +cifmw_reproducer_controller_user: "{{ hostvars['controller-0']['ansible_ssh_user'] | default('zuul') }}" +cifmw_reproducer_controller_user_dir: "/home/{{ cifmw_reproducer_controller_user }}" +cifmw_reproducer_controller_basedir: "{{ cifmw_reproducer_controller_user_dir }}/ci-framework-data" cifmw_reproducer_basedir: "{{ cifmw_basedir | default( ansible_user_dir ~ '/ci-framework-data') }}" cifmw_reproducer_src_dir: "{{ cifmw_ci_src_dir | default( ansible_user_dir ~ '/src') }}" cifmw_reproducer_kubecfg: "{{ cifmw_libvirt_manager_configuration.vms.crc.image_local_dir }}/kubeconfig" @@ -40,13 +42,6 @@ cifmw_reproducer_supported_hypervisor_os: minimum_version: 9 RedHat: minimum_version: 9.3 -cifmw_reproducer_controller_basedir: >- - {{ - ( - '/home/zuul', - 'ci-framework-data', - ) | path_join - }} # Allow to disable validations - user toggle this at their # own risks! 
diff --git a/roles/reproducer/tasks/ci_job.yml b/roles/reproducer/tasks/ci_job.yml index 397ef10b7b..1495cb589e 100644 --- a/roles/reproducer/tasks/ci_job.yml +++ b/roles/reproducer/tasks/ci_job.yml @@ -23,18 +23,18 @@ block: - name: Ensure directory exists ansible.builtin.file: - path: "/home/zuul/{{ job_id }}-params" + path: "{{ cifmw_reproducer_controller_user_dir }}/{{ job_id }}-params" mode: "0755" state: directory - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" - name: Copy environment files to controller node tags: - bootstrap ansible.builtin.copy: src: "{{ _reproducer_basedir }}/parameters/" - dest: "/home/zuul/{{ job_id }}-params" + dest: "{{ cifmw_reproducer_controller_user_dir }}/{{ job_id }}-params" mode: "0644" - name: Inject reproducer dedicated parameter file @@ -42,7 +42,7 @@ - bootstrap ansible.builtin.template: src: "reproducer_params.yml.j2" - dest: "/home/zuul/{{ job_id }}-params/reproducer_params.yml" + dest: "{{ cifmw_reproducer_controller_user_dir }}/{{ job_id }}-params/reproducer_params.yml" mode: "0644" - name: Generate CI job playbook @@ -70,7 +70,7 @@ tags: - bootstrap ansible.builtin.copy: - dest: /home/zuul/zuul-network-data.yml + dest: "{{ cifmw_reproducer_controller_user_dir }}/zuul-network-data.yml" content: "{{ {'job_network': ci_job_networking} | to_nice_yaml}}" mode: "0644" @@ -96,7 +96,7 @@ items2dict }} ansible.builtin.copy: - dest: "/home/zuul/ci-framework-data/artifacts/parameters/zuul-params.yml" + dest: "{{ cifmw_reproducer_controller_basedir }}/artifacts/parameters/zuul-params.yml" content: "{{ {'zuul': zuul_params_filtered} | to_nice_yaml }}" mode: "0644" @@ -105,14 +105,14 @@ - always ansible.builtin.include_tasks: rotate_log.yml loop: - - "/home/zuul/ansible.log" - - "/home/zuul/ansible-pre-ci.log" - - "/home/zuul/ansible-{{ job_id }}.log" - - 
"/home/zuul/ansible-content-provider-bootstrap.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible-pre-ci.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible-{{ job_id }}.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible-content-provider-bootstrap.log" - name: Generate and run scripts vars: - _home: "/home/zuul" + _home: "{{ cifmw_reproducer_controller_user_dir }}" run_directory: "{{ _cifmw_reproducer_framework_location }}" block: - name: Generate pre-ci-play script diff --git a/roles/reproducer/tasks/configure_architecture.yml b/roles/reproducer/tasks/configure_architecture.yml index 3eb0e70d03..abb06b50f5 100644 --- a/roles/reproducer/tasks/configure_architecture.yml +++ b/roles/reproducer/tasks/configure_architecture.yml @@ -6,18 +6,18 @@ vars: run_directory: "{{ _cifmw_reproducer_framework_location }}" exports: - ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ci-framework-data/logs/ansible-deploy-architecture.log" + ANSIBLE_LOG_PATH: "{{ cifmw_reproducer_controller_basedir }}/logs/ansible-deploy-architecture.log" default_extravars: - "@~/ci-framework-data/parameters/reproducer-variables.yml" - "@~/ci-framework-data/parameters/openshift-environment.yml" extravars: "{{ cifmw_reproducer_play_extravars }}" playbook: "deploy-edpm.yml" ansible.builtin.template: - dest: "/home/zuul/deploy-architecture.sh" + dest: "{{ cifmw_reproducer_controller_user_dir }}/deploy-architecture.sh" src: "script.sh.j2" mode: "0755" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" - name: Rotate some logs tags: diff --git a/roles/reproducer/tasks/configure_cleanup.yaml b/roles/reproducer/tasks/configure_cleanup.yaml index b14294c834..6c2463e5b1 100644 --- a/roles/reproducer/tasks/configure_cleanup.yaml +++ b/roles/reproducer/tasks/configure_cleanup.yaml @@ -42,11 +42,11 @@ extravars: "{{ cifmw_reproducer_play_extravars }}" 
playbook: "clean_openstack_deployment.yaml" ansible.builtin.template: - dest: "/home/zuul/cleanup-architecture.sh" + dest: "{{ cifmw_reproducer_controller_user_dir }}/cleanup-architecture.sh" src: "script.sh.j2" mode: "0755" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" - name: Rotate some logs tags: diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index d1d1404108..2d07a15536 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -1,14 +1,4 @@ --- -- name: Set facts related to the reproducer - ansible.builtin.set_fact: - _ctl_reproducer_basedir: >- - {{ - ( - '/home/zuul', - 'ci-framework-data', - ) | path_join - }} - # The dynamic inventory sets the ansible_ssh_user to zuul once we get the proper # ssh configuration accesses set. - name: Configure controller-0 @@ -25,14 +15,10 @@ cifmw_sushy_emulator_install_type: podman cifmw_sushy_emulator_hypervisor_address: >- {{ inventory_hostname }}.utility - cifmw_sushy_emulator_basedir: "{{ _ctl_reproducer_basedir }}" + cifmw_sushy_emulator_basedir: "{{ cifmw_reproducer_controller_basedir }}" cifmw_sushy_emulator_connection_name: "sushy.utility" - cifmw_sushy_emulator_sshkey_path: >- - {{ - [_ctl_reproducer_basedir, '../.ssh/sushy_emulator-key'] | - path_join - }} - cifmw_podman_user_linger: "zuul" + cifmw_sushy_emulator_sshkey_path: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/sushy_emulator-key" + cifmw_podman_user_linger: "{{ cifmw_reproducer_controller_user }}" cifmw_sushy_emulator_libvirt_user: >- {{ hostvars[cifmw_sushy_emulator_hypervisor_target].ansible_user_id | @@ -41,7 +27,7 @@ block: - name: Ensure directories exist ansible.builtin.file: - path: "{{ _ctl_reproducer_basedir }}/{{ item }}" + path: "{{ cifmw_reproducer_controller_basedir }}/{{ item }}" state: directory mode: "0755" loop: @@ -118,14 
+104,14 @@ - bootstrap ansible.builtin.shell: cmd: >- - cat /home/zuul/reproducer-inventory/* > - {{ _ctl_reproducer_basedir }}/artifacts/zuul_inventory.yml + cat {{ cifmw_reproducer_controller_user_dir }}/reproducer-inventory/* > + {{ cifmw_reproducer_controller_basedir }}/artifacts/zuul_inventory.yml # You want to use the "name" parameter of the ansible.builtin.include_vars # call, such as: # - name: Load mac mapping # ansible.builtin.include_vars: - # file: "{{ _ctl_reproducer_basedir }}/parameters/interfaces-info.yml" + # file: "{{ cifmw_reproducer_controller_basedir }}/parameters/interfaces-info.yml" # name: my_fancy_name # Then you'll be able to access the mapping content via `my_fancy_name`. - name: Push the MAC mapping data @@ -135,7 +121,7 @@ - cifmw_libvirt_manager_mac_map is defined ansible.builtin.copy: mode: "0644" - dest: "{{ _ctl_reproducer_basedir }}/parameters/interfaces-info.yml" + dest: "{{ cifmw_reproducer_controller_basedir }}/parameters/interfaces-info.yml" content: "{{ cifmw_libvirt_manager_mac_map | to_nice_yaml }}" - name: Inject other Hypervisor SSH keys @@ -149,7 +135,7 @@ default(hostvars[host]['inventory_hostname']) }} ansible.builtin.copy: - dest: "/home/zuul/.ssh/ssh_{{ _ssh_host }}" + dest: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/ssh_{{ _ssh_host }}" content: "{{ _ssh_key }}" mode: "0600" loop: "{{ hostvars.keys() }}" @@ -175,7 +161,7 @@ ansible.builtin.blockinfile: create: true mode: "0600" - path: "/home/zuul/.ssh/config" + path: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/config" marker: "## {mark} {{ _ssh_host }}" block: |- Host {{ _ssh_host }} {{ hostvars[host]['inventory_hostname'] }} @@ -210,7 +196,7 @@ ansible.builtin.blockinfile: create: true mode: "0600" - path: "/home/zuul/.ssh/config" + path: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/config" marker: "## {mark} {{ host }}" block: |- Host {{ host }} {{ _hostname }} {{ _hostname }}.utility {{ hostvars[host].ansible_host }} @@ -232,10 +218,10 @@ - name: 
Create kube directory ansible.builtin.file: - path: "/home/zuul/.kube" + path: "{{ cifmw_reproducer_controller_user_dir }}/.kube" state: directory - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" mode: "0750" - name: Inject kubeconfig content @@ -243,15 +229,15 @@ - _devscripts_kubeconfig.content is defined or _crc_kubeconfig.content is defined ansible.builtin.copy: - dest: "/home/zuul/.kube/config" + dest: "{{ cifmw_reproducer_controller_user_dir }}/.kube/config" content: >- {{ (_use_ocp | bool) | ternary(_devscripts_kubeconfig.content, _crc_kubeconfig.content) | b64decode }} - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" mode: "0640" - name: Inject kubeadmin-password if exists @@ -259,25 +245,25 @@ - _devscripts_kubeadm.content is defined or _crc_kubeadm.content is defined ansible.builtin.copy: - dest: "/home/zuul/.kube/kubeadmin-password" + dest: "{{ cifmw_reproducer_controller_user_dir }}/.kube/kubeadmin-password" content: >- {{ (_devscripts_kubeadm.content is defined) | ternary(_devscripts_kubeadm.content, _crc_kubeadm.content) | b64decode }} - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" mode: "0600" - name: Inject devscripts private key if set when: - _devscript_privkey.content is defined ansible.builtin.copy: - dest: "{{ ansible_user_dir }}/.ssh/devscripts_key" + dest: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/devscripts_key" content: "{{ _devscript_privkey.content | b64decode }}" - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" mode: 
"0400" - name: Ensure /etc/ci/env is created @@ -289,8 +275,8 @@ - name: Manage secrets on controller-0 vars: - cifmw_manage_secrets_basedir: "/home/zuul/ci-framework-data" - cifmw_manage_secrets_owner: "{{ cifmw_reproducer_user }}" + cifmw_manage_secrets_basedir: "{{ cifmw_reproducer_controller_basedir }}" + cifmw_manage_secrets_owner: "{{ cifmw_reproducer_controller_user }}" block: - name: Initialize secret manager ansible.builtin.import_role: @@ -362,7 +348,7 @@ delegate_to: localhost ansible.posix.synchronize: src: "{{ cifmw_reproducer_src_dir }}/" - dest: "zuul@{{ item }}:{{ cifmw_reproducer_src_dir }}" + dest: "{{ cifmw_reproducer_controller_user }}@{{ item }}:{{ cifmw_reproducer_controller_user_dir }}/src" archive: true recursive: true loop: "{{ groups['controllers'] }}" @@ -417,19 +403,19 @@ }} ansible.builtin.copy: mode: "0644" - dest: "/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + dest: "{{ cifmw_reproducer_controller_basedir }}/parameters/reproducer-variables.yml" content: "{{ _filtered_vars | to_nice_yaml }}" - name: Create reproducer-variables.yml symlink to old location ansible.builtin.file: - dest: "/home/zuul/reproducer-variables.yml" - src: "/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + dest: "{{ cifmw_reproducer_controller_user_dir }}/reproducer-variables.yml" + src: "{{ cifmw_reproducer_controller_basedir }}/parameters/reproducer-variables.yml" state: link - name: Inject local environment parameters ansible.builtin.copy: mode: "0644" - dest: "/home/zuul/ci-framework-data/parameters/openshift-environment.yml" + dest: "{{ cifmw_reproducer_controller_basedir }}/parameters/openshift-environment.yml" content: |- {% raw %} --- @@ -450,14 +436,14 @@ - name: Create openshift-environment.yml symlink to old location ansible.builtin.file: - dest: "/home/zuul/openshift-environment.yml" - src: "/home/zuul/ci-framework-data/parameters/openshift-environment.yml" + dest: "{{ cifmw_reproducer_controller_user_dir 
}}/openshift-environment.yml" + src: "{{ cifmw_reproducer_controller_basedir }}/parameters/openshift-environment.yml" state: link - name: Get interfaces-info content register: _nic_info ansible.builtin.slurp: - src: "{{ _ctl_reproducer_basedir }}/parameters/interfaces-info.yml" + src: "{{ cifmw_reproducer_controller_basedir }}/parameters/interfaces-info.yml" # We detected OCP cluster may have some downtime even after it's supposed # to be started. @@ -484,7 +470,7 @@ {{ _nic_info.content | b64decode | from_yaml }} cifmw_networking_mapper_network_name: >- {{ _cifmw_libvirt_manager_layout.vms.controller.nets.1 }} - cifmw_networking_mapper_basedir: "/home/zuul/ci-framework-data" + cifmw_networking_mapper_basedir: "{{ cifmw_reproducer_controller_basedir }}" ansible.builtin.import_role: name: networking_mapper @@ -494,11 +480,11 @@ block: - name: Inject CRC ssh key ansible.builtin.copy: - dest: "/home/zuul/.ssh/crc_key" + dest: "{{ cifmw_reproducer_controller_user_dir }}/.ssh/crc_key" content: "{{ crc_priv_key['content'] | b64decode }}" mode: "0400" - owner: "{{ cifmw_reproducer_user }}" - group: "{{ cifmw_reproducer_user }}" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" - name: Ensure we have all dependencies installed ansible.builtin.async_status: diff --git a/roles/reproducer/tasks/configure_post_deployment.yml b/roles/reproducer/tasks/configure_post_deployment.yml index a8a6a27c3c..79bd23453e 100644 --- a/roles/reproducer/tasks/configure_post_deployment.yml +++ b/roles/reproducer/tasks/configure_post_deployment.yml @@ -6,18 +6,18 @@ vars: run_directory: "{{ _cifmw_reproducer_framework_location }}" exports: - ANSIBLE_LOG_PATH: "{{ ansible_user_dir }}/ansible-post-deployment.log" + ANSIBLE_LOG_PATH: "{{ cifmw_reproducer_controller_user_dir }}/ansible-post-deployment.log" default_extravars: - - "@{{ ansible_user_dir }}/ci-framework-data/parameters/reproducer-variables.yml" - - "@{{ ansible_user_dir 
}}/ci-framework-data/parameters/openshift-environment.yml" + - "@{{ cifmw_reproducer_controller_basedir }}/parameters/reproducer-variables.yml" + - "@{{ cifmw_reproducer_controller_basedir }}/parameters/openshift-environment.yml" extravars: "{{ cifmw_reproducer_play_extravars }}" playbook: "post-deployment.yml" ansible.builtin.template: - dest: "/home/zuul/post_deployment.sh" + dest: "{{ cifmw_reproducer_controller_user_dir }}/post_deployment.sh" src: "script.sh.j2" mode: "0755" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" - name: Rotate some logs tags: diff --git a/roles/reproducer/tasks/libvirt_layout.yml b/roles/reproducer/tasks/libvirt_layout.yml index 56da59c775..be682d42d2 100644 --- a/roles/reproducer/tasks/libvirt_layout.yml +++ b/roles/reproducer/tasks/libvirt_layout.yml @@ -46,7 +46,7 @@ ansible.builtin.command: # noqa: command-instead-of-module cmd: >- rsync -r {{ cifmw_reproducer_basedir }}/reproducer-inventory/ - zuul@controller-0:reproducer-inventory + {{ cifmw_reproducer_controller_user }}@controller-0:reproducer-inventory - name: Run post tasks in OCP cluster case when: diff --git a/roles/reproducer/tasks/main.yml b/roles/reproducer/tasks/main.yml index 4f1b81479b..deebf619d0 100644 --- a/roles/reproducer/tasks/main.yml +++ b/roles/reproducer/tasks/main.yml @@ -32,34 +32,6 @@ tags: - bootstrap_layout -- name: Discover and expose CI Framework path on remote node - tags: - - always - vars: - default_path: >- - {{ - cifmw_reproducer_default_repositories | - selectattr('src', 'match', '^.*/ci[_\-]framework$') | - map(attribute='dest') | first - }} - custom_path: >- - {{ - cifmw_reproducer_repositories | - selectattr('src', 'match', '^.*/ci-framework$') | - map(attribute='dest') - }} - _path: >- - {{ - (custom_path | length > 0) | - ternary(custom_path | first, default_path) - }} - ansible.builtin.set_fact: - _cifmw_reproducer_framework_location: >- - {{ - (_path is 
match('.*/ci-framework/?$')) | - ternary(_path, [_path, 'ci-framework'] | path_join) - }} - - name: Build final libvirt layout tags: - bootstrap_env @@ -252,6 +224,34 @@ }} failed_when: false +- name: Discover and expose CI Framework path on remote node + tags: + - always + vars: + default_path: >- + {{ + cifmw_reproducer_default_repositories | + selectattr('src', 'match', '^.*/ci[_\-]framework$') | + map(attribute='dest') | first + }} + custom_path: >- + {{ + cifmw_reproducer_repositories | + selectattr('src', 'match', '^.*/ci-framework$') | + map(attribute='dest') + }} + _path: >- + {{ + (custom_path | length > 0) | + ternary(custom_path | first, default_path) + }} + ansible.builtin.set_fact: + _cifmw_reproducer_framework_location: >- + {{ + (_path is match('.*/ci-framework/?$')) | + ternary(_path, [_path, 'ci-framework'] | path_join) + }} + - name: Run only on hypervisor with controller-0 when: - ( @@ -281,13 +281,15 @@ - always ansible.builtin.include_tasks: rotate_log.yml loop: - - "/home/zuul/ansible-bootstrap.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible-bootstrap.log" - name: Bootstrap environment on controller-0 vars: # NOTE: need to overwrite parent vars: # ./roles/reproducer/molecule/crc_layout/converge.yml - cifmw_basedir: "{{ ansible_user_dir ~ '/ci-framework-data' }}" + cifmw_basedir: "{{ cifmw_reproducer_controller_basedir }}" + ansible_user_dir: "{{ cifmw_reproducer_controller_user_dir }}" + ansible_user_id: "{{ cifmw_reproducer_controller_user }}" no_log: "{{ cifmw_nolog | default(true) | bool }}" ansible.builtin.import_role: name: cifmw_setup @@ -300,7 +302,8 @@ _devsetup_path: >- {{ ( - cifmw_installyamls_repos, + cifmw_reproducer_controller_user_dir, + 'src/github.com/openstack-k8s-operators/install_yamls', 'devsetup' ) | ansible.builtin.path_join }} @@ -310,13 +313,13 @@ cmd: >- ansible-playbook -i ~/ci-framework-data/artifacts/zuul_inventory.yml download_tools.yaml --tags kustomize,kubectl - creates: "/home/zuul/bin/kubectl" + 
creates: "{{ cifmw_reproducer_controller_user_dir }}/bin/kubectl" - name: Configure CRC network if needed when: - _use_crc | bool vars: - cifmw_openshift_kubeconfig: "/home/zuul/.kube/config" + cifmw_openshift_kubeconfig: "{{ cifmw_reproducer_controller_user_dir }}/.kube/config" ansible.builtin.include_role: name: openshift_setup tasks_from: patch_network_operator.yml @@ -396,8 +399,8 @@ extravars: "{{ cifmw_reproducer_play_extravars }}" playbook: "deploy-edpm.yml" ansible.builtin.template: - dest: "/home/zuul/deploy-edpm.sh" + dest: "{{ cifmw_reproducer_controller_user_dir }}/deploy-edpm.sh" src: "script.sh.j2" mode: "0755" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" diff --git a/roles/reproducer/tasks/push_code.yml b/roles/reproducer/tasks/push_code.yml index 985b9e655e..c32ef15f6a 100644 --- a/roles/reproducer/tasks/push_code.yml +++ b/roles/reproducer/tasks/push_code.yml @@ -78,7 +78,6 @@ - name: Push random code into the proper location vars: - repo_base_dir: '/home/zuul/src' _cifmw_reproducer_all_repositories: "{{ cifmw_reproducer_repositories | default([]) }}" block: - name: Expand cifmw_reproducer_repositories to pull code from ansible controller to controller-0 @@ -95,7 +94,7 @@ _user_sources: "{{ cifmw_reproducer_repositories | default([]) | map(attribute='src') }}" _repo_entry: src: "{{ ansible_user_dir }}/{{ repo.value.src_dir | regex_replace('/$', '') }}/" - dest: "/home/zuul/{{ repo.value.src_dir }}" + dest: "{{ cifmw_reproducer_controller_user_dir }}/{{ repo.value.src_dir }}" ansible.builtin.set_fact: _cifmw_reproducer_all_repositories: "{{ _cifmw_reproducer_all_repositories + [_repo_entry] }}" loop: "{{ _zuul['projects'] | dict2items }}" @@ -126,7 +125,7 @@ - item.src is abs or item.src is not match('.*:.*') ansible.posix.synchronize: src: "{{ item.src }}" - dest: "zuul@controller-0:{{ item.dest }}" + dest: "{{ cifmw_reproducer_controller_user }}@controller-0:{{ 
item.dest }}" archive: true recursive: true delete: true diff --git a/roles/reproducer/tasks/reuse_main.yaml b/roles/reproducer/tasks/reuse_main.yaml index a0c4d92a51..db5630a924 100644 --- a/roles/reproducer/tasks/reuse_main.yaml +++ b/roles/reproducer/tasks/reuse_main.yaml @@ -114,7 +114,7 @@ - always ansible.builtin.include_tasks: rotate_log.yml loop: - - "/home/zuul/ansible-bootstrap.log" + - "{{ cifmw_reproducer_controller_user_dir }}/ansible-bootstrap.log" - name: Bootstrap environment on controller-0 environment: @@ -127,7 +127,7 @@ -e @~/ci-framework-data/parameters/reproducer-variables.yml -e @scenarios/reproducers/networking-definition.yml playbooks/01-bootstrap.yml - creates: "/home/zuul/ansible-bootstrap.log" + creates: "{{ cifmw_reproducer_controller_user_dir }}/ansible-bootstrap.log" - name: Install dev tools from install_yamls on controller-0 environment: @@ -136,7 +136,8 @@ _devsetup_path: >- {{ ( - cifmw_installyamls_repos, + cifmw_reproducer_controller_user_dir, + 'src/github.com/openstack-k8s-operators/install_yamls', 'devsetup' ) | ansible.builtin.path_join }} @@ -146,7 +147,7 @@ cmd: >- ansible-playbook -i ~/ci-framework-data/artifacts/zuul_inventory.yml download_tools.yaml --tags kustomize,kubectl - creates: "/home/zuul/bin/kubectl" + creates: "{{ cifmw_reproducer_controller_user_dir }}/bin/kubectl" # Run from the hypervisor - name: Ensure OCP cluster is stable @@ -201,19 +202,9 @@ ansible.builtin.include_tasks: file: configure_post_deployment.yml - - name: Set facts related to the reproducer - ansible.builtin.set_fact: - _ctl_reproducer_basedir: >- - {{ - ( - '/home/zuul', - 'ci-framework-data', - ) | path_join - }} - - name: Ensure directories exist ansible.builtin.file: - path: "{{ _ctl_reproducer_basedir }}/{{ item }}" + path: "{{ cifmw_reproducer_controller_basedir }}/{{ item }}" state: directory mode: "0755" loop: @@ -247,18 +238,18 @@ }} ansible.builtin.copy: mode: "0644" - dest: 
"/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + dest: "{{ cifmw_reproducer_controller_basedir }}/parameters/reproducer-variables.yml" content: "{{ _filtered_vars | to_nice_yaml }}" - name: Create reproducer-variables.yml symlink to old location ansible.builtin.file: - dest: "/home/zuul/reproducer-variables.yml" - src: "/home/zuul/ci-framework-data/parameters/reproducer-variables.yml" + dest: "{{ cifmw_reproducer_controller_user_dir }}/reproducer-variables.yml" + src: "{{ cifmw_reproducer_controller_basedir }}/parameters/reproducer-variables.yml" state: link - name: Slurp kubeadmin password ansible.builtin.slurp: - src: /home/zuul/.kube/kubeadmin-password + src: "{{ cifmw_reproducer_controller_user_dir }}/.kube/kubeadmin-password" register: _kubeadmin_password - name: Prepare ci-like EDPM deploy @@ -276,8 +267,8 @@ extravars: "{{ cifmw_reproducer_play_extravars }}" playbook: "deploy-edpm.yml" ansible.builtin.template: - dest: "/home/zuul/deploy-edpm.sh" + dest: "{{ cifmw_reproducer_controller_user_dir }}/deploy-edpm.sh" src: "script.sh.j2" mode: "0755" - owner: "zuul" - group: "zuul" + owner: "{{ cifmw_reproducer_controller_user }}" + group: "{{ cifmw_reproducer_controller_user }}" diff --git a/roles/reproducer/templates/reproducer_params.yml.j2 b/roles/reproducer/templates/reproducer_params.yml.j2 index 8862925691..d5f9882937 100644 --- a/roles/reproducer/templates/reproducer_params.yml.j2 +++ b/roles/reproducer/templates/reproducer_params.yml.j2 @@ -1,6 +1,6 @@ --- # Generated by CI Framework reproducer -cifmw_openshift_kubeconfig: /home/zuul/.kube/config +cifmw_openshift_kubeconfig: {{ cifmw_reproducer_controller_user_dir }}/.kube/config cifmw_openshift_login_password: 12345678 {% if cifmw_reproducer_params | length > 0 -%} {{ cifmw_reproducer_params | to_nice_yaml }} diff --git a/roles/reproducer/vars/main.yml b/roles/reproducer/vars/main.yml index 64ebcbfe97..85cb23d566 100644 --- a/roles/reproducer/vars/main.yml +++ 
b/roles/reproducer/vars/main.yml @@ -2,11 +2,11 @@ # Default repositories we always want to have cifmw_reproducer_default_repositories: - src: "https://github.com/openstack-k8s-operators/ci-framework" - dest: "{{ cifmw_project_dir_absolute }}" + dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" - src: "https://github.com/openstack-k8s-operators/install_yamls" - dest: "{{ cifmw_installyamls_repos }}" + dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" - src: "https://github.com/openstack-k8s-operators/architecture" - dest: "/home/zuul/src/github.com/openstack-k8s-operators/architecture" + dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/architecture" # one place to rule them all cifmw_reproducer_nm_dnsmasq: "/etc/NetworkManager/conf.d/zz-dnsmasq.conf" From fd957cc9a9471863815732a0161b072420ab1909 Mon Sep 17 00:00:00 2001 From: Michael Burke Date: Wed, 8 Oct 2025 10:40:46 -0400 Subject: [PATCH 467/480] fix(reproducer): Use controller-0 path when installing requirements Previously, the check for `common-requirements.txt` would check if common requirements exists on localhost at a given path. This path is based on the home directory on localhost. If the file did exist, it would use that same path to try to install the requirements on controller-0. This causes issues when localhost and controller-0 do not have the same users and home directories. 
--- roles/reproducer/tasks/configure_controller.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 2d07a15536..54d6f2aa43 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -357,11 +357,10 @@ - cifmw_reproducer_src_dir_stat.stat.exists - cifmw_reproducer_src_dir_stat.stat.isdir - - name: Check if local common-requirements.txt exists - delegate_to: localhost + - name: Check if common-requirements.txt exists on controller-0 ansible.builtin.stat: - path: "{{ cifmw_project_dir_absolute }}/common-requirements.txt" - register: _local_common_requirements_check + path: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + register: _controller_common_requirements_check run_once: true ignore_errors: true @@ -370,10 +369,10 @@ async: 600 # 10 minutes should be more than enough poll: 0 ansible.builtin.pip: - requirements: "{{ have_local | ternary(local, remote) }}" + requirements: "{{ have_controller_reqs | ternary(controller_reqs, remote) }}" vars: - have_local: "{{ _local_common_requirements_check.stat is defined and _local_common_requirements_check.stat.exists }}" - local: "{{ cifmw_project_dir_absolute }}/common-requirements.txt" + have_controller_reqs: "{{ _controller_common_requirements_check.stat is defined and _controller_common_requirements_check.stat.exists }}" + controller_reqs: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt - name: Inject most of the cifmw_ parameters passed to the reproducer run From ae5f34caf133c08eae500e9d864eacfd5167e380 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Wed, 29 Oct 2025 14:07:46 
+0100 Subject: [PATCH 468/480] Set timeout in each oc adm must-gather command execution It happens that the "oc adm must-gather" command takes longer than expected, so the CI job gets timeout and the collecting logs process is disturbed. In some places, we spotted that the '--timeout' parameter in 'oc adm' command is just ignored: TASK [os_must_gather : Run openstack-must-gather command output_dir={{ cifmw_os_must_gather_output_dir }}/artifacts, script=oc adm must-gather --image {{ cifmw_os_must_gather_image }} --timeout {{ cifmw_os_must_gather_timeout }} --host-network={{ cifmw_os_must_gather_host_network }} --dest-dir {{ cifmw_os_must_gather_output_log_dir }} -- ADDITIONAL_NAMESPACES={{ cifmw_os_must_gather_additional_namespaces }} OPENSTACK_DATABASES=$OPENSTACK_DATABASES SOS_EDPM=$SOS_EDPM SOS_DECOMPRESS=$SOS_DECOMPRESS gather 2>&1] POST-RUN END RESULT_TIMED_OUT: [untrusted : playbooks/baremetal/collect-logs.yaml@main] so let's add also timeout before 'oc adm' command to make sure that it would be "killed" in proper amount of time. 
Signed-off-by: Daniel Pawlik --- roles/os_must_gather/tasks/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/os_must_gather/tasks/main.yml b/roles/os_must_gather/tasks/main.yml index 7df2086910..a409961162 100644 --- a/roles/os_must_gather/tasks/main.yml +++ b/roles/os_must_gather/tasks/main.yml @@ -65,6 +65,7 @@ cifmw.general.ci_script: output_dir: "{{ cifmw_os_must_gather_output_dir }}/artifacts" script: >- + timeout {{ (cifmw_os_must_gather_timeout | community.general.to_seconds) + 120 }} oc adm must-gather --image {{ cifmw_os_must_gather_image }} --timeout {{ cifmw_os_must_gather_timeout }} --host-network={{ cifmw_os_must_gather_host_network }} @@ -101,7 +102,11 @@ KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default(cifmw_os_must_gather_kubeconfig) }}" PATH: "{{ cifmw_path }}" ansible.builtin.command: - cmd: oc adm must-gather --dest-dir {{ ansible_user_dir }}/ci-framework-data/must-gather + cmd: >- + timeout {{ (cifmw_os_must_gather_timeout | community.general.to_seconds) + 120 }} + oc adm must-gather + --dest-dir {{ ansible_user_dir }}/ci-framework-data/must-gather + --timeout {{ cifmw_os_must_gather_timeout }} always: - name: Create oc_inspect log directory ansible.builtin.file: From 9ccc1eecb3cd5d57a29f82590ae99e66ee749cc0 Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Mon, 3 Nov 2025 09:11:38 +0100 Subject: [PATCH 469/480] [cifmw_helpers] Verify if file exists before parsing Because of how some CI jobs were made, some files might not be available for parse. Add task to ensure that the file exists before parsing, to avoid making condition directly in call playbook multiple times. 
Signed-off-by: Daniel Pawlik --- roles/cifmw_helpers/tasks/inventory_file.yml | 34 ++++++++++++-------- roles/cifmw_helpers/tasks/var_file.yml | 30 +++++++++-------- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/roles/cifmw_helpers/tasks/inventory_file.yml b/roles/cifmw_helpers/tasks/inventory_file.yml index db8329a784..4eb8389b24 100644 --- a/roles/cifmw_helpers/tasks/inventory_file.yml +++ b/roles/cifmw_helpers/tasks/inventory_file.yml @@ -1,16 +1,24 @@ --- -- name: Read inventory file - ansible.builtin.slurp: - src: "{{ include_inventory_file }}" - register: _inventory_file +- name: Check if inventory file exists + ansible.builtin.stat: + path: "{{ include_inventory_file | trim }}" + register: _include_inventory_file -- name: Parse inventory file content - ansible.builtin.set_fact: - inventory_data: "{{ _inventory_file.content | b64decode | from_yaml }}" +- name: Parse inventory file + when: _include_inventory_file.stat.exists + block: + - name: Read inventory file + ansible.builtin.slurp: + src: "{{ include_inventory_file }}" + register: _inventory_file -- name: Process each group with hosts - ansible.builtin.include_tasks: - file: parse_inventory.yml - loop: "{{ inventory_data | dict2items | selectattr('value.hosts', 'defined') | list }}" - loop_control: - loop_var: group_item + - name: Parse inventory file content + ansible.builtin.set_fact: + inventory_data: "{{ _inventory_file.content | b64decode | from_yaml }}" + + - name: Process each group with hosts + ansible.builtin.include_tasks: + file: parse_inventory.yml + loop: "{{ inventory_data | dict2items | selectattr('value.hosts', 'defined') | list }}" + loop_control: + loop_var: group_item diff --git a/roles/cifmw_helpers/tasks/var_file.yml b/roles/cifmw_helpers/tasks/var_file.yml index 6d2c6e4678..df988ae034 100644 --- a/roles/cifmw_helpers/tasks/var_file.yml +++ b/roles/cifmw_helpers/tasks/var_file.yml @@ -10,19 +10,21 @@ path: "{{ provided_file | trim }}" register: _param_file -- name: 
Read vars +- name: Read vars and set as fact when: _param_file.stat.exists - ansible.builtin.slurp: - src: "{{ provided_file | trim }}" - register: _parsed_vars - no_log: "{{ cifmw_helpers_no_log }}" + block: + - name: Read the vars + ansible.builtin.slurp: + src: "{{ provided_file | trim }}" + register: _parsed_vars + no_log: "{{ cifmw_helpers_no_log }}" -- name: Set vars as fact - when: "'content' in _parsed_vars" - ansible.builtin.set_fact: - "{{ file_item.key }}": "{{ file_item.value }}" - cacheable: true - loop: "{{ _parsed_vars['content'] | b64decode | from_yaml | dict2items }}" - no_log: "{{ cifmw_helpers_no_log }}" - loop_control: - loop_var: file_item + - name: Set vars as fact + when: "'content' in _parsed_vars" + ansible.builtin.set_fact: + "{{ file_item.key }}": "{{ file_item.value }}" + cacheable: true + loop: "{{ _parsed_vars['content'] | b64decode | from_yaml | dict2items }}" + no_log: "{{ cifmw_helpers_no_log }}" + loop_control: + loop_var: file_item From b31e1157eb70699075ac7bae0885e794d3022270 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Wed, 29 Oct 2025 17:39:40 +0100 Subject: [PATCH 470/480] Configure additional DHCP host records for hybrid scenarios Full routing solution for hybrid scenarios involving virtual-media BMO, requires extra host record for hypervisor host pointing out to ocpbm bridge IP. Add a playbook to create this record, which can be applied as a post_infra hook. Note that neither _vm_records, nor cifmw_dnsmasq_host_record can't be used for this currently. 
Signed-off-by: Bohdan Dobrelia --- hooks/playbooks/hybrid_dhcp_records.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 hooks/playbooks/hybrid_dhcp_records.yml diff --git a/hooks/playbooks/hybrid_dhcp_records.yml b/hooks/playbooks/hybrid_dhcp_records.yml new file mode 100644 index 0000000000..0cca31a423 --- /dev/null +++ b/hooks/playbooks/hybrid_dhcp_records.yml @@ -0,0 +1,25 @@ +- name: Configure additional DHCP host records for hybrid scenarios + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + tasks: + - name: Add host record and restart dnsmasq + when: + - hypervisor is defined + - ocpbm_ip is defined + become: true + block: + - name: Add host record + ansible.builtin.lineinfile: + create: true + path: "{{ cifmw_dnsmasq_basedir | default('/etc/cifmw-dnsmasq.d') }}/host_records.conf" + mode: '0644' + line: >- + host-record={{ hypervisor }},{{ ocpbm_ip }} + state: present + validate: "/usr/sbin/dnsmasq -C %s --test" + register: _add_host_record + - name: Restart dnsmasq # noqa no-handler + when: _add_host_record.changed + ansible.builtin.systemd_service: + name: cifmw-dnsmasq.service + state: restarted From 078a0e8429a218e7839598c4f1c6b92ad0877de7 Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Tue, 4 Nov 2025 09:19:51 +0100 Subject: [PATCH 471/480] Fix nova04delta cifmw_baremetal_hosts defaults Signed-off-by: Bohdan Dobrelia --- .../templates/nova04delta/edpm-nodeset-values/values.yaml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 index a8e321cea2..4039046670 100644 --- a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 @@ -54,6 +54,7 @@ data: {% endfor %} {% endif %} +{% if 
cifmw_baremetal_hosts | default({}) | length > 0 %} # source roles/deploy_bmh/template/bmh.yml.j2, but it patches kustomize built outputs baremetalhosts: {% for host, def in cifmw_baremetal_hosts.items() if 'compute' in host %} @@ -85,6 +86,7 @@ data: preprovisioningNetworkDataName: {{ _host }}-nmstate-secret {% endif %} {% endfor %} +{% endif %} {% if 'compute' in _vm_type %} nova: From 975ab760eb37cf56142d7d684ba68fea7a532dcb Mon Sep 17 00:00:00 2001 From: Bohdan Dobrelia Date: Tue, 4 Nov 2025 13:58:03 +0100 Subject: [PATCH 472/480] Dedup edpm services from nova04delta template Signed-off-by: Bohdan Dobrelia --- .../nova04delta/edpm-nodeset-values/values.yaml.j2 | 9 --------- 1 file changed, 9 deletions(-) diff --git a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 index 4039046670..4987ed49d7 100644 --- a/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 +++ b/roles/ci_gen_kustomize_values/templates/nova04delta/edpm-nodeset-values/values.yaml.j2 @@ -4,7 +4,6 @@ {% set instances_names = [] %} {% set _original_nodeset = original_content.data.nodeset | default({}) %} {% set _original_nodes = _original_nodeset.nodes | default({}) %} -{% set _original_services = _original_nodeset['services'] | default([]) %} {% set _vm_type = (_original_nodes.keys() | first).split('-')[1] %} {% for _inst in cifmw_baremetal_hosts.keys() %} {% if 'compute' in _inst %} @@ -45,14 +44,6 @@ data: - "{{ range }}" {% endfor %} {% endif %} -{% if ('repo-setup' not in _original_services) and - ('repo-setup' in ci_gen_kustomize_edpm_nodeset_predeployed_services) %} - services: - - "repo-setup" -{% for svc in _original_services %} - - "{{ svc }}" -{% endfor %} -{% endif %} {% if cifmw_baremetal_hosts | default({}) | length > 0 %} # source roles/deploy_bmh/template/bmh.yml.j2, but it patches kustomize built outputs From 
5fdc07795acb3a48dac9cb707aaf4445aaaf540a Mon Sep 17 00:00:00 2001 From: rabi Date: Thu, 6 Nov 2025 10:00:46 +0530 Subject: [PATCH 473/480] Install buildah which is a requirement Now the cleanup target expects buildah to be installed on the hypervisor host after[1]. This probably needs to be installed in dev-scripts 01_install_requirements.sh, but we can fix our broken bmo01[2]. [1] https://github.com/openshift-metal3/dev-scripts/commit/a7ebd8590b66dc763d8310bc3a303f254fa16991 [2] https://sf.apps.int.gpc.ocp-hub.prod.psi.redhat.com/zuul/t/components-integration/build/1a32dcff28824f7d946940cbbadbda49 Signed-off-by: rabi --- roles/devscripts/vars/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/devscripts/vars/main.yml b/roles/devscripts/vars/main.yml index 7b9870b5bb..40e646857c 100644 --- a/roles/devscripts/vars/main.yml +++ b/roles/devscripts/vars/main.yml @@ -26,6 +26,7 @@ cifmw_devscripts_packages: - NetworkManager-initscripts-updown - patch - python3-jmespath + - buildah cifmw_devscripts_repo: "https://github.com/openshift-metal3/dev-scripts.git" cifmw_devscripts_repo_branch: HEAD From e6ccd23387cc8ef816dea74f35dc24dd605e466f Mon Sep 17 00:00:00 2001 From: Amartya Sinha Date: Tue, 4 Nov 2025 10:57:14 +0530 Subject: [PATCH 474/480] Replace hardcoded values with group_vars Some hardcoded paths landed again. Let's use relative path vars to avoid hardcoding. 
Signed-off-by: Amartya Sinha --- roles/reproducer/tasks/configure_controller.yml | 4 ++-- roles/reproducer/vars/main.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml index 54d6f2aa43..000b1704fe 100644 --- a/roles/reproducer/tasks/configure_controller.yml +++ b/roles/reproducer/tasks/configure_controller.yml @@ -359,7 +359,7 @@ - name: Check if common-requirements.txt exists on controller-0 ansible.builtin.stat: - path: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + path: "{{ cifmw_reproducer_controller_user_dir }}/{{ cifmw_project_dir }}/common-requirements.txt" register: _controller_common_requirements_check run_once: true ignore_errors: true @@ -372,7 +372,7 @@ requirements: "{{ have_controller_reqs | ternary(controller_reqs, remote) }}" vars: have_controller_reqs: "{{ _controller_common_requirements_check.stat is defined and _controller_common_requirements_check.stat.exists }}" - controller_reqs: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/common-requirements.txt" + controller_reqs: "{{ cifmw_reproducer_controller_user_dir }}/{{ cifmw_project_dir }}/common-requirements.txt" remote: https://raw.githubusercontent.com/openstack-k8s-operators/ci-framework/main/common-requirements.txt - name: Inject most of the cifmw_ parameters passed to the reproducer run diff --git a/roles/reproducer/vars/main.yml b/roles/reproducer/vars/main.yml index 85cb23d566..120f6245a1 100644 --- a/roles/reproducer/vars/main.yml +++ b/roles/reproducer/vars/main.yml @@ -2,9 +2,9 @@ # Default repositories we always want to have cifmw_reproducer_default_repositories: - src: "https://github.com/openstack-k8s-operators/ci-framework" - dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework" + dest: "{{ 
cifmw_reproducer_controller_user_dir }}/{{ cifmw_project_dir }}" - src: "https://github.com/openstack-k8s-operators/install_yamls" - dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/install_yamls" + dest: "{{ cifmw_reproducer_controller_user_dir }}/{{ cifmw_installyamls_repos_relative }}" - src: "https://github.com/openstack-k8s-operators/architecture" dest: "{{ cifmw_reproducer_controller_user_dir }}/src/github.com/openstack-k8s-operators/architecture" From 4913ce2c97c4ae14311e12a92498e0b2606302ef Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Tue, 28 Oct 2025 11:51:13 +0100 Subject: [PATCH 475/480] Add git-commit-msg-hook script The script is verifying if the minimum body length contains 10 characters. Also raise a warning if there is no: "Signed-Off-By". Signed-off-by: Daniel Pawlik --- .../workflows/commit-message-validator.yml | 44 ++++++++++++++++++ CONTRIBUTING.md | 13 ++++++ scripts/git-check-commit-body-length.sh | 45 +++++++++++++++++++ 3 files changed, 102 insertions(+) create mode 100644 .github/workflows/commit-message-validator.yml create mode 100755 scripts/git-check-commit-body-length.sh diff --git a/.github/workflows/commit-message-validator.yml b/.github/workflows/commit-message-validator.yml new file mode 100644 index 0000000000..075a47e580 --- /dev/null +++ b/.github/workflows/commit-message-validator.yml @@ -0,0 +1,44 @@ +name: Check if commit message body is not too short + +on: + pull_request: + types: [opened, synchronize, edited, reopened] + +jobs: + verify-body-length: + runs-on: ubuntu-latest + # set as non-voting for now. 
+ continue-on-error: true + + permissions: + contents: write + pull-requests: write + repository-projects: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Dump commit message to file + run: | + git fetch origin ${{ github.event.pull_request.head.sha }} + git log -1 --pretty=format:"%B" ${{ github.event.pull_request.head.sha }} > commit-message-file + + - name: Run commit message check + id: bodylength + run: | + set +e + ./scripts/git-check-commit-body-length.sh commit-message-file > result.log 2>&1 + EXIT_CODE=$? + echo "exit_code=$EXIT_CODE" >> $GITHUB_OUTPUT + cat result.log + + - name: Comment on PR if body length check failed + if: steps.bodylength.outputs.exit_code != '0' + uses: peter-evans/create-or-update-comment@v5 + with: + issue-number: ${{ github.event.pull_request.number }} + body-path: ./result.log + reactions: confused diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ed71f2c698..39df571fa1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,6 +45,19 @@ Here is an example, based on a common use-case, on how to use those variables oc get openstackdataplane -n {{ cifmw_install_yamls_defaults['NAMESPACE'] }} ~~~ +## A few words about using Git + +Before you make a pull request, make sure that: + +* the title of your git commit message begins with the role + name in brackets: `[my_wonderful_role]` or `(my_wonderful_role)` +* the git commit body message is longer than 10 characters and describes + the reason why you added this change +* sign your git commit using the `Signed-Off-By` option by + adding: `--signoff` or `-s` when using the command: `git commit`. +* if you already make a commit, and you want to add `Signed-Off-By`, + use command: `git commit --amend --signoff` + ### Documentation A new role must get proper documentation. 
Please edit the README.md located in diff --git a/scripts/git-check-commit-body-length.sh b/scripts/git-check-commit-body-length.sh new file mode 100755 index 0000000000..2ce9e64ea9 --- /dev/null +++ b/scripts/git-check-commit-body-length.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +MSG_FILE="$1" +MIN_BODY_LEN=10 + +# If no file provided, get latest commit message +if [ -z "$MSG_FILE" ]; then + TMP_FILE=$(mktemp) + git log -1 --pretty=format:"%B" >"$TMP_FILE" + MSG_FILE="$TMP_FILE" +fi + +# print commit message +echo -e "Processing commit message:\n" +cat "$MSG_FILE" +echo -e "\nEnd of commit message" + +# 0 = pass, 1 = fail +FAIL_LENGTH=0 +FAIL_SIGNED_OFF_BY=0 + +BODY=$(tail -n +3 "$MSG_FILE" | sed '/^\s*#/d' | sed '/^\s*$/d') +BODY_LEN=$(echo -n "$BODY" | wc -m) + +if [ "$BODY_LEN" -lt "$MIN_BODY_LEN" ]; then + echo -e "\n\n**WARNING: Commit message body is too short (has $BODY_LEN chars, minimum $MIN_BODY_LEN required).**\n" >&2 + echo "Please add a detailed explanation after the subject line." >&2 + FAIL_LENGTH=1 +fi + +if ! grep -qi '^Signed-off-by:' "$MSG_FILE"; then + echo -e "\n\n**WARNING: Missing 'Signed-off-by:' line in commit message.**\n" >&2 + echo "Add: Signed-off-by: Your Name " >&2 + FAIL_SIGNED_OFF_BY=1 +fi + +[ -n "$TMP_FILE" ] && rm -f "$TMP_FILE" + +if [ "$FAIL_LENGTH" -eq 0 ] && [ "$FAIL_SIGNED_OFF_BY" -eq 0 ]; then + echo "Commit message passes all checks." + exit 0 +else + echo -e "\nSome checks failed. See warnings above.\n" + exit 1 +fi From cf9217346602be24280b23e84efa5cfe623eb0a3 Mon Sep 17 00:00:00 2001 From: Jeremy Agee Date: Wed, 29 Oct 2025 12:13:51 -0400 Subject: [PATCH 476/480] Update federation multirealm httpd template This patch will resolve horizon OIDC users running into issues when logging out as one IDP user and then trying to login with a user from a different IDP before the first users OIDC session times out. 
--- .../templates/federation-multirealm.conf.j2 | 41 ++++++++----------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/roles/federation/templates/federation-multirealm.conf.j2 b/roles/federation/templates/federation-multirealm.conf.j2 index 59e7af480c..4c628f15d7 100644 --- a/roles/federation/templates/federation-multirealm.conf.j2 +++ b/roles/federation/templates/federation-multirealm.conf.j2 @@ -7,34 +7,29 @@ OIDCPassClaimsAs "{{ cifmw_federation_keystone_OIDC_PassClaimsAs }}" OIDCCryptoPassphrase "{{ cifmw_federation_keystone_OIDC_CryptoPassphrase }}" OIDCMetadataDir "/var/lib/httpd/metadata" OIDCRedirectURI "{{ cifmw_federation_keystone_url }}/v3/redirect_uri" -LogLevel debug +OIDCAuthRequestParams "prompt=login" +LogLevel rewrite:trace3 auth_openidc:debug - - AuthType "openid-connect" - Require valid-user - + + + Header always add Set-Cookie "mod_auth_openidc_session=deleted; Path=/; Max-Age=0; HttpOnly; Secure; SameSite=None" + + - - AuthType oauth20 - Require valid-user - +RewriteEngine On - - AuthType "openid-connect" - Require valid-user - +RewriteRule ^/v3/auth/OS-FEDERATION/identity_providers/({{ cifmw_federation_IdpName }}|{{ cifmw_federation_IdpName2 }})/protocols/openid/websso$ \ + /v3/local-logout/clear [R=302,L] - - AuthType oauth20 - Require valid-user - +RewriteRule ^/v3/local-logout/clear$ \ + /v3/auth/OS-FEDERATION/websso/openid [R=302,L,QSA,NE] - - Require valid-user + AuthType openid-connect + Require valid-user - - AuthType "openid-connect" - Require valid-user - + + AuthType openid-connect + Require valid-user + From e11ce21e017149f5f85262203c4439edbaa0b15f Mon Sep 17 00:00:00 2001 From: Enrique Vallespi Gil Date: Fri, 10 Oct 2025 11:59:32 +0200 Subject: [PATCH 477/480] feat(/home/zuul): Replace /home/zuulpath Root and playbooks files We're moving /home/zuul hardcoded paths from root-level files and ocurrences in files at playbook folder. 
--- create-infra.yml | 4 ++-- deploy-edpm-reuse.yaml | 6 +++--- playbooks/dcn.yml | 2 +- reproducer.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/create-infra.yml b/create-infra.yml index 089708ad43..1328faf6d4 100644 --- a/create-infra.yml +++ b/create-infra.yml @@ -149,9 +149,9 @@ name: sushy_emulator tasks_from: verify.yml - - name: Set permissions on /home/zuul/ci-framework-data on controller-0 + - name: Set permissions on ci-framework-data folder on controller-0 ansible.builtin.file: - path: "{{ cifmw_basedir | default('/home/zuul/ci-framework-data') }}" + path: "{{ cifmw_basedir | default(ansible_user_dir + '/ci-framework-data') }}" state: directory recurse: true owner: "{{ ansible_user_id }}" diff --git a/deploy-edpm-reuse.yaml b/deploy-edpm-reuse.yaml index 1550d4390d..5d5e202bd8 100644 --- a/deploy-edpm-reuse.yaml +++ b/deploy-edpm-reuse.yaml @@ -32,7 +32,7 @@ when: cifmw_cleanup_architecture | default(true) | bool delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/cleanup-architecture.sh" + cmd: "$HOME/cleanup-architecture.sh" - name: Inherit from parent scenarios if needed ansible.builtin.include_tasks: @@ -114,7 +114,7 @@ poll: 20 delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" + cmd: "$HOME/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" - name: Run post deployment if instructed to when: @@ -124,4 +124,4 @@ poll: 20 delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" + cmd: "$HOME/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml index a9b247c6da..430e2b8733 100644 --- a/playbooks/dcn.yml +++ b/playbooks/dcn.yml @@ -147,7 +147,7 @@ - name: Copy found CR files to the manifests folder ansible.builtin.copy: src: "{{ item.path }}" - dest: 
"/home/zuul/ci-framework-data/artifacts/manifests/openstack/cr" + dest: "{{ ansible_user_dir }}/ci-framework-data/artifacts/manifests/openstack/cr" mode: "0644" loop: "{{ dcn_crs.files }}" when: dcn_crs.matched > 0 diff --git a/reproducer.yml b/reproducer.yml index 91bffd1f29..75a92922a1 100644 --- a/reproducer.yml +++ b/reproducer.yml @@ -102,7 +102,7 @@ poll: 20 delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" + cmd: "$HOME/deploy-architecture.sh {{ cifmw_deploy_architecture_args | default('') }}" - name: Run post deployment if instructed to when: @@ -113,4 +113,4 @@ poll: 20 delegate_to: controller-0 ansible.builtin.command: - cmd: "/home/zuul/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" + cmd: "$HOME/post_deployment.sh {{ cifmw_post_deploy_args | default('') }}" From 970e4d31cfb0aa493c2c7d75c27cb64e8e296aac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 4 Nov 2025 00:15:03 +0100 Subject: [PATCH 478/480] Wait deployments Available instead of pods Ready MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change MetalLB controller-manager and webhook-server wait tasks to check for Deployment Available condition instead of Pod Ready condition. This ensures service endpoints are fully registered before applying MetalLB CRs, preventing intermittent webhook errors: "failed calling webhook...no endpoints available for service" The Available condition on Deployments is more reliable than Pod Ready for webhook availability, as it confirms the service is reachable. This follows the same pattern used for NMstate webhook deployment. 
Jira: OSPCIX-1095 Assisted-By: Claude Code/claude-4.5-sonnet Signed-off-by: Harald Jensås --- .../tasks/install_operators.yml | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/roles/kustomize_deploy/tasks/install_operators.yml b/roles/kustomize_deploy/tasks/install_operators.yml index 0d7da877ed..fcf3650b4a 100644 --- a/roles/kustomize_deploy/tasks/install_operators.yml +++ b/roles/kustomize_deploy/tasks/install_operators.yml @@ -230,39 +230,39 @@ name: openshift_setup tasks_from: patch_dependent_operators_source.yml - - name: Wait for controller-manager pods + - name: Wait for controller-manager deployment kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - kind: Pod + kind: Deployment namespace: metallb-system label_selectors: - control-plane = controller-manager wait: true wait_condition: - type: Ready + type: Available status: "True" wait_timeout: 300 retries: 3 delay: 60 - register: _controller_manager_pods - until: _controller_manager_pods is success + register: _controller_manager_deployment + until: _controller_manager_deployment is success - - name: Wait for webhook-server pods + - name: Wait for webhook-server deployment kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" - kind: Pod + kind: Deployment namespace: metallb-system label_selectors: - component = webhook-server wait: true wait_condition: - type: Ready + type: Available status: "True" wait_timeout: 300 retries: 3 delay: 60 - register: _webhook_server_pods - until: _webhook_server_pods is success + register: _webhook_server_deployment + until: _webhook_server_deployment is success - name: Wait until NMstate operator resources are deployed kubernetes.core.k8s_info: From 84c46b67b6d463f7c0599a6da1ba2bcf2f29ab37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Wed, 29 Oct 2025 21:30:31 +0100 Subject: [PATCH 479/480] devscripts: Restore pull-secret post-installation when mirror_images is enabled 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When mirror_images is enabled in dev-scripts (either explicitly or automatically for IPv6 deployments), the pull-secret is replaced with only the local mirror registry credentials during installation. This causes operator installation and workload deployments to fail because the cluster cannot authenticate to external registries like quay.io, registry.redhat.io, etc. This change adds post-installation logic to: - Merge the original pull-secret with the local mirror credentials - Update the cluster's pull-secret in openshift-config namespace - Re-enable OperatorHub default sources (disabled during mirroring) - Preserve ImageContentSourcePolicy manifests for mirror preference The merged pull-secret allows the cluster to pull from both the local mirror (when available) and external registries (as fallback), enabling operator installation while maintaining the benefits of image mirroring. This particularly helps IPv6 deployments where dev-scripts automatically sets MIRROR_IMAGES=true by default. Changes: - roles/devscripts/tasks/320_restore_pull_secret.yml (new) - roles/devscripts/tasks/300_post.yml - roles/devscripts/README.md Goal: The goal is to improve stability, especially for IPv6 jobs that operate behind the nat64-appliance VM for all external traffic. 
Assisted-By: Claude Code/claude-4.5-sonnet Signed-off-by: Harald Jensås --- docs/dictionary/en-custom.txt | 2 + roles/devscripts/README.md | 13 ++- roles/devscripts/tasks/300_post.yml | 7 ++ .../tasks/320_restore_pull_secret.yml | 93 +++++++++++++++++++ 4 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 roles/devscripts/tasks/320_restore_pull_secret.yml diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 170a695d37..6d203d5edd 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -238,6 +238,7 @@ IDM IdP Idempotency idrac +imagecontentsourcepolicy iface igfsbg igmp @@ -419,6 +420,7 @@ openstackprovisioner openstacksdk openstackversion operatorgroup +operatorhub opn orchestrator osd diff --git a/roles/devscripts/README.md b/roles/devscripts/README.md index 79eac7f133..85e55bf8f3 100644 --- a/roles/devscripts/README.md +++ b/roles/devscripts/README.md @@ -16,7 +16,6 @@ networks. building the various needed files. * `devscripts_deploy`: Overlaps with the previous tag, and adds the actual deployment of devscripts managed services. -* `devscripts_post`: Only runs the post-installation tasks. ## Parameters @@ -136,6 +135,18 @@ Allowed values can be found [here](https://mirror.openshift.com/pub/openshift-v4 | extra_worker_disk | | The disk size to be set for each extra nodes. | | extra_worker_vcpu | | The number of vCPUs to be configured for each extra nodes. | +#### Registry and Image Mirroring + +| Key | Default Value | Description | +| --- | ------------- | ----------- | +| mirror_images | `false` | When set to `true`, enables image mirroring to a local registry. This is useful for disconnected/air-gapped environments. **Note:** When enabled, the pull-secret and OperatorHub sources are automatically restored after installation to allow pulling images from external registries for operators and other workloads. 
| + +**Important:** When `mirror_images` is enabled: +- During installation, only the local mirror registry credentials are used +- Post-installation, the original pull-secret is automatically merged with the local mirror credentials +- OperatorHub default sources are re-enabled to allow operator installation +- ImageContentSourcePolicy manifests remain in place to prefer the local mirror when available, with fallback to external registries + ### Support keys in cifmw_devscripts_external_net | Key | Description | diff --git a/roles/devscripts/tasks/300_post.yml b/roles/devscripts/tasks/300_post.yml index 6e03cd8d23..46a2ac9ab1 100644 --- a/roles/devscripts/tasks/300_post.yml +++ b/roles/devscripts/tasks/300_post.yml @@ -26,6 +26,13 @@ - not cifmw_devscripts_ocp_online | bool ansible.builtin.import_tasks: set_cluster_fact.yml +- name: Restore pull-secret if mirror_images is enabled + when: + - cifmw_devscripts_config.mirror_images | default(false) | bool + tags: + - devscripts_deploy + ansible.builtin.include_tasks: 320_restore_pull_secret.yml + - name: Prepare for disk overlay configuration when: - not cifmw_devscripts_ocp_comply | bool diff --git a/roles/devscripts/tasks/320_restore_pull_secret.yml b/roles/devscripts/tasks/320_restore_pull_secret.yml new file mode 100644 index 0000000000..0b5f80e374 --- /dev/null +++ b/roles/devscripts/tasks/320_restore_pull_secret.yml @@ -0,0 +1,93 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# When mirror_images is enabled in dev-scripts, the pull-secret is replaced +# with only the local mirror registry credentials during installation. +# This task restores the original pull-secret post-installation to allow +# pulling images from external registries for operators and other workloads. + +- name: Get original pull-secret content + no_log: true + ansible.builtin.slurp: + src: "{{ cifmw_devscripts_repo_dir }}/pull_secret.json" + register: _original_pull_secret + +- name: Get current cluster pull-secret + no_log: true + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: Secret + name: pull-secret + namespace: openshift-config + register: _cluster_pull_secret_raw + +- name: Update cluster pull-secret + no_log: true + vars: + _original_auths: "{{ (_original_pull_secret.content | b64decode | from_json).auths }}" + _cluster_auths: "{{ (_cluster_pull_secret_raw.resources[0].data['.dockerconfigjson'] | b64decode | from_json).auths }}" + _merged_pull_secret: + auths: "{{ _cluster_auths | combine(_original_auths, recursive=true) }}" + kubernetes.core.k8s: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: pull-secret + namespace: openshift-config + type: kubernetes.io/dockerconfigjson + data: + .dockerconfigjson: "{{ _merged_pull_secret | to_json | b64encode }}" + +- name: Wait for nodes to stabilize after pull-secret update + kubernetes.core.k8s_info: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + 
kind: Node + register: _nodes + retries: 20 + delay: 30 + until: >- + _nodes.resources | length > 0 and + _nodes.resources | selectattr('status.conditions', 'defined') | + map(attribute='status.conditions') | flatten | + selectattr('type', 'equalto', 'Ready') | + selectattr('status', 'equalto', 'True') | + list | length == (_nodes.resources | length) + +- name: Re-enable OperatorHub default sources + kubernetes.core.k8s_json_patch: + kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + api_version: config.openshift.io/v1 + kind: OperatorHub + name: cluster + patch: + - op: replace + path: /spec/disableAllDefaultSources + value: false + +- name: Display pull-secret restoration status + ansible.builtin.debug: + msg: >- + Pull-secret has been restored with original credentials while keeping local mirror registry access. + OperatorHub default sources have been re-enabled to allow operator installation. From a98ac9f196f70c44b04378a261b279046f4da79f Mon Sep 17 00:00:00 2001 From: Daniel Pawlik Date: Fri, 7 Nov 2025 10:20:06 +0100 Subject: [PATCH 480/480] Test --- roles/cifmw_helpers/tasks/include_dir.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/cifmw_helpers/tasks/include_dir.yml b/roles/cifmw_helpers/tasks/include_dir.yml index d6a3682df2..3d2205700e 100644 --- a/roles/cifmw_helpers/tasks/include_dir.yml +++ b/roles/cifmw_helpers/tasks/include_dir.yml @@ -1,7 +1,7 @@ --- # This is a workaround for reading Ansible yaml files, # that instead of have clear values, it uses jinja2 variables, -# so reading the file and parse as fact does not work. +# so reading the file and parse as fact does not work - name: Check directory is available ansible.builtin.stat: