diff --git a/playbooks/skmo/ensure-central-ca-bundle.yaml b/playbooks/skmo/ensure-central-ca-bundle.yaml
new file mode 100644
index 0000000000..a37bccb458
--- /dev/null
+++ b/playbooks/skmo/ensure-central-ca-bundle.yaml
@@ -0,0 +1,31 @@
+---
+- name: Ensure central control plane uses custom CA bundle
+  hosts: localhost
+  gather_facts: false
+  vars:
+    central_namespace: openstack
+    controlplane_name: controlplane
+    ca_bundle_secret_name: custom-ca-certs
+  tasks:
+    - name: Check current caBundleSecretName
+      ansible.builtin.shell: |
+        set -euo pipefail
+        oc -n {{ central_namespace }} get osctlplane {{ controlplane_name }} \
+          -o jsonpath='{.spec.tls.caBundleSecretName}'
+      args:
+        executable: /bin/bash
+      register: ca_bundle_name
+      changed_when: false
+      failed_when: false
+
+    # Use a merge patch: it creates .spec.tls when missing and only sets
+    # caBundleSecretName, preserving any sibling keys. A JSON-patch
+    # 'add' on /spec/tls would replace the whole tls object.
+    - name: Patch control plane to use custom CA bundle when unset
+      ansible.builtin.shell: |
+        set -euo pipefail
+        oc -n {{ central_namespace }} patch osctlplane {{ controlplane_name }} \
+          --type merge -p '{"spec":{"tls":{"caBundleSecretName":"{{ ca_bundle_secret_name }}"}}}'
+      args:
+        executable: /bin/bash
+      when: ca_bundle_name.stdout | default('') | trim | length == 0
diff --git a/playbooks/skmo/prepare-leaf.yaml b/playbooks/skmo/prepare-leaf.yaml
new file mode 100644
index 0000000000..a2219d4467
--- /dev/null
+++ b/playbooks/skmo/prepare-leaf.yaml
@@ -0,0 +1,132 @@
+---
+- name: Prepare SKMO leaf prerequisites in regionZero
+  hosts: localhost
+  gather_facts: false
+  vars:
+    skmo_values_file: "{{ cifmw_architecture_repo }}/examples/va/multi-namespace-skmo/control-plane2/skmo-values.yaml"
+    osp_secrets_env_file: "{{ cifmw_architecture_repo }}/lib/control-plane/base/osp-secrets.env"
+    central_namespace: openstack
+    leaf_namespace: openstack2
+    leaf_secret_name: osp-secret
+    central_rootca_secret: rootca-public
+  tasks:
+    - name: Load SKMO values
+      ansible.builtin.set_fact:
+        skmo_values: "{{ lookup('file', skmo_values_file) | from_yaml }}"
+
+    - name: Set SKMO leaf facts
+      ansible.builtin.set_fact:
+        leaf_region: "{{ skmo_values.data.leafRegion }}"
+        leaf_admin_user: "{{ skmo_values.data.leafAdminUser }}"
+        leaf_admin_project: "{{ skmo_values.data.leafAdminProject }}"
+        leaf_admin_password_key: "{{ skmo_values.data.leafAdminPasswordKey }}"
+        keystone_internal_url: "{{ skmo_values.data.keystoneInternalURL }}"
+        keystone_public_url: "{{ skmo_values.data.keystonePublicURL }}"
+        ca_bundle_secret_name: "{{ skmo_values.data.leafCaBundleSecretName }}"
+
+    - name: Ensure leaf osp-secret exists (pre-create from env file)
+      ansible.builtin.shell: |
+        set -euo pipefail
+        if ! oc -n {{ leaf_namespace }} get secret {{ leaf_secret_name }} >/dev/null 2>&1; then
+          oc -n {{ leaf_namespace }} create secret generic {{ leaf_secret_name }} \
+            --from-env-file="{{ osp_secrets_env_file }}" \
+            --dry-run=client -o yaml | oc apply -f -
+        fi
+      args:
+        executable: /bin/bash
+
+    - name: Read leaf admin password from leaf secret
+      ansible.builtin.shell: |
+        set -euo pipefail
+        oc -n {{ leaf_namespace }} get secret {{ leaf_secret_name }} \
+          -o jsonpath='{.data.{{ leaf_admin_password_key }}}' | base64 -d
+      args:
+        executable: /bin/bash
+      register: leaf_admin_password
+      changed_when: false
+      no_log: true  # registered stdout is the admin password; keep it out of logs
+
+    - name: Ensure leaf region exists in central Keystone
+      ansible.builtin.shell: |
+        set -euo pipefail
+        oc -n {{ central_namespace }} rsh openstackclient \
+          openstack region show {{ leaf_region }} >/dev/null 2>&1 || \
+        oc -n {{ central_namespace }} rsh openstackclient \
+          openstack region create {{ leaf_region }}
+      args:
+        executable: /bin/bash
+
+    - name: Ensure keystone catalog endpoints exist for leaf region
+      ansible.builtin.shell: |
+        set -euo pipefail
+        if [[ -z "$(oc -n {{ central_namespace }} rsh openstackclient \
+            openstack endpoint list --service keystone --interface public --region {{ leaf_region }} \
+            -f value -c ID)" ]]; then  # command substitution avoids head/grep SIGPIPE under pipefail
+          oc -n {{ central_namespace }} rsh openstackclient \
+            openstack endpoint create --region {{ leaf_region }} identity public "{{ keystone_public_url }}"
+        fi
+        if [[ -z "$(oc -n {{ central_namespace }} rsh openstackclient \
+            openstack endpoint list --service keystone --interface internal --region {{ leaf_region }} \
+            -f value -c ID)" ]]; then
+          oc -n {{ central_namespace }} rsh openstackclient \
+            openstack endpoint create --region {{ leaf_region }} identity internal "{{ keystone_internal_url }}"
+        fi
+      args:
+        executable: /bin/bash
+
+    - name: Ensure leaf admin project exists in central Keystone
+      ansible.builtin.shell: |
+        set -euo pipefail
+        oc -n {{ central_namespace }} rsh openstackclient \
+          openstack project show {{ leaf_admin_project }} >/dev/null 2>&1 || \
+        oc -n {{ central_namespace }} rsh openstackclient \
+          openstack project create {{ leaf_admin_project }}
+      args:
+        executable: /bin/bash
+
+    - name: Ensure leaf admin user exists and has admin role
+      ansible.builtin.shell: |
+        set -euo pipefail
+        if ! oc -n {{ central_namespace }} rsh openstackclient \
+            openstack user show {{ leaf_admin_user }} >/dev/null 2>&1; then
+          oc -n {{ central_namespace }} rsh openstackclient \
+            openstack user create --domain Default --password "{{ leaf_admin_password.stdout | trim }}" {{ leaf_admin_user }}
+        fi
+        oc -n {{ central_namespace }} rsh openstackclient \
+          openstack role add --project {{ leaf_admin_project }} --user {{ leaf_admin_user }} admin
+      args:
+        executable: /bin/bash
+      no_log: true  # the rendered command line embeds the admin password
+
+    - name: Create or append leaf CA bundle secret
+      ansible.builtin.shell: |
+        set -euo pipefail
+        tmpdir="$(mktemp -d)"
+        trap 'rm -rf "${tmpdir}"' EXIT  # clean up decoded certs even if a command fails
+        newkey="skmo-central-rootca.crt"
+        export TMPDIR="${tmpdir}"
+
+        if oc -n {{ leaf_namespace }} get secret {{ ca_bundle_secret_name }} \
+            >/dev/null 2>&1; then
+          oc -n {{ leaf_namespace }} get secret {{ ca_bundle_secret_name }} \
+            -o json | python3 -c '
+        import base64, json, os, sys
+        tmpdir = os.environ.get("TMPDIR")
+        data = json.load(sys.stdin).get("data", {})
+        for key, value in data.items():
+            path = os.path.join(tmpdir, key)
+            with open(path, "wb") as f:
+                f.write(base64.b64decode(value))
+        '
+        fi
+
+        oc -n {{ central_namespace }} get secret {{ central_rootca_secret }} \
+          -o jsonpath='{.data.tls\.crt}' | base64 -d \
+          > "${tmpdir}/${newkey}"
+
+        oc -n {{ leaf_namespace }} create secret generic \
+          {{ ca_bundle_secret_name }} \
+          --from-file="${tmpdir}" \
+          --dry-run=client -o yaml | oc apply -f -
+      args:
+        executable: /bin/bash
diff --git a/playbooks/skmo/trust-leaf-ca.yaml b/playbooks/skmo/trust-leaf-ca.yaml
new file mode 100644
index 0000000000..5fd86d48e3
--- /dev/null
+++ b/playbooks/skmo/trust-leaf-ca.yaml
@@ -0,0 +1,50 @@
+---
+- name: Trust SKMO leaf CA in central region
+  hosts: localhost
+  gather_facts: false
+  vars:
+    skmo_values_file: "{{ cifmw_architecture_repo }}/examples/va/multi-namespace-skmo/control-plane2/skmo-values.yaml"
+    central_namespace: openstack
+    leaf_namespace: openstack2
+    leaf_rootca_secret: rootca-public
+  tasks:
+    - name: Load SKMO values
+      ansible.builtin.set_fact:
+        skmo_values: "{{ lookup('file', skmo_values_file) | from_yaml }}"
+
+    - name: Set central CA bundle secret name
+      ansible.builtin.set_fact:
+        central_ca_bundle_secret_name: "{{ skmo_values.data.centralCaBundleSecretName }}"
+
+    - name: Create or append central CA bundle secret
+      ansible.builtin.shell: |
+        set -euo pipefail
+        tmpdir="$(mktemp -d)"
+        trap 'rm -rf "${tmpdir}"' EXIT  # clean up decoded certs even if a command fails
+        newkey="skmo-leaf-rootca.crt"
+        export TMPDIR="${tmpdir}"
+
+        if oc -n {{ central_namespace }} get secret \
+            {{ central_ca_bundle_secret_name }} >/dev/null 2>&1; then
+          oc -n {{ central_namespace }} get secret \
+            {{ central_ca_bundle_secret_name }} -o json | python3 -c '
+        import base64, json, os, sys
+        tmpdir = os.environ.get("TMPDIR")
+        data = json.load(sys.stdin).get("data", {})
+        for key, value in data.items():
+            path = os.path.join(tmpdir, key)
+            with open(path, "wb") as f:
+                f.write(base64.b64decode(value))
+        '
+        fi
+
+        oc -n {{ leaf_namespace }} get secret {{ leaf_rootca_secret }} \
+          -o jsonpath='{.data.tls\.crt}' | base64 -d \
+          > "${tmpdir}/${newkey}"
+
+        oc -n {{ central_namespace }} create secret generic \
+          {{ central_ca_bundle_secret_name }} \
+          --from-file="${tmpdir}" \
+          --dry-run=client -o yaml | oc apply -f -
+      args:
+        executable: /bin/bash
diff --git a/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo b/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo
new file mode 120000
index 0000000000..67c8e7f36c
--- /dev/null
+++ b/roles/ci_gen_kustomize_values/templates/multi-namespace-skmo
@@ -0,0 +1 @@
+multi-namespace
\ No newline at end of file
diff --git a/scenarios/reproducers/va-multi-skmo.yml b/scenarios/reproducers/va-multi-skmo.yml
new file mode 100644
index 0000000000..86ad2007b6
--- /dev/null
+++ b/scenarios/reproducers/va-multi-skmo.yml
@@ -0,0 +1,406 @@
+---
+cifmw_architecture_scenario: multi-namespace-skmo
+
+# HERE if you want to override kustomization, you can uncomment this parameter
+# and push the data structure you want to apply.
+# cifmw_architecture_user_kustomize:
+# stage_0:
+# 'network-values':
+# data:
+# starwars: Obiwan
+
+# HERE, if you want to stop the deployment loop at any stage, you can uncomment
+# the following parameter and update the value to match the stage you want to
+# reach. Known stages are:
+# pre_kustomize_stage_INDEX
+# pre_apply_stage_INDEX
+# post_apply_stage_INDEX
+#
+# cifmw_deploy_architecture_stopper:
+
+cifmw_arch_automation_file: multi-namespace-skmo.yaml
+cifmw_os_must_gather_additional_namespaces: kuttl,openshift-storage,sushy-emulator,openstack2
+cifmw_reproducer_validate_network_host: "192.168.122.1"
+cifmw_libvirt_manager_default_gw_nets:
+ - ocpbm
+ - osptrunk2
+cifmw_networking_mapper_interfaces_info_translations:
+ osp_trunk:
+ - controlplane
+ - ctlplane
+ osptrunk2:
+ - ctlplane2
+
+# Override the default 3-compute VA setting, since 3 computes in both namespaces is too expensive
+cifmw_libvirt_manager_compute_amount: 2
+
+cifmw_libvirt_manager_configuration:
+  networks:
+    osp_trunk: |
+      <network>
+        <name>osp_trunk</name>
+        <forward mode='nat'/>
+        <bridge name='osp_trunk' stp='on' delay='0'/>
+        <mtu size='1500'/>
+        <ip family='ipv4' address='192.168.122.1' prefix='24'>
+        </ip>
+      </network>
+    osptrunk2: |
+      <network>
+        <name>osptrunk2</name>
+        <forward mode='nat'/>
+        <bridge name='osptrunk2' stp='on' delay='0'/>
+        <mtu size='1500'/>
+        <ip family='ipv4' address='192.168.133.1' prefix='24'>
+        </ip>
+      </network>
+    ocpbm: |
+      <network>
+        <name>ocpbm</name>
+        <forward mode='nat'/>
+        <bridge name='ocpbm' stp='on' delay='0'/>
+        <mtu size='1500'/>
+        <ip family='ipv4' address='192.168.111.1' prefix='24'>
+        </ip>
+      </network>
+    ocppr: |
+      <network>
+        <name>ocppr</name>
+        <bridge name='ocppr' stp='on' delay='0'/>
+        <mtu size='1500'/>
+      </network>
+ vms:
+ ocp:
+ amount: 3
+ admin_user: core
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "ocp_master"
+ disksize: "100"
+ extra_disks_num: 3
+ extra_disks_size: "50G"
+ cpus: 16
+ memory: 32
+ root_part_id: 4
+ uefi: true
+ nets:
+ - ocppr
+ - ocpbm
+ - osp_trunk # ctlplane and isolated networks for openstack namespace cloud
+ - osptrunk2 # ctlplane and isolated networks for openstack2 namespace cloud
+ - osp_trunk # OVN datacentre for openstack namespace cloud
+ - osptrunk2 # OVN datacentre for openstack2 namespace cloud
+ compute:
+ uefi: "{{ cifmw_use_uefi }}"
+ root_part_id: "{{ cifmw_root_partition_id }}"
+ amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}"
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}"
+ memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}"
+ cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}"
+ nets:
+ - ocpbm
+ - osp_trunk
+ compute2:
+ uefi: "{{ cifmw_use_uefi }}"
+ root_part_id: "{{ cifmw_root_partition_id }}"
+ amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 2] | max }}"
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}"
+ memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}"
+ cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}"
+ nets:
+ - ocpbm
+ - osptrunk2
+ controller:
+ uefi: "{{ cifmw_use_uefi }}"
+ root_part_id: "{{ cifmw_root_partition_id }}"
+ image_url: "{{ cifmw_discovered_image_url }}"
+ sha256_image_name: "{{ cifmw_discovered_hash }}"
+ image_local_dir: "{{ cifmw_basedir }}/images/"
+ disk_file_name: "base-os.qcow2"
+ disksize: 50
+ memory: 8
+ cpus: 4
+ nets:
+ - ocpbm
+ - osp_trunk
+ - osptrunk2
+
+## devscript support for OCP deploy
+cifmw_devscripts_config_overrides:
+ fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}"
+
+# Set Logical Volume Manager Storage by default for local storage
+cifmw_use_lvms: true
+cifmw_lvms_disk_list:
+ - /dev/vda
+ - /dev/vdb
+ - /dev/vdc
+
+cifmw_networking_definition:
+ networks:
+ ctlplane:
+ network: "192.168.122.0/24"
+ gateway: "192.168.122.1"
+ dns:
+ - "192.168.122.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ netconfig:
+ ranges:
+ - start: 100
+ end: 120
+ - start: 150
+ end: 170
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ ctlplane2:
+ network: "192.168.133.0/24"
+ gateway: "192.168.133.1"
+ dns:
+ - "192.168.133.1"
+ mtu: 1500
+ tools:
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ netconfig:
+ ranges:
+ - start: 100
+ end: 120
+ - start: 150
+ end: 170
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ internalapi:
+ network: "172.17.0.0/24"
+ vlan: 20
+ mtu: 1496
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ internalapi2:
+ network: "172.17.10.0/24"
+ vlan: 30
+ mtu: 1496
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ storage:
+ network: "172.18.0.0/24"
+ vlan: 21
+ mtu: 1496
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ storage2:
+ network: "172.18.10.0/24"
+ vlan: 31
+ mtu: 1496
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ tenant:
+ network: "172.19.0.0/24"
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ vlan: 22
+ mtu: 1496
+ tenant2:
+ network: "172.19.10.0/24"
+ tools:
+ metallb:
+ ranges:
+ - start: 80
+ end: 90
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ multus:
+ ranges:
+ - start: 30
+ end: 70
+ vlan: 32
+ mtu: 1496
+ external:
+ network: "10.0.0.0/24"
+ tools:
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ vlan: 22
+ mtu: 1500
+ external2:
+ network: "10.10.0.0/24"
+ tools:
+ netconfig:
+ ranges:
+ - start: 100
+ end: 250
+ vlan: 32
+ mtu: 1500
+
+ group-templates:
+ ocps:
+ network-template:
+ range:
+ start: 10
+ length: 10
+ networks: &ocps_nets
+ ctlplane: {}
+ internalapi:
+ trunk-parent: ctlplane
+ tenant:
+ trunk-parent: ctlplane
+ storage:
+ trunk-parent: ctlplane
+ ctlplane2: {}
+ internalapi2:
+ trunk-parent: ctlplane2
+ tenant2:
+ trunk-parent: ctlplane2
+ storage2:
+ trunk-parent: ctlplane2
+ ocp_workers:
+ network-template:
+ range:
+ start: 20
+ length: 10
+ networks: *ocps_nets
+ computes:
+ network-template:
+ range:
+ start: 100
+ length: 21
+ networks:
+ ctlplane: {}
+ internalapi:
+ trunk-parent: ctlplane
+ tenant:
+ trunk-parent: ctlplane
+ storage:
+ trunk-parent: ctlplane
+ compute2s:
+ network-template:
+ range:
+ start: 200
+ length: 21
+ networks:
+ ctlplane2: {}
+ internalapi2:
+ trunk-parent: ctlplane2
+ tenant2:
+ trunk-parent: ctlplane2
+ storage2:
+ trunk-parent: ctlplane2
+ instances:
+ controller-0:
+ networks:
+ ctlplane:
+ ip: "192.168.122.9"
+ ctlplane2:
+ ip: "192.168.133.9"
+
+# Hooks
+post_deploy:
+ - name: Discover hypervisors for openstack2 namespace
+ type: playbook
+ source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/hooks/playbooks/nova_manage_discover_hosts.yml"
+ extra_vars:
+ namespace: openstack2
+ _cell_conductors: nova-cell0-conductor-0
+
+pre_admin_setup:
+ - name: Prepare OSP networks in openstack2 namespace
+ type: playbook
+ source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_osp_networks.yaml"
+ extra_vars:
+ cifmw_os_net_setup_namespace: openstack2
+ cifmw_os_net_setup_public_cidr: "192.168.133.0/24"
+ cifmw_os_net_setup_public_start: "192.168.133.230"
+ cifmw_os_net_setup_public_end: "192.168.133.250"
+ cifmw_os_net_setup_public_gateway: "192.168.133.1"
+
+post_tests:
+ - name: Run tempest against openstack2 namespace
+ type: playbook
+ source: "{{ ansible_user_dir }}/src/github.com/openstack-k8s-operators/ci-framework/playbooks/multi-namespace/ns2_validation.yaml"
+ extra_vars:
+ cifmw_test_operator_tempest_name: tempest-tests2
+ cifmw_test_operator_namespace: openstack2