diff --git a/ci/playbooks/clean-config-drives-only.yml b/ci/playbooks/clean-config-drives-only.yml new file mode 100644 index 0000000000..2538d861ce --- /dev/null +++ b/ci/playbooks/clean-config-drives-only.yml @@ -0,0 +1,49 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This playbook cleans only config drive ISO files to allow infrastructure reuse. +# It does NOT clean libvirt VMs or other resources - only config drives. +# This is needed when reusing infrastructure to avoid conflicts with existing +# ISO files that might be attached to VMs. + +- name: Clean config drives for infrastructure reuse + hosts: "{{ cifmw_target_host | default('localhost') }}" + gather_facts: true + tasks: + - name: Cleanup config_drive workdir + ansible.builtin.include_role: + name: config_drive + tasks_from: cleanup.yml + + - name: Remove ISO files from workload directory + when: cifmw_libvirt_manager_basedir is defined + ansible.builtin.find: + paths: "{{ cifmw_libvirt_manager_basedir }}/workload" + patterns: "*.iso" + register: _iso_files + failed_when: false + + - name: Delete ISO files from workload directory + when: + - cifmw_libvirt_manager_basedir is defined + - _iso_files.files | default([]) | length > 0 + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ _iso_files.files | default([]) }}" + failed_when: false + # Note: This may fail if ISO is attached to a running VM, but that's okay + # The config_drive role will handle the case where ISO doesn't exist diff --git a/cleanup-openstack-for-reuse.yml b/cleanup-openstack-for-reuse.yml new file mode 100644 index 0000000000..12e53dc630 --- /dev/null +++ b/cleanup-openstack-for-reuse.yml @@ -0,0 +1,108 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This playbook cleans up OpenStack resources while preserving the OpenShift +# cluster infrastructure for reuse. It removes: +# - All OpenStack CRs (ControlPlane, DataPlane, etc.) +# - Storage resources (PVCs, secrets, ConfigMaps) +# - Optionally: OpenStack API resources (servers, networks, volumes, etc.) +# +# Usage examples: +# +# Basic cleanup (removes OpenStack CRs and storage, keeps cluster): +# ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml +# +# Dry-run mode (preview what would be deleted): +# ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml \ +# -e dry_run=true +# +# Skip API resource cleanup (if needed): +# ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml \ +# -e cleanup_api_resources=false +# +# Selective cleanup using tags: +# ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml \ +# --tags cleanup_storage,cleanup_crs_direct +# +# Aggressive cleanup (removes everything including namespaces): +# ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml \ +# -e cleanup_api_resources=true \ +# -e cleanup_namespaces=true \ +# -e force_remove_finalizers=true + +- name: Clean OpenStack deployment for infrastructure reuse + hosts: "{{ target_host | default('localhost') }}" + gather_facts: true + vars: + # Dry-run mode - preview without making changes + cifmw_cleanup_openstack_dry_run: "{{ dry_run | default(false) }}" + # By default, clean OpenStack CRs, storage, and API resources but keep OpenShift cluster + # Set to false to skip OpenStack API resource cleanup + cifmw_cleanup_openstack_delete_api_resources: "{{ cleanup_api_resources | default(true) }}" + # Set to true to delete namespaces (use with caution) + cifmw_cleanup_openstack_delete_namespaces: "{{ cleanup_namespaces | default(false) }}" + # Set to true to force remove finalizers from stuck CRs + cifmw_cleanup_openstack_force_remove_finalizers: "{{ force_remove_finalizers | default(false) }}" + tasks: + - name: Cleanup OpenStack deployment + ansible.builtin.include_role: + name: cleanup_openstack + + - name: Verify cleanup succeeded (if not dry-run) + when: not cifmw_cleanup_openstack_dry_run | bool + block: + - name: Verify OpenStackControlPlane CRs are removed + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig | default(omit) }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + namespace: "{{ cifmw_kustomize_deploy_namespace | default('openstack') }}" + register: _verify_controlplane + failed_when: false + + - name: Verify OpenStackDataPlaneNodeSet CRs are removed + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig | default(omit) }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + namespace: "{{ cifmw_kustomize_deploy_namespace | default('openstack') }}" + register: _verify_nodeset + failed_when: false + + - name: Display verification results + ansible.builtin.debug: + msg: | + ╔══════════════════════════════════════════════════════════════════╗ + ║ Cleanup Verification ║ + ╚══════════════════════════════════════════════════════════════════╝ + + ✓ OpenStackControlPlane CRs: {{ (_verify_controlplane.resources | default([]) | length == 0) | ternary('✓ Removed', '✗ Still present (' + (_verify_controlplane.resources | default([]) | length | string) + ')') }} + ✓ OpenStackDataPlaneNodeSet CRs: {{ (_verify_nodeset.resources | default([]) | length == 0) | ternary('✓ Removed', '✗ Still present (' + (_verify_nodeset.resources | default([]) | length | string) + ')') }} + + {% if (_verify_controlplane.resources | default([]) | length > 0) or (_verify_nodeset.resources | default([]) | length > 0) %} + ⚠ Warning: Some CRs were not fully removed. Consider: + - Running cleanup again + - Using -e force_remove_finalizers=true + - Manually investigating stuck resources + {% else %} + ═══════════════════════════════════════════════════════════════════ + ✓ Cleanup verified successfully! Cluster ready for reuse. + ═══════════════════════════════════════════════════════════════════ + {% endif %} diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 7c816e0703..819951a9f7 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -55,6 +55,7 @@ buildah buildpkgs cacert cacheable +certmanager catalogsource cci ccitredhat @@ -138,6 +139,7 @@ deepscrub delorean deployer deprovision +deprovisioned deps dest dev @@ -185,6 +187,7 @@ extraRPMs ezzmy favorit fbqufbqkfbzxrja +finalizers fci fdp fedoraproject @@ -299,6 +302,7 @@ kvm lacp lajly LDAP +Lifecycle ldp libguestfs libvirt @@ -415,8 +419,12 @@ openstack openstackclient openstackcontrolplane openstackdataplane +openstackdataplanedeployment +OpenStackDataPlaneDeployment openstackdataplanenodeset openstackdataplanenodesets +openstackdataplaneservice +OpenStackDataPlaneService openstackprovisioner openstacksdk openstackversion @@ -443,6 +451,8 @@ passwd passwordless pastebin pem +persistentvolumes +PersistentVolumes pkgs pki png @@ -468,6 +478,7 @@ pubkey publicdomain pullsecret pvs +PVCs pwd pxe py @@ -491,6 +502,7 @@ readmes readthedocs reauthenticate rebaser +reusability redfish redhat refspec diff --git a/roles/cleanup_openstack/README.md b/roles/cleanup_openstack/README.md index c1fef01b85..b77a057924 100644 --- a/roles/cleanup_openstack/README.md +++ b/roles/cleanup_openstack/README.md @@ -1,11 +1,245 @@ # cleanup_openstack -Cleans up openstack resources created by CIFMW by deleting CRs +Cleans up OpenStack resources created by CIFMW while preserving the OpenShift cluster infrastructure for reuse. This role removes OpenStack-specific resources (CRs, API resources, storage) but keeps infrastructure operators and cluster components intact. ## Privilege escalation -None + +May require privilege escalation for: +- Removing artifacts and logs from protected directories +- Installing openstackclient locally (if needed) ## Parameters -As this role is for cleanup it utilizes default vars from other roles which can be referenced at their role readme page: kustomize_deploy, deploy_bmh -* `cifmw_cleanup_openstack_detach_bmh`: (Boolean) Detach BMH when cleaning flag, this is used to avoid deprovision when is not required. Default: `true` +### Cleanup Behavior + +* `cifmw_cleanup_openstack_dry_run`: (Boolean) When true, only reports what would be deleted without making changes. Useful for verification before actual cleanup. Default: `false` + +* `cifmw_cleanup_openstack_detach_bmh`: (Boolean) Detach BareMetalHost resources to prevent deprovisioning. This allows reuse of physical hardware. Default: `true` + +* `cifmw_cleanup_openstack_delete_crs_direct`: (Boolean) Delete OpenStack CRs directly from cluster (not just from files). This ensures all OpenStackControlPlane, OpenStackDataPlaneDeployment, OpenStackDataPlaneNodeSet, and other CRs are removed. Default: `true` + +* `cifmw_cleanup_openstack_delete_api_resources`: (Boolean) Delete OpenStack API resources (servers, networks, volumes, flavors, security groups, etc.) using the OpenStack client. This requires either an openstackclient pod in the cluster or openstackclient installed locally. Default: `true` + +* `cifmw_cleanup_openstack_delete_storage`: (Boolean) Delete PVCs, secrets, ConfigMaps, and release PersistentVolumes. Default: `true` + +* `cifmw_cleanup_openstack_delete_namespaces`: (Boolean) Delete OpenStack namespaces if they are empty. Use with caution as this will remove the namespace entirely. Default: `false` + +* `cifmw_cleanup_openstack_force_remove_finalizers`: (Boolean) Force remove finalizers from stuck OpenStackControlPlane CRs. Use only if CRs are stuck in terminating state. Default: `false` + +* `cifmw_cleanup_openstack_cloud_name`: (String) OpenStack cloud name to use for API cleanup. Default: `default` + +* `cifmw_cleanup_openstack_keep_generated_crs`: (Boolean) Keep generated CR YAML files after deletion (for debugging). Default: `false` + +### Path Configuration + +The role includes default values for paths used by the `kustomize_deploy` and `deploy_bmh` roles. These can be overridden if needed: + +* `cifmw_kustomize_deploy_basedir`: Base directory for kustomize deployment artifacts. Default: `{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}` + +* `cifmw_kustomize_deploy_kustomizations_dest_dir`: Directory containing kustomization files. Default: `{{ cifmw_kustomize_deploy_basedir }}/artifacts/kustomize_deploy` + +* `cifmw_kustomize_deploy_namespace`: OpenStack namespace. Default: `openstack` + +* `cifmw_kustomize_deploy_operators_namespace`: OpenStack operators namespace. Default: `openstack-operators` + +* `cifmw_deploy_bmh_basedir`: Base directory for BMH artifacts. Default: `{{ cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') }}` + +* `cifmw_deploy_bmh_dest_dir`: Directory containing BMH CRs. Default: `{{ cifmw_deploy_bmh_basedir }}/artifacts/deploy_bmh` + +* `cifmw_deploy_bmh_namespace`: Namespace for BaremetalHost resources. Default: `openshift-machine-api` + +### OpenShift Cluster Access + +* `cifmw_openshift_kubeconfig`: (String) Path to kubeconfig file. Optional - will be inherited from `openshift_login` role if available, falls back to `KUBECONFIG` environment variable, or defaults to `~/.kube/config`. + +* `cifmw_openshift_token`: (String) OpenShift API token. Optional. + +* `cifmw_openshift_context`: (String) OpenShift context to use. Optional. + +### Architecture Variables (Optional) + +These are only needed if you deployed using architecture-based automation. If not provided, cleanup will skip architecture-specific tasks: + +* `cifmw_architecture_repo`: (String) Path to architecture repository. Optional. + +* `cifmw_architecture_scenario`: (String) Scenario name used during deployment. Optional. + +* `cifmw_architecture_automation_file`: (String) Direct path to automation YAML file. Optional - overrides repo+scenario. + +**Note**: This role is self-contained and does not require the `kustomize_deploy`, `deploy_bmh`, or `openshift_login` roles to be present. All necessary default values are included in this role's `defaults/main.yaml`. Architecture variables are optional and only needed for architecture-based deployments. + +## What gets cleaned up + +### Always cleaned (when enabled): +- OpenStack CRs (OpenStackControlPlane, OpenStackDataPlaneDeployment, OpenStackDataPlaneNodeSet, OpenStackDataPlaneService, OpenStackClient, OpenStackVersion) +- Bare Metal Hosts (detached, not deprovisioned) +- OpenStack deployment CRs from kustomize files +- OpenStack API resources (servers, networks, volumes, flavors, security groups, load balancers, Swift containers, etc.) +- PVCs, secrets, ConfigMaps in OpenStack namespace +- PersistentVolumes in Released state +- Certificates and Issuers (cert-manager) +- Artifacts, logs, and test directories + +### Optionally cleaned: +- Namespaces (if empty and explicitly enabled) + +## What is preserved + +The following infrastructure components are **NOT** deleted to preserve cluster reusability: +- NMState operator (network management) +- MetalLB operator (load balancing) +- OLM (Operator Lifecycle Manager) +- cert-manager operator +- OpenShift cluster operators +- Cluster-level infrastructure resources + +## Usage + +### Basic cleanup +Removes OpenStack CRs and storage, keeps OpenShift cluster: + +```yaml +- name: Cleanup OpenStack + include_role: + name: cleanup_openstack +``` + +### Dry-run mode +Preview what would be deleted without making changes: + +```yaml +- name: Preview cleanup + include_role: + name: cleanup_openstack + vars: + cifmw_cleanup_openstack_dry_run: true +``` + +### Selective cleanup using tags + +```bash +# Only detach BMH +ansible-playbook cleanup-openstack-for-reuse.yml --tags cleanup_bmh + +# Only clean CRs from cluster +ansible-playbook cleanup-openstack-for-reuse.yml --tags cleanup_crs_direct + +# Clean CRs and storage +ansible-playbook cleanup-openstack-for-reuse.yml --tags cleanup_crs_direct,cleanup_storage + +# Skip OpenStack API cleanup +ansible-playbook cleanup-openstack-for-reuse.yml --skip-tags cleanup_api +``` + +Available tags: +- `cleanup_bmh` - Detach BareMetalHosts +- `cleanup_crs` - Delete CRs from files +- `cleanup_crs_direct` - Delete CRs directly from cluster +- `cleanup_api` - Clean OpenStack API resources +- `cleanup_storage` - Clean storage resources (PVCs, secrets, PVs) +- `cleanup_namespaces` - Delete empty namespaces +- `cleanup_artifacts` - Remove artifacts and logs + +### Disable specific cleanup operations + +```yaml +- name: Cleanup without API resources + include_role: + name: cleanup_openstack + vars: + cifmw_cleanup_openstack_delete_api_resources: false +``` + +### Aggressive cleanup +Removes everything including namespaces: + +```yaml +- name: Aggressive cleanup + include_role: + name: cleanup_openstack + vars: + cifmw_cleanup_openstack_delete_api_resources: true + cifmw_cleanup_openstack_delete_namespaces: true + cifmw_cleanup_openstack_force_remove_finalizers: true +``` + +### With custom kubeconfig + +```yaml +- name: Cleanup with custom kubeconfig + include_role: + name: cleanup_openstack + vars: + cifmw_openshift_kubeconfig: /path/to/kubeconfig +``` + +## Cleanup Summary + +The role provides a detailed summary at the end showing: +- Execution mode (dry-run or actual) +- Duration in seconds +- Number of CRs deleted +- API resources cleanup status +- Storage cleanup status +- BMH detachment count +- Namespaces deleted (if any) +- Errors encountered (if any) + +## Error Handling + +The role is designed to be fault-tolerant: +- Failed operations don't stop the cleanup process +- Missing resources are skipped gracefully +- Kubernetes API failures are handled with retries +- Comprehensive error reporting in final summary + +## Integration + +This role integrates seamlessly with: +- **openshift_login**: Inherits kubeconfig and authentication +- **kustomize_deploy**: Uses deployment paths and namespaces +- **deploy_bmh**: Handles BaremetalHost cleanup +- **test_operator**: Cleans up test resources +- **architecture scenarios**: Automatically detects and processes architecture-based deployments + +## Examples + +### Example 1: CI job cleanup for infrastructure reuse +```bash +ansible-playbook -i inventory.yml cleanup-openstack-for-reuse.yml +``` + +### Example 2: Troubleshooting - dry-run first +```bash +# Preview cleanup +ansible-playbook cleanup-openstack-for-reuse.yml -e cifmw_cleanup_openstack_dry_run=true + +# If preview looks good, execute +ansible-playbook cleanup-openstack-for-reuse.yml +``` + +### Example 3: Clean only specific components +```bash +# Only clean storage and CRs, preserve API resources +ansible-playbook cleanup-openstack-for-reuse.yml \ + --tags cleanup_storage,cleanup_crs_direct +``` + +### Example 4: Architecture-based deployment cleanup +```yaml +- name: Cleanup architecture deployment + include_role: + name: cleanup_openstack + vars: + cifmw_architecture_repo: /path/to/architecture + cifmw_architecture_scenario: hci +``` + +## Verification + +After cleanup, verify the cluster state: +- OpenStack CRs should be gone: `oc get openstackcontrolplane -n openstack` +- Infrastructure operators should remain: `oc get pods -n openshift-operators` +- PVCs should be cleaned: `oc get pvc -n openstack` + +See also: `playbooks/cleanup/verify-cleanup.yaml` in ci-framework-jobs repository. diff --git a/roles/cleanup_openstack/defaults/main.yaml b/roles/cleanup_openstack/defaults/main.yaml index 1f6654fe5d..9332aace04 100644 --- a/roles/cleanup_openstack/defaults/main.yaml +++ b/roles/cleanup_openstack/defaults/main.yaml @@ -1 +1,68 @@ +# Cleanup behavior flags cifmw_cleanup_openstack_detach_bmh: true +# Dry-run mode - when true, only reports what would be deleted without actually deleting +cifmw_cleanup_openstack_dry_run: false +# Delete OpenStack CRs directly from cluster (not just from files) +cifmw_cleanup_openstack_delete_crs_direct: true +# Delete OpenStack API resources (servers, networks, volumes, etc.) +cifmw_cleanup_openstack_delete_api_resources: true +# Delete PVCs, secrets, and storage resources +cifmw_cleanup_openstack_delete_storage: true +# Delete namespaces if empty (use with caution) +cifmw_cleanup_openstack_delete_namespaces: false +# Force remove finalizers from stuck CRs +cifmw_cleanup_openstack_force_remove_finalizers: false +# OpenStack cloud name for API cleanup +cifmw_cleanup_openstack_cloud_name: default +# Keep generated CR files after cleanup +cifmw_cleanup_openstack_keep_generated_crs: false + +# Base directory for CI framework data +# This is used by various roles and should default to a standard location +cifmw_basedir: "{{ ansible_user_dir }}/ci-framework-data" + +# Variables from kustomize_deploy role +# These are needed for cleanup to locate deployment artifacts and namespaces +cifmw_kustomize_deploy_basedir: >- + {{ + cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') + }} + +cifmw_kustomize_deploy_kustomizations_dest_dir: >- + {{ + [ + cifmw_kustomize_deploy_basedir, + 'artifacts', + 'kustomize_deploy' + ] | path_join + }} + +cifmw_kustomize_deploy_namespace: openstack +cifmw_kustomize_deploy_operators_namespace: openstack-operators + +# Variables from deploy_bmh role +# These are needed for cleanup to locate and detach baremetal hosts +cifmw_deploy_bmh_basedir: >- + {{ + cifmw_basedir | default(ansible_user_dir ~ '/ci-framework-data') + }} + +cifmw_deploy_bmh_dest_dir: >- + {{ + [ + cifmw_deploy_bmh_basedir, + 'artifacts', + 'deploy_bmh' + ] | path_join + }} + +cifmw_deploy_bmh_namespace: openshift-machine-api + +# Variables for OpenShift cluster access +# Default kubeconfig path (can be overridden by cifmw_openshift_kubeconfig or cifmw_openshift_login_kubeconfig) +cifmw_cleanup_openstack_kubeconfig_default: "{{ ansible_user_dir }}/.kube/config" + +# Architecture variables (optional - if not provided, cleanup will skip architecture-specific tasks) +# cifmw_architecture_repo: Path to architecture repository (if using architecture-based deployment) +# cifmw_architecture_scenario: Scenario name (if using architecture-based deployment) +# cifmw_architecture_automation_file: Direct path to automation file (overrides repo+scenario) diff --git a/roles/cleanup_openstack/meta/main.yml b/roles/cleanup_openstack/meta/main.yml new file mode 100644 index 0000000000..3f1818b2b3 --- /dev/null +++ b/roles/cleanup_openstack/meta/main.yml @@ -0,0 +1,40 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +galaxy_info: + author: CI Framework Team + description: Clean up OpenStack resources while preserving cluster infrastructure for reuse + company: Red Hat + license: Apache-2.0 + min_ansible_version: "2.15" + platforms: + - name: EL + versions: + - "9" + galaxy_tags: + - openstack + - kubernetes + - cleanup + - ci + +dependencies: [] + +# Note: This role is self-contained and does not require dependencies. +# It can optionally integrate with: +# - openshift_login (for kubeconfig) +# - test_operator (for test cleanup) +# But these are not hard dependencies. + diff --git a/roles/cleanup_openstack/tasks/cleanup_crs.yaml b/roles/cleanup_openstack/tasks/cleanup_crs.yaml index d6e7bdb5cd..d61745c12c 100644 --- a/roles/cleanup_openstack/tasks/cleanup_crs.yaml +++ b/roles/cleanup_openstack/tasks/cleanup_crs.yaml @@ -7,7 +7,7 @@ - name: Cleaning operators resources kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + kubeconfig: "{{ _k8s_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit) }}" context: "{{ cifmw_openshift_context | default(omit) }}" state: absent @@ -16,9 +16,10 @@ wait_timeout: 600 loop: "{{ _crs_to_delete_files.results }}" register: _cleanup_results - until: "_cleanup_results is success" + until: _cleanup_results is not failed retries: 3 delay: 120 + failed_when: false when: - item.stat.exists @@ -29,3 +30,11 @@ loop: "{{ _crs_to_delete_files.results }}" when: - item.stat.exists + - not cifmw_cleanup_openstack_keep_generated_crs | default(false) | bool + +- name: Display CR cleanup status + ansible.builtin.debug: + msg: >- + CR cleanup completed: + - Deleted: {{ _crs_to_delete_files.results | selectattr('stat.exists') | list | length }} CRs from cluster + - Files {% if cifmw_cleanup_openstack_keep_generated_crs %}kept{% else %}removed{% endif %} diff --git a/roles/cleanup_openstack/tasks/cleanup_crs_direct.yaml b/roles/cleanup_openstack/tasks/cleanup_crs_direct.yaml new file mode 100644 index 0000000000..a89d99e1b5 --- /dev/null +++ b/roles/cleanup_openstack/tasks/cleanup_crs_direct.yaml @@ -0,0 +1,101 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Delete OpenStackControlPlane CRs + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + namespace: "{{ _openstack_namespace }}" + state: absent + wait: true + wait_timeout: 600 + register: _delete_controlplane_result + failed_when: false + until: _delete_controlplane_result is succeeded or (_delete_controlplane_result.failed and 'not found' in (_delete_controlplane_result.msg | default(''))) + retries: 3 + delay: 30 + +- name: Wait for control plane pods to terminate + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Pod + register: _remaining_pods + until: >- + _remaining_pods is succeeded and + (_remaining_pods.resources | default([]) | length == 0 or + (_remaining_pods.resources | default([]) | selectattr('metadata.name', 'match', '.*(rabbitmq|galera|openstack).*') | list | length == 0)) + retries: 60 + delay: 10 + failed_when: false + when: _delete_controlplane_result is succeeded + +- name: Delete OpenStack CRs by kind + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: "{{ item.api_version }}" + kind: "{{ item.kind }}" + namespace: "{{ _openstack_namespace }}" + state: absent + wait: true + wait_timeout: "{{ item.wait_timeout | default(300) }}" + failed_when: false + loop: + - api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneDeployment + wait_timeout: 600 + - api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNodeSet + wait_timeout: 600 + - api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneService + wait_timeout: 300 + - api_version: dataplane.openstack.org/v1beta1 + kind: OpenStackDataPlaneNode + wait_timeout: 300 + - api_version: client.openstack.org/v1beta1 + kind: OpenStackClient + wait_timeout: 300 + - api_version: core.openstack.org/v1beta1 + kind: OpenStackVersion + wait_timeout: 300 + - api_version: openstack.org/v1beta1 + kind: OpenStack + wait_timeout: 300 + loop_control: + label: "{{ item.kind }}" + +- name: Remove finalizers from stuck OpenStackControlPlane CRs + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + api_version: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + namespace: "{{ _openstack_namespace }}" + state: patched + definition: + metadata: + finalizers: [] + failed_when: false + when: cifmw_cleanup_openstack_force_remove_finalizers | default(false) diff --git a/roles/cleanup_openstack/tasks/cleanup_namespaces.yaml b/roles/cleanup_openstack/tasks/cleanup_namespaces.yaml new file mode 100644 index 0000000000..08134de2ea --- /dev/null +++ b/roles/cleanup_openstack/tasks/cleanup_namespaces.yaml @@ -0,0 +1,90 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Get all resources in OpenStack namespace + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + register: _openstack_namespace_resources + failed_when: false + +- name: Check if OpenStack namespace is empty + ansible.builtin.set_fact: + _openstack_namespace_empty: >- + {{ + (_openstack_namespace_resources is succeeded) and + (_openstack_namespace_resources.resources | default([]) | + rejectattr('kind', 'in', ['Namespace', 'ServiceAccount']) | + list | length) == 0 + }} + +- name: Delete OpenStack namespace if empty + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: Namespace + name: "{{ _openstack_namespace }}" + state: absent + wait: true + wait_timeout: 300 + failed_when: false + when: + - _openstack_namespace_empty + - cifmw_cleanup_openstack_delete_namespaces + +- name: Get all resources in OpenStack operators namespace + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_operators_namespace }}" + register: _operators_namespace_resources + failed_when: false + +- name: Check if OpenStack operators namespace is empty + ansible.builtin.set_fact: + _operators_namespace_empty: >- + {{ + (_operators_namespace_resources is succeeded) and + (_operators_namespace_resources.resources | default([]) | + rejectattr('kind', 'in', ['Namespace', 'ServiceAccount']) | + list | length) == 0 + }} + +- name: Delete OpenStack operators namespace if empty + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: Namespace + name: "{{ _openstack_operators_namespace }}" + state: absent + wait: true + wait_timeout: 300 + failed_when: false + when: + - _operators_namespace_empty + - cifmw_cleanup_openstack_delete_namespaces + +- name: Display namespace cleanup status + ansible.builtin.debug: + msg: >- + Namespace cleanup: + - {{ _openstack_namespace }}: {{ 'deleted' if (_openstack_namespace_empty and cifmw_cleanup_openstack_delete_namespaces) else 'kept (not empty or deletion disabled)' }} + - {{ _openstack_operators_namespace }}: {{ 'deleted' if (_operators_namespace_empty and cifmw_cleanup_openstack_delete_namespaces) else 'kept (not empty or deletion disabled)' }} diff --git a/roles/cleanup_openstack/tasks/cleanup_openstack_api.yaml b/roles/cleanup_openstack/tasks/cleanup_openstack_api.yaml new file mode 100644 index 0000000000..1b4065045a --- /dev/null +++ b/roles/cleanup_openstack/tasks/cleanup_openstack_api.yaml @@ -0,0 +1,195 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Check if openstackclient pod exists + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Pod + name: openstackclient + register: _openstackclient_pod + failed_when: false + +- name: Check if openstackclient is available locally + ansible.builtin.command: + cmd: which openstack + register: _openstackclient_local + changed_when: false + failed_when: false + +- name: Set cleanup method + ansible.builtin.set_fact: + _cleanup_method: >- + {{ + 'pod' if (_openstackclient_pod is succeeded and _openstackclient_pod.resources | default([]) | length > 0) + else ('local' if _openstackclient_local.rc == 0 else 'skip') + }} + +- name: Fetch OpenStack cloud config from pod + kubernetes.core.k8s_cp: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + pod: openstackclient + remote_path: /home/cloud-admin/.config/openstack/ + local_path: "{{ ansible_user_dir }}/.config/openstack/" + state: from_pod + failed_when: false + when: + - _cleanup_method == 'pod' + - _openstackclient_pod is succeeded + - _openstackclient_pod.resources | default([]) | length > 0 + +- name: Install openstackclient locally + ansible.builtin.dnf: + name: python3-openstackclient + state: present + become: true + when: _cleanup_method == 'local' + +- name: Delete OpenStack API resources + ansible.builtin.shell: | + set -o pipefail + # Helper function to safely delete resources + delete_resources() { + local resource_type=$1 + local list_cmd=$2 + local delete_cmd=$3 + local ids + ids=$($list_cmd 2>/dev/null || true) + if [ -n "$ids" ]; then + echo "$ids" | while read -r id; do + [ -n "$id" ] && $delete_cmd "$id" 2>/dev/null || true + done + fi + } + + # Delete flavors + delete_resources "flavors" \ + "openstack flavor list -c ID -f value" \ + "openstack flavor delete" + + # Delete servers + delete_resources "servers" \ + "openstack server list --all-projects -c ID -f value" \ + "openstack server delete" + + # Wait for servers to be deleted + timeout=300 + elapsed=0 + while [ $elapsed -lt $timeout ]; do + if [ -z "$(openstack server list --all-projects -c ID -f value 2>/dev/null)" ]; then + break + fi + sleep 5 + elapsed=$((elapsed + 5)) + done + + # Delete volumes + delete_resources "volumes" \ + "openstack volume list --all-projects -c ID -f value" \ + "openstack volume delete --force" + + # Delete images + delete_resources "images" \ + "openstack image list -c ID -f value" \ + "openstack image delete" + + # Delete floating IPs + delete_resources "floating IPs" \ + "openstack floating ip list -c ID -f value" \ + "openstack floating ip delete" + + # Delete network trunks + delete_resources "network trunks" \ + "openstack network trunk list -c ID -f value" \ + "openstack network trunk delete" + + # Delete routers and their subnets + for router in $(openstack router list -f value -c ID 2>/dev/null || true); do + [ -z "$router" ] && continue + for subnet in $(openstack subnet list -c ID -f value 2>/dev/null || true); do + [ -z "$subnet" ] && continue + openstack router remove subnet "$router" "$subnet" 2>/dev/null || true + done + openstack router unset --external-gateway "$router" 2>/dev/null || true + openstack router delete "$router" 2>/dev/null || true + done + + # Delete ports + delete_resources "ports" \ + "openstack port list -c ID -f value" \ + "openstack port delete" + + # Delete networks + delete_resources "networks" \ + "openstack network list -c ID -f value" \ + "openstack network delete" + + # Delete security groups (except default) + openstack security group list -c ID -f value 2>/dev/null | while read -r sg_id; do + [ -z "$sg_id" ] && continue + sg_name=$(openstack security group show "$sg_id" -c name -f value 2>/dev/null || echo "") + if [ "$sg_name" != "default" ] && [ -n "$sg_name" ]; then + openstack security group delete "$sg_id" 2>/dev/null || true + fi + done + + # Delete keypairs + delete_resources "keypairs" \ + "openstack keypair list -c Name -f value" \ + "openstack keypair delete" + + # Delete roles (except admin and member) + openstack role list -c Name -f value 2>/dev/null | grep -v -E '^(admin|member)$' | while read -r role; do + [ -z "$role" ] && continue + openstack role delete "$role" 2>/dev/null || true + done + + # Delete aggregates + for agg in $(openstack aggregate list -f value -c ID 2>/dev/null || true); do + [ -z "$agg" ] && continue + for host in $(openstack aggregate show "$agg" -c hosts -f value 2>/dev/null | awk -F "'" '{print $2}' || true); do + [ -z "$host" ] && continue + openstack aggregate remove host "$agg" "$host" 2>/dev/null || true + done + openstack aggregate delete "$agg" 2>/dev/null || true + done + + # Delete load balancers (Octavia) + delete_resources "load balancers" \ + "openstack loadbalancer list -c id -f value" \ + "openstack loadbalancer delete --cascade" + + # Delete containers (Swift) + openstack container list 2>/dev/null | awk 'NR>3 {print $1}' | while read -r container; do + [ -z "$container" ] && continue + openstack container delete "$container" --recursive 2>/dev/null || true + done + environment: + OS_CLOUD: "{{ cifmw_cleanup_openstack_cloud_name | default('default') }}" + when: _cleanup_method != 'skip' + register: _api_cleanup_result + failed_when: false + changed_when: _api_cleanup_result.rc == 0 + +- name: Display cleanup result + ansible.builtin.debug: + msg: "OpenStack API resource cleanup {{ 'completed' if _cleanup_method != 'skip' else 'skipped (no openstackclient available)' }}" + when: _cleanup_method != 'skip' diff --git a/roles/cleanup_openstack/tasks/cleanup_storage.yaml b/roles/cleanup_openstack/tasks/cleanup_storage.yaml new file mode 100644 index 0000000000..bf0d5f8cb1 --- /dev/null +++ b/roles/cleanup_openstack/tasks/cleanup_storage.yaml @@ -0,0 +1,170 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Get all PVCs in OpenStack namespace + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: PersistentVolumeClaim + register: _pvc_list + failed_when: false + +- name: Delete PVCs in OpenStack namespace + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: PersistentVolumeClaim + name: "{{ item.metadata.name }}" + state: absent + wait: true + wait_timeout: 300 + loop: "{{ _pvc_list.resources | default([]) }}" + loop_control: + label: "{{ item.metadata.name }}" + failed_when: false + when: + - _pvc_list is succeeded + - _pvc_list.resources | default([]) | length > 0 + +- name: Get all secrets in OpenStack namespace + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Secret + register: _secret_list + failed_when: false + +- name: Remove finalizers from secrets and delete them + when: + - _secret_list is succeeded + - _secret_list.resources | default([]) | length > 0 + block: + - name: Remove finalizers from secret + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Secret + name: "{{ item.metadata.name }}" + state: patched + definition: + metadata: + finalizers: [] + loop: "{{ _secret_list.resources | default([]) }}" + loop_control: + label: "{{ item.metadata.name }}" + failed_when: false + when: _secret_list.resources | default([]) | length > 0 + + - name: Delete secrets + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Secret + name: "{{ item.metadata.name }}" + state: absent + wait: true + wait_timeout: 60 + loop: "{{ _secret_list.resources | default([]) }}" + loop_control: + label: "{{ item.metadata.name }}" + failed_when: false + when: _secret_list.resources | default([]) | length > 0 + +- name: Get all PersistentVolumes in Released state + kubernetes.core.k8s_info: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: PersistentVolume + register: _pv_list + failed_when: false + +- name: Release PersistentVolumes by removing claimRef + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + kind: PersistentVolume + name: "{{ item.metadata.name }}" + state: patched + definition: + spec: + claimRef: null + loop: "{{ _pv_list.resources | default([]) | selectattr('status.phase', 'equalto', 'Released') | list }}" + loop_control: + label: "{{ item.metadata.name }}" + failed_when: false + when: + - _pv_list is succeeded + - _pv_list.resources | default([]) | selectattr('status.phase', 'equalto', 'Released') | list | length > 0 + +- name: Delete ConfigMaps in OpenStack namespace + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: ConfigMap + state: absent + wait: true + wait_timeout: 60 + failed_when: false + +- name: Delete Certificates and Issuers (cert-manager) + block: + - name: Delete cert-manager resources by kind + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + api_version: cert-manager.io/v1 + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + state: absent + wait: true + wait_timeout: 60 + failed_when: false + loop: + - kind: Issuer + - kind: Certificate + - kind: Issuer + name: rootca-internal + loop_control: + label: "{{ item.kind }}{{ ' (' + item.name + ')' if item.name is defined else '' }}" + + - name: Delete rootca-internal secret + kubernetes.core.k8s: + kubeconfig: "{{ _k8s_kubeconfig }}" + api_key: "{{ cifmw_openshift_token | default(omit) }}" + context: "{{ cifmw_openshift_context | default(omit) }}" + namespace: "{{ _openstack_namespace }}" + kind: Secret + name: rootca-internal + state: absent + wait: true + wait_timeout: 60 + failed_when: false diff --git a/roles/cleanup_openstack/tasks/common.yaml b/roles/cleanup_openstack/tasks/common.yaml new file mode 100644 index 0000000000..9b22ef213e --- /dev/null +++ b/roles/cleanup_openstack/tasks/common.yaml @@ -0,0 +1,30 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Common variables and facts used across cleanup tasks +- name: Set common OpenStack namespace and kubeconfig facts + ansible.builtin.set_fact: + _openstack_namespace: "{{ cifmw_kustomize_deploy_namespace | default('openstack') }}" + _openstack_operators_namespace: "{{ cifmw_kustomize_deploy_operators_namespace | default('openstack-operators') }}" + _k8s_kubeconfig: >- + {{ + cifmw_openshift_kubeconfig | + default(cifmw_openshift_login_kubeconfig) | + default( + ansible_env.KUBECONFIG if 'KUBECONFIG' in ansible_env else + cifmw_cleanup_openstack_kubeconfig_default + ) + }} diff --git a/roles/cleanup_openstack/tasks/detach_bmh.yaml b/roles/cleanup_openstack/tasks/detach_bmh.yaml index 0c047b3be2..560a9226b8 100644 --- a/roles/cleanup_openstack/tasks/detach_bmh.yaml +++ b/roles/cleanup_openstack/tasks/detach_bmh.yaml @@ -5,7 +5,7 @@ block: - name: Patch bmh with detached kubernetes.core.k8s: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + kubeconfig: "{{ _k8s_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit)}}" state: patched @@ -25,18 +25,22 @@ - name: Wait for operationalStatus to become detached kubernetes.core.k8s_info: - kubeconfig: "{{ cifmw_openshift_kubeconfig }}" + kubeconfig: "{{ _k8s_kubeconfig }}" api_key: "{{ cifmw_openshift_token | default(omit)}}" context: "{{ cifmw_openshift_context | default(omit)}}" namespace: "{{ cifmw_deploy_bmh_namespace }}" kind: BareMetalHost api_version: metal3.io/v1alpha1 name: "{{ item }}" + register: bmh_status retries: 60 delay: 10 - until: - - bmh_status.resources | length == 0 or bmh_status.resources[0].status.operationalStatus == 'detached' - register: bmh_status + until: >- + bmh_status is succeeded and + (bmh_status.resources | default([]) | length == 0 or + (bmh_status.resources | default([]) | length > 0 and + bmh_status.resources[0].status.operationalStatus | default('') == 'detached')) + failed_when: false loop: "{{ cifmw_deploy_bmh_bm_hosts_list }}" loop_control: label: "{{ item }}" diff --git a/roles/cleanup_openstack/tasks/main.yaml b/roles/cleanup_openstack/tasks/main.yaml index b8e194df94..a08e429e1b 100644 --- a/roles/cleanup_openstack/tasks/main.yaml +++ b/roles/cleanup_openstack/tasks/main.yaml @@ -1,17 +1,53 @@ --- -- name: Include required vars - ansible.builtin.include_vars: - file: "{{ item }}" - loop: - - roles/kustomize_deploy/defaults/main.yml - - roles/deploy_bmh/defaults/main.yml +# Note: Required variables from kustomize_deploy and deploy_bmh roles +# are now defined in this role's defaults/main.yaml to make the role +# self-contained and work from any playbook location. + +- name: Set common facts and variables + ansible.builtin.import_tasks: common.yaml + +- name: Initialize cleanup tracking + ansible.builtin.set_fact: + _cleanup_summary: + started_at: "{{ ansible_date_time.iso8601 }}" + started_epoch: "{{ ansible_date_time.epoch | int }}" + dry_run: "{{ cifmw_cleanup_openstack_dry_run | bool }}" + crs_deleted: [] + crs_failed: [] + api_resources_cleaned: false + storage_cleaned: false + namespaces_deleted: [] + bmh_detached: [] + errors: [] + +- name: Set cifmw_architecture_automation_file if not set before + when: + - cifmw_architecture_automation_file is not defined + - cifmw_architecture_repo is defined + - cifmw_architecture_scenario is defined + ansible.builtin.set_fact: + cifmw_architecture_automation_file: >- + {{ + ( + cifmw_architecture_repo, + 'automation/vars', + cifmw_architecture_scenario~'.yaml' + ) | ansible.builtin.path_join + }} - name: Load architecture automation file + when: cifmw_architecture_automation_file is defined register: _automation ansible.builtin.slurp: path: "{{ cifmw_architecture_automation_file }}" + failed_when: false - name: Prepare automation data + when: + - cifmw_architecture_automation_file is defined + - _automation is defined + - _automation is succeeded + - _automation.content is defined vars: _parsed: "{{ _automation.content | b64decode | from_yaml }}" ansible.builtin.set_fact: @@ -19,6 +55,7 @@ {{ _parsed['vas'][cifmw_architecture_scenario] }} - name: Clean up testing resources + when: cifmw_architecture_automation_file is defined and _automation is succeeded ansible.builtin.include_role: name: test_operator tasks_from: cleanup @@ -40,55 +77,70 @@ patterns: "*.yml" excludes: "bmh-secret*" register: bmh_crs + failed_when: false - name: Get bmh secrets crs ansible.builtin.find: path: "{{ cifmw_deploy_bmh_dest_dir }}" patterns: "bmh-secret*" register: bmh_secrets_crs + failed_when: false - name: Detach bmh to skip deprovisioning ansible.builtin.import_tasks: detach_bmh.yaml - when: cifmw_cleanup_openstack_detach_bmh + when: + - cifmw_cleanup_openstack_detach_bmh + - not cifmw_cleanup_openstack_dry_run | bool + tags: + - cleanup_bmh -- name: Delete deployment CRs - vars: +- name: Build list of CRs to delete + ansible.builtin.set_fact: _stages_crs: >- {{ - cifmw_deploy_architecture_steps['stages'] | + (cifmw_deploy_architecture_steps['stages'] | reverse | selectattr('build_output', 'defined') | map(attribute='build_output') | map('basename') | - list + list) if cifmw_deploy_architecture_steps is defined else [] }} + +- name: Build stages CR paths + ansible.builtin.set_fact: _stages_crs_path: >- {{ - [cifmw_kustomize_deploy_kustomizations_dest_dir] + ([cifmw_kustomize_deploy_kustomizations_dest_dir] | product(_stages_crs) | map('join', '/') | unique + | list) if _stages_crs | length > 0 else [] }} + +- name: Build additional CR lists + ansible.builtin.set_fact: _external_dns_crs: - "{{ cifmw_basedir }}/artifacts/manifests/cifmw_external_dns/ceph-local-dns.yml" - "{{ cifmw_basedir }}/artifacts/manifests/cifmw_external_dns/ceph-local-cert.yml" _operators_crs: - - "{{ cifmw_kustomize_deploy_nmstate_dest_file }}" - - "{{ cifmw_kustomize_deploy_metallb_dest_file }}" + # Only delete OpenStack CRs, not infrastructure operators (NMState, MetalLB, OLM) + # These infrastructure operators should be preserved for cluster reuse - "{{ cifmw_kustomize_deploy_kustomizations_dest_dir }}/openstack.yaml" - - "{{ cifmw_kustomize_deploy_olm_dest_file }}" _bmh_crs: >- {{ - bmh_crs.files | + bmh_crs.files | default([]) | map(attribute='path') | list }} _bmh_secrets_crs: >- {{ - bmh_secrets_crs.files | + bmh_secrets_crs.files | default([]) | map(attribute='path') | list }} + +- name: Set final CRs to delete list + ansible.builtin.set_fact: _crs_to_delete: >- {{ _external_dns_crs + @@ -97,22 +149,74 @@ _bmh_secrets_crs + _operators_crs }} + +- name: Report CRs that would be deleted (dry-run) + when: cifmw_cleanup_openstack_dry_run | bool + ansible.builtin.debug: + msg: >- + [DRY-RUN] Would delete the following CR files: + {{ _crs_to_delete | to_nice_yaml }} + +- name: Delete deployment CRs ansible.builtin.import_tasks: cleanup_crs.yaml + tags: + - cleanup_crs + when: not cifmw_cleanup_openstack_dry_run | bool + +- name: Clean up OpenStack API resources + ansible.builtin.import_tasks: cleanup_openstack_api.yaml + when: + - cifmw_cleanup_openstack_delete_api_resources | default(true) + - not cifmw_cleanup_openstack_dry_run | bool + tags: + - cleanup_api + +- name: Delete OpenStack CRs directly from cluster + ansible.builtin.import_tasks: cleanup_crs_direct.yaml + when: + - cifmw_cleanup_openstack_delete_crs_direct | default(true) + - not cifmw_cleanup_openstack_dry_run | bool + tags: + - cleanup_crs_direct + +- name: Clean up PVCs and storage resources + ansible.builtin.import_tasks: cleanup_storage.yaml + when: + - cifmw_cleanup_openstack_delete_storage | default(true) + - not cifmw_cleanup_openstack_dry_run | bool + tags: + - cleanup_storage + +- name: Clean up namespaces + ansible.builtin.import_tasks: cleanup_namespaces.yaml + when: + - cifmw_cleanup_openstack_delete_namespaces | default(false) + - not cifmw_cleanup_openstack_dry_run | bool + tags: + - cleanup_namespaces - name: Get artifacts scripts ansible.builtin.find: path: "{{ cifmw_kustomize_deploy_basedir }}/artifacts" patterns: "*.sh, ansible_facts.*" register: artifacts_to_remove + failed_when: false - name: Remove artifacts + when: + - not cifmw_cleanup_openstack_dry_run | bool + - artifacts_to_remove.files is defined + - artifacts_to_remove.files | length > 0 become: true ansible.builtin.file: path: "{{ item }}" state: absent - loop: "{{ artifacts_to_remove.files | map(attribute='path') | list }}" + loop: "{{ artifacts_to_remove.files | default([]) | map(attribute='path') | list }}" + tags: + - cleanup_artifacts - name: Remove logs and tests directories + when: not cifmw_cleanup_openstack_dry_run | bool ansible.builtin.file: path: "{{ item }}" state: absent @@ -120,3 +224,54 @@ - "{{ cifmw_basedir }}/logs" - "{{ cifmw_basedir }}/tests" become: true + tags: + - cleanup_artifacts + +- name: Record cleanup completion time + ansible.builtin.set_fact: + _cleanup_completed_at: "{{ ansible_date_time.epoch | int }}" + +- name: Finalize cleanup summary + ansible.builtin.set_fact: + _cleanup_summary: >- + {{ + _cleanup_summary | combine({ + 'completed_at': ansible_date_time.iso8601, + 'duration_seconds': (_cleanup_completed_at | int) - (_cleanup_summary.started_epoch | default(ansible_date_time.epoch) | int), + 'crs_deleted_count': _cleanup_summary.crs_deleted | length, + 'errors_count': _cleanup_summary.errors | length + }) + }} + +- name: Display comprehensive cleanup summary + ansible.builtin.debug: + msg: | + ╔══════════════════════════════════════════════════════════════════╗ + ║ OpenStack Cleanup Summary ║ + ╚══════════════════════════════════════════════════════════════════╝ + + Mode: {{ 'DRY-RUN' if cifmw_cleanup_openstack_dry_run else 'EXECUTION' }} + Duration: {{ _cleanup_summary.duration_seconds }}s + + ✓ Custom Resources: {{ _cleanup_summary.crs_deleted_count }} deleted + {% if _cleanup_summary.crs_failed | length > 0 %} + ✗ Failed CRs: {{ _cleanup_summary.crs_failed | length }} + {% endif %} + ✓ API Resources: {{ 'Cleaned' if _cleanup_summary.api_resources_cleaned else 'Skipped' }} + ✓ Storage: {{ 'Cleaned' if _cleanup_summary.storage_cleaned else 'Skipped' }} + ✓ BMH Detached: {{ _cleanup_summary.bmh_detached | length }} + ✓ Namespaces: {{ _cleanup_summary.namespaces_deleted | join(', ') if _cleanup_summary.namespaces_deleted else 'Preserved' }} + + {% if _cleanup_summary.errors | length > 0 %} + ⚠ Errors encountered: {{ _cleanup_summary.errors_count }} + {% for error in _cleanup_summary.errors %} + - {{ error }} + {% endfor %} + {% endif %} + + {% if not cifmw_cleanup_openstack_dry_run %} + ═══════════════════════════════════════════════════════════════════ + Cluster is ready for reuse. Infrastructure operators preserved: + - NMState, MetalLB, OLM, cert-manager + ═══════════════════════════════════════════════════════════════════ + {% endif %}