From 9d2319ffc4ea9ca0b4e7e944100b48de6b72f53a Mon Sep 17 00:00:00 2001
From: ehila
Date: Fri, 14 Nov 2025 12:08:17 -0500
Subject: [PATCH] feat: add yamlfmt support

added pre-commit check to catch shellcheck and yamlfmt errors
added pre-commit install and uninstall tasks to Makefile
added yamlfmt script and config to format and verify yaml files

Signed-off-by: ehila
---
 .yamlfmt                                      |  7 ++
 Makefile                                      | 19 +++-
 deploy/openshift-clusters/clean.yml           |  1 +
 .../collections/requirements.yml              |  4 +-
 deploy/openshift-clusters/init-host.yml       |  8 +--
 deploy/openshift-clusters/kcli-install.yml    | 66 +++++++++----------
 deploy/openshift-clusters/kcli-redfish.yml    | 14 ++--
 deploy/openshift-clusters/redeploy.yml        | 16 ++---
 deploy/openshift-clusters/redfish.yml         | 12 ++--
 .../roles/common/tasks/cluster-state.yml      | 10 +--
 .../common/tasks/update-cluster-inventory.yml |  3 +-
 .../roles/config/tasks/copy_auth.yaml         |  4 +-
 .../roles/config/tasks/main.yaml              |  1 +
 .../roles/dev-scripts/clean/defaults/main.yml |  1 +
 .../roles/dev-scripts/clean/tasks/main.yml    |  1 +
 .../dev-scripts/install-dev/defaults/main.yml |  1 +
 .../dev-scripts/install-dev/handlers/main.yml |  1 +
 .../dev-scripts/install-dev/tasks/bounce.yml  |  1 +
 .../install-dev/tasks/check_vars.yml          |  1 +
 .../dev-scripts/install-dev/tasks/config.yml  | 11 ++--
 .../dev-scripts/install-dev/tasks/create.yml  |  1 +
 .../install-dev/tasks/dev-scripts.yml         |  1 +
 .../dev-scripts/install-dev/tasks/main.yml    |  5 +-
 .../install-dev/tasks/teardown.yml            |  5 +-
 .../dev-scripts/install-dev/vars/main.yml     |  1 +
 .../roles/git-user/tasks/main.yml             |  1 +
 .../roles/kcli/kcli-install/tasks/deploy.yml  |  4 +-
 .../kcli-install/tasks/ensure_pool_active.yml |  4 +-
 .../kcli/kcli-install/tasks/kcli_install.yml  |  4 +-
 .../kcli/kcli-install/tasks/libvirt_setup.yml |  2 +-
 .../roles/kcli/kcli-install/tasks/main.yml    |  2 +-
 .../roles/kcli/kcli-install/tasks/prepare.yml |  8 +--
 .../kcli/kcli-install/tasks/prerequisites.yml |  4 +-
 .../kcli-install/tasks/storage_pool_setup.yml |  7 +-
 .../kcli/kcli-install/tasks/validate.yml      |  4 +-
 .../roles/kcli/kcli-install/vars/main.yml     |  3 +-
 .../roles/kcli/kcli-redfish/defaults/main.yml | 15 +++--
 .../kcli-redfish/tasks/configure_stonith.yml  |  2 +-
 .../roles/kcli/kcli-redfish/tasks/main.yml    |  4 +-
 .../kcli/kcli-redfish/tasks/prerequisites.yml |  2 +-
 .../kcli/kcli-redfish/tasks/start_ksushy.yml  |  8 +--
 .../kcli-redfish/tasks/verify_stonith.yml     |  4 +-
 .../roles/proxy-setup/defaults/main.yml       |  6 +-
 .../roles/proxy-setup/tasks/container.yml     |  2 +-
 .../roles/proxy-setup/tasks/credentials.yml   |  2 +-
 .../roles/proxy-setup/tasks/environment.yml   |  8 +--
 .../proxy-setup/tasks/infrastructure.yml      |  2 +-
 .../roles/proxy-setup/tasks/main.yml          |  2 +-
 .../roles/redfish/defaults/main.yml           |  2 +-
 .../roles/redfish/tasks/main.yml              |  2 +-
 .../roles/redfish/tasks/process_bmh.yml       |  8 +--
 deploy/openshift-clusters/setup.yml           |  1 +
 deploy/openshift-clusters/vars/init-host.yml  |  1 +
 hack/pre-commit                               | 27 ++++++++
 hack/yamlfmt.sh                               | 29 ++++++++
 helpers/apply-rpm-patch.yml                   | 10 +--
 helpers/build-and-patch-resource-agents.yml   |  2 +-
 helpers/collect-tnf-logs.yml                  |  4 +-
 58 files changed, 237 insertions(+), 144 deletions(-)
 create mode 100644 .yamlfmt
 create mode 100755 hack/pre-commit
 create mode 100755 hack/yamlfmt.sh

diff --git a/.yamlfmt b/.yamlfmt
new file mode 100644
index 0000000..b49c5dc
--- /dev/null
+++ b/.yamlfmt
@@ -0,0 +1,7 @@
+formatter:
+  type: basic
+  include_document_start: true
+  trim_trailing_whitespace: true
+  retain_line_breaks_single: true
+  scan_folded_as_literal: true
+output_format: line
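For reference, this config is what hack/yamlfmt.sh (added below) feeds to yamlfmt
via its -conf flag. A minimal manual invocation, assuming the upstream yamlfmt CLI
and the same flags the script itself uses (-conf to select the config, --lint to
check without rewriting):

    # rewrite YAML files in place using the repo config
    yamlfmt -conf .yamlfmt .

    # check only, exiting non-zero on formatting drift (what `make verify` does)
    yamlfmt -conf .yamlfmt --lint .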
diff --git a/Makefile b/Makefile
index bf430a5..3656026 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,20 @@
 shellcheck:
-	@./hack/shellcheck.sh
\ No newline at end of file
+	@./hack/shellcheck.sh
+
+yamlfmt:
+	@./hack/yamlfmt.sh
+
+verify:
+	VALIDATE_ONLY=true $(MAKE) shellcheck
+	VALIDATE_ONLY=true $(MAKE) yamlfmt
+
+install-pre-commit:
+	@echo "Installing pre-commit hook..."
+	@ln -sf ../../hack/pre-commit .git/hooks/pre-commit
+	@echo "Pre-commit hook installed successfully!"
+
+uninstall-pre-commit:
+	@echo "Uninstalling pre-commit hook..."
+	@rm -f .git/hooks/pre-commit
+	@echo "Pre-commit hook uninstalled successfully!"
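A sketch of the intended developer workflow for these targets (the hook simply
runs `make verify` and aborts the commit on failure):

    make install-pre-commit     # symlink hack/pre-commit into .git/hooks
    git commit -s               # shellcheck + yamlfmt now run before every commit
    git commit -s --no-verify   # escape hatch to bypass the hook
    make uninstall-pre-commit   # remove the hook again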
diff --git a/deploy/openshift-clusters/clean.yml b/deploy/openshift-clusters/clean.yml
index 3fa1c05..1d6d5f5 100644
--- a/deploy/openshift-clusters/clean.yml
+++ b/deploy/openshift-clusters/clean.yml
@@ -1,3 +1,4 @@
+---
 - hosts: metal_machine
   roles:
     - dev-scripts/clean
diff --git a/deploy/openshift-clusters/collections/requirements.yml b/deploy/openshift-clusters/collections/requirements.yml
index 48d4cb7..291137f 100644
--- a/deploy/openshift-clusters/collections/requirements.yml
+++ b/deploy/openshift-clusters/collections/requirements.yml
@@ -8,8 +8,8 @@ collections:
   - name: kubernetes.core
     version: ">=2.4.0"
   - name: containers.podman
-    version: ">=1.10.0" 
+    version: ">=1.10.0"
   - name: ansible.posix
     version: ">=2.0"
   - name: community.general
-    version: ">=5.0.0"
\ No newline at end of file
+    version: ">=5.0.0"
diff --git a/deploy/openshift-clusters/init-host.yml b/deploy/openshift-clusters/init-host.yml
index 59d3c98..2cb277b 100644
--- a/deploy/openshift-clusters/init-host.yml
+++ b/deploy/openshift-clusters/init-host.yml
@@ -10,10 +10,10 @@
   # --- Variable Definitions ---
   vars_files:
     - vars/init-host.yml
-    - vars/init-host.yml.local # Optional override file (ignored by git) 
+    - vars/init-host.yml.local # Optional override file (ignored by git)

   vars:
-    # Additional variables can be defined here or overridden via command line 
+    # Additional variables can be defined here or overridden via command line

   # --- Pre-flight Checks ---
   pre_tasks:
@@ -47,7 +47,7 @@
       register: rhsm_key_prompt
       delegate_to: localhost
       run_once: true
-      when: 
+      when:
         - not rhsm_already_registered
         - interactive_mode | bool
         - rhsm_activation_key is not defined
@@ -233,7 +233,7 @@
   post_tasks:
     - name: Host initialization completed
       ansible.builtin.debug:
-        msg: |
+        msg: |-
           Host initialization completed successfully:
           - Hostname set to: {{ hostname_prefix }}
           - User '{{ init_user }}' configured with sudo access
diff --git a/deploy/openshift-clusters/kcli-install.yml b/deploy/openshift-clusters/kcli-install.yml
index 59ea21a..83a6aca 100644
--- a/deploy/openshift-clusters/kcli-install.yml
+++ b/deploy/openshift-clusters/kcli-install.yml
@@ -11,13 +11,13 @@
   collections:
     - community.libvirt
     - kubernetes.core
-  
+
   # --- Variable Definitions ---
   vars:
     # Default to non-interactive mode to prevent prompts during automated runs
     # Set interactive_mode: true to enable prompts for manual execution
     interactive_mode: false
-    
+
     # Default topology is fencing (can be overridden to 'arbiter' if needed, not supported at the moment)
     # This default ensures no prompt is triggered when interactive_mode is false
     topology: fencing
@@ -85,34 +85,34 @@
     - vars/main.yml

   tasks:
-     # Setup proxy access as a separate step after cluster deployment
-     - name: Setup proxy access
-       include_role:
-         name: proxy-setup
-       vars:
-         kubeconfig_path: "{{ ansible_user_dir }}/.kcli/clusters/{{ test_cluster_name }}/auth/kubeconfig"
-         kubeadmin_password_path: "{{ ansible_user_dir }}/.kcli/clusters/{{ test_cluster_name }}/auth/kubeadmin-password"
-
-     - name: Update inventory with cluster VMs
-       include_tasks: roles/common/tasks/update-cluster-inventory.yml
-
-     # Configure stonith fencing after cluster installation
-     - name: Configure Redfish BMC simulation for fencing topology
-       shell: ansible-playbook kcli-redfish.yml -i {{ inventory_file | default('inventory.ini') }}
-       args:
-         chdir: "{{ playbook_dir }}"
-       delegate_to: localhost
-       run_once: true
-       when: topology == "fencing"
-
-     - name: "Final verification message"
-       ansible.builtin.debug:
-         msg: |
-           Installation tasks have completed for {{ topology }} topology.
-
-           Next steps:
-           1. Source the proxy environment from anywhere:
-              source {{ playbook_dir }}/proxy.env
-              (or from openshift-clusters directory: source proxy.env)
-           2. Verify cluster access: oc get nodes
-           3. Access the cluster console if needed
\ No newline at end of file
+    # Setup proxy access as a separate step after cluster deployment
+    - name: Setup proxy access
+      include_role:
+        name: proxy-setup
+      vars:
+        kubeconfig_path: "{{ ansible_user_dir }}/.kcli/clusters/{{ test_cluster_name }}/auth/kubeconfig"
+        kubeadmin_password_path: "{{ ansible_user_dir }}/.kcli/clusters/{{ test_cluster_name }}/auth/kubeadmin-password"
+
+    - name: Update inventory with cluster VMs
+      include_tasks: roles/common/tasks/update-cluster-inventory.yml
+
+    # Configure stonith fencing after cluster installation
+    - name: Configure Redfish BMC simulation for fencing topology
+      shell: ansible-playbook kcli-redfish.yml -i {{ inventory_file | default('inventory.ini') }}
+      args:
+        chdir: "{{ playbook_dir }}"
+      delegate_to: localhost
+      run_once: true
+      when: topology == "fencing"
+
+    - name: "Final verification message"
+      ansible.builtin.debug:
+        msg: |-
+          Installation tasks have completed for {{ topology }} topology.
+
+          Next steps:
+          1. Source the proxy environment from anywhere:
+             source {{ playbook_dir }}/proxy.env
+             (or from openshift-clusters directory: source proxy.env)
+          2. Verify cluster access: oc get nodes
+          3. Access the cluster console if needed
diff --git a/deploy/openshift-clusters/kcli-redfish.yml b/deploy/openshift-clusters/kcli-redfish.yml
index 811bd19..d77e1c4 100644
--- a/deploy/openshift-clusters/kcli-redfish.yml
+++ b/deploy/openshift-clusters/kcli-redfish.yml
@@ -58,7 +58,7 @@
     - name: Proxy environment configuration used
       debug:
         msg: |
-          
+
           Using proxy.env file for cluster access configuration.
           Proxy settings have been applied to the kcli-redfish role.
@@ -69,10 +69,10 @@
     - name: No proxy environment detected
       debug:
        msg: |
-          
+
           proxy.env file not found. Assuming direct cluster access.
           The kcli-redfish role will run without proxy configuration.
-          
+
           Note: Ensure your current environment has:
           - Valid KUBECONFIG environment variable or ~/.kube/config
           - Direct network access to the OpenShift cluster API
@@ -87,15 +87,15 @@
   rescue:
     - name: Handle kcli-redfish configuration errors
       debug:
-        msg: |
+        msg: |-
           An error occurred while running kcli-redfish configuration.
           Error details: {{ ansible_failed_result.msg | default('Unknown error') }}
-          
+
           Common issues:
           1. Ensure cluster is deployed with kcli and fencing topology
           2. Verify ksushy BMC simulator is running on the hypervisor
           3. Check that kcli clusters are running: kcli list cluster
           4. Verify you're running this on the correct inventory host (hypervisor)
-          
+
           You can override auto-detection with explicit variables:
-          - ansible-playbook kcli-redfish.yml -e "test_cluster_name=your-cluster"
\ No newline at end of file
+          - ansible-playbook kcli-redfish.yml -e "test_cluster_name=your-cluster"
diff --git a/deploy/openshift-clusters/redeploy.yml b/deploy/openshift-clusters/redeploy.yml
index 61abe51..22551bf 100644
--- a/deploy/openshift-clusters/redeploy.yml
+++ b/deploy/openshift-clusters/redeploy.yml
@@ -1,3 +1,4 @@
+---
 - hosts: metal_machine
   gather_facts: no
   force_handlers: yes
@@ -36,7 +37,7 @@
          - Recovers from incomplete/failed deployment state

       3. COMPLETE REBUILD (different topology):
-         - Two-node with fencing ↔ Two-node with arbiter 
+         - Two-node with fencing ↔ Two-node with arbiter
          - Uses make realclean + full installation from scratch
          - Slower but guarantees clean state with correct topology
@@ -81,7 +82,7 @@
          REASON: Cluster topology change detected
          NOTE: Ensures completely clean state but takes longest time
          {% elif clean_needed | bool %}
-         STRATEGY: Clean deployment (make clean + full install) 
+         STRATEGY: Clean deployment (make clean + full install)
          REASON: Previous deployment incomplete or status unknown
          NOTE: Recovers from incomplete state, faster than complete rebuild
          {% else %}
@@ -97,7 +98,7 @@
       prompt: |-
         Please verify the information above is correct.
         Press Enter to proceed with: {% if vm_cleanup_needed | bool %}COMPLETE REBUILD
-        {% elif clean_needed | bool %}CLEAN DEPLOYMENT 
+        {% elif clean_needed | bool %}CLEAN DEPLOYMENT
         {% else %}FAST REDEPLOY
         {% endif %}
       delegate_to: localhost
@@ -123,14 +124,14 @@
         chdir: "{{ dev_scripts_path }}"
         target: clean
       register: clean_result
-      when: 
+      when:
         - clean_needed | bool
-        - not (vm_cleanup_needed | bool) # Don't clean if we already did realclean
+        - not (vm_cleanup_needed | bool) # Don't clean if we already did realclean

     - name: Display clean completion
       ansible.builtin.debug:
         msg: "Clean completed - ready for fresh installation to recover from incomplete state"
-      when: 
+      when:
         - clean_needed | bool
         - not (vm_cleanup_needed | bool)
@@ -141,5 +142,4 @@
   post_tasks:
     - name: Update inventory with cluster VMs
       include_tasks: roles/common/tasks/update-cluster-inventory.yml
-
-  # Cluster state management is now handled by the install-dev role
\ No newline at end of file
+  # Cluster state management is now handled by the install-dev role
diff --git a/deploy/openshift-clusters/redfish.yml b/deploy/openshift-clusters/redfish.yml
index a349b9c..7212df6 100644
--- a/deploy/openshift-clusters/redfish.yml
+++ b/deploy/openshift-clusters/redfish.yml
@@ -56,7 +56,7 @@
     - name: Proxy environment configuration used
       debug:
         msg: |
-          
+
           Using proxy.env file for cluster access configuration.
           Proxy settings have been applied to the redfish role.
@@ -67,10 +67,10 @@
     - name: No proxy environment detected
       debug:
         msg: |
-          
+
           proxy.env file not found. Assuming direct cluster access.
           The redfish role will run without proxy configuration.
-          
+
           Note: Ensure your current environment has:
           - Valid KUBECONFIG environment variable or ~/.kube/config
           - Direct network access to the OpenShift cluster API
@@ -84,9 +84,9 @@
   rescue:
     - name: Handle redfish configuration errors
       debug:
-        msg: |
+        msg: |-
           An error occurred while running redfish configuration.
           Error details: {{ ansible_failed_result.msg | default('Unknown error') }}
-          
+
           You can try running manually:
-          - source proxy.env && ansible-playbook -i localhost, roles/redfish/tasks/main.yml
\ No newline at end of file
+          - source proxy.env && ansible-playbook -i localhost, roles/redfish/tasks/main.yml
diff --git a/deploy/openshift-clusters/roles/common/tasks/cluster-state.yml b/deploy/openshift-clusters/roles/common/tasks/cluster-state.yml
index bf28d06..1ea127e 100644
--- a/deploy/openshift-clusters/roles/common/tasks/cluster-state.yml
+++ b/deploy/openshift-clusters/roles/common/tasks/cluster-state.yml
@@ -37,7 +37,7 @@
     force: yes
   delegate_to: localhost
   run_once: true
-  when: 
+  when:
    - ansible_check_mode == false
    - cluster_state_phase is defined and cluster_state_phase == 'deploying'
@@ -71,7 +71,7 @@
     force: yes
   delegate_to: localhost
   run_once: true
-  when: 
+  when:
    - ansible_check_mode == false
    - cluster_state_phase is defined and cluster_state_phase == 'deployed'
@@ -106,7 +106,7 @@
     force: yes
   delegate_to: localhost
   run_once: true
-  when: 
+  when:
    - install_dev_mode is defined and install_dev_mode == 'redeploy'
    - ansible_check_mode == false
    - cluster_state_phase is defined and cluster_state_phase == 'deployed'
@@ -120,6 +120,6 @@
       - Status: DEPLOYED (cluster successfully redeployed)
       - Redeployed at: {{ now(utc=True).isoformat() }}
   run_once: true
-  when: 
+  when:
    - install_dev_mode is defined and install_dev_mode == 'redeploy'
-    - cluster_state_phase is defined and cluster_state_phase == 'deployed'
\ No newline at end of file
+    - cluster_state_phase is defined and cluster_state_phase == 'deployed'
diff --git a/deploy/openshift-clusters/roles/common/tasks/update-cluster-inventory.yml b/deploy/openshift-clusters/roles/common/tasks/update-cluster-inventory.yml
index 3d74ffb..c668f4c 100644
--- a/deploy/openshift-clusters/roles/common/tasks/update-cluster-inventory.yml
+++ b/deploy/openshift-clusters/roles/common/tasks/update-cluster-inventory.yml
@@ -141,7 +141,6 @@
 - name: Build cluster_vms section
   set_fact:
     cluster_vms_section: |
-      [cluster_vms]
       {% for entry in parsed_vm_entries %}
       {{ entry.name }} ansible_host='{{ entry.ip }}'
@@ -172,6 +171,6 @@
 - name: No cluster VMs found
   when: not (has_cluster_vms | bool)
   debug:
-    msg: |
+    msg: |-
       No cluster VMs found for cluster '{{ test_cluster_name }}'.
       Inventory will not be updated with cluster VMs.
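For illustration only, with invented VM names and addresses, the
cluster_vms_section fact built above renders inventory entries like:

    tnt-cluster-ctlplane-0 ansible_host='192.168.122.10'
    tnt-cluster-ctlplane-1 ansible_host='192.168.122.11'

(tnt-cluster is the kcli-install default cluster name; real names and IPs come
from the parsed kcli VM list, and the [cluster_vms] group header is no longer
emitted by this template.)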
diff --git a/deploy/openshift-clusters/roles/config/tasks/copy_auth.yaml b/deploy/openshift-clusters/roles/config/tasks/copy_auth.yaml
index 33d62dc..40ad006 100644
--- a/deploy/openshift-clusters/roles/config/tasks/copy_auth.yaml
+++ b/deploy/openshift-clusters/roles/config/tasks/copy_auth.yaml
@@ -38,10 +38,10 @@
 - name: Display auth files location
   debug:
-    msg: |
+    msg: |-
       Cluster authentication files copied to ~/auth/:
       - kubeconfig: ~/auth/kubeconfig
       {% if kubeadmin_password_path is defined %}
       - kubeadmin-password: ~/auth/kubeadmin-password
       {% endif %}
-      - Default kubeconfig symlink: ~/.kube/config -> ~/auth/kubeconfig
\ No newline at end of file
+      - Default kubeconfig symlink: ~/.kube/config -> ~/auth/kubeconfig
diff --git a/deploy/openshift-clusters/roles/config/tasks/main.yaml b/deploy/openshift-clusters/roles/config/tasks/main.yaml
index a3c2e02..129ed49 100644
--- a/deploy/openshift-clusters/roles/config/tasks/main.yaml
+++ b/deploy/openshift-clusters/roles/config/tasks/main.yaml
@@ -1,3 +1,4 @@
+---
 - name: Check if instance.env exists
   stat:
     path: "{{ playbook_dir }}/../aws-hypervisor/instance.env"
diff --git a/deploy/openshift-clusters/roles/dev-scripts/clean/defaults/main.yml b/deploy/openshift-clusters/roles/dev-scripts/clean/defaults/main.yml
index abdb5e8..9002a5b 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/clean/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/clean/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 dev_scripts_path: openshift-metal3/dev-scripts
 dev_scripts_branch: master
 dev_scripts_src_repo: https://github.com/openshift-metal3/dev-scripts
diff --git a/deploy/openshift-clusters/roles/dev-scripts/clean/tasks/main.yml b/deploy/openshift-clusters/roles/dev-scripts/clean/tasks/main.yml
index e9290d8..7794767 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/clean/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/clean/tasks/main.yml
@@ -1,3 +1,4 @@
+---
 - name: Check for dev_scripts_path
   fail:
     msg: dev_scripts_path must be defined
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/defaults/main.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/defaults/main.yml
index d9a9469..be8890e 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 dev_scripts_path: openshift-metal3/dev-scripts
 dev_scripts_branch: master
 dev_scripts_src_repo: https://github.com/openshift-metal3/dev-scripts
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/handlers/main.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/handlers/main.yml
index 52ce5cc..c2a73bb 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/handlers/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/handlers/main.yml
@@ -1,2 +1,3 @@
+---
 - name: Set OCP project
   command: oc --kubeconfig="{{kubeconfig_path}}" project openshift-machine-api
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/bounce.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/bounce.yml
index ce54985..acaaa83 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/bounce.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/bounce.yml
@@ -1,3 +1,4 @@
+---
 - import_tasks: teardown.yml
 - import_tasks: config.yml
 - import_tasks: create.yml
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/check_vars.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/check_vars.yml
index 04e5cd5..2d74f8b 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/check_vars.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/check_vars.yml
@@ -1,3 +1,4 @@
+---
 - name: Check for dev_scripts_path
   fail:
     msg: dev_scripts_path must be defined
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/config.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/config.yml
index 9c5d69a..f281efe 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/config.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/config.yml
@@ -1,3 +1,4 @@
+---
 - name: Copy pull secrets
   copy:
     dest: "{{dev_scripts_path}}/pull_secret.json"
@@ -17,9 +18,9 @@
 - name: Install dependencies
   ansible.builtin.dnf:
-    name: 
-    - podman
-    - runc
-    - containernetworking-plugins
+    name:
+      - podman
+      - runc
+      - containernetworking-plugins
     state: present
-  become: true
\ No newline at end of file
+  become: true
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/create.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/create.yml
index d96e912..fe4aa69 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/create.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/create.yml
@@ -1,3 +1,4 @@
+---
 - import_tasks: check_vars.yml
 - name: Trigger set project
   assert:
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/dev-scripts.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/dev-scripts.yml
index 1758dd0..7837fed 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/dev-scripts.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/dev-scripts.yml
@@ -1,3 +1,4 @@
+---
 - import_tasks: check_vars.yml
 - name: ssh configuration
   copy:
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/main.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/main.yml
index 2e43d25..12dd4a7 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/main.yml
@@ -1,3 +1,4 @@
+---
 - import_tasks: dev-scripts.yml

 # Full installation mode (topology change - after realclean)
@@ -16,12 +17,12 @@
       cluster_state_phase: 'deploying'
       installation_method: 'ipi'
       default_playbook_name: 'setup.yml'
-  when: 
+  when:
    - install_dev_mode is defined and install_dev_mode == 'install'
    - not kubeconfig_after_realclean.stat.exists

 - import_tasks: create.yml
-  when: 
+  when:
    - install_dev_mode is defined and install_dev_mode == 'install'
    - not kubeconfig_after_realclean.stat.exists
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/teardown.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/teardown.yml
index b220eb7..ea4bc29 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/teardown.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/tasks/teardown.yml
@@ -1,8 +1,9 @@
+---
 - name: Check for dev_scripts_path
   fail:
     msg: dev_scripts_path must be defined
   when: dev_scripts_path is undefined
 - name: Stop OpenShift
   make:
-   chdir: "{{dev_scripts_path}}"
-   target: clean
+    chdir: "{{dev_scripts_path}}"
+    target: clean
diff --git a/deploy/openshift-clusters/roles/dev-scripts/install-dev/vars/main.yml b/deploy/openshift-clusters/roles/dev-scripts/install-dev/vars/main.yml
index 4379017..35a6bad 100644
--- a/deploy/openshift-clusters/roles/dev-scripts/install-dev/vars/main.yml
+++ b/deploy/openshift-clusters/roles/dev-scripts/install-dev/vars/main.yml
@@ -1,3 +1,4 @@
+---
 kubeconfig_path: "{{dev_scripts_path}}/ocp/{{test_cluster_name}}/auth/kubeconfig"
 kubeadmin_password_path: "{{dev_scripts_path}}/ocp/{{test_cluster_name}}/auth/kubeadmin-password"
 method: ipi
diff --git a/deploy/openshift-clusters/roles/git-user/tasks/main.yml b/deploy/openshift-clusters/roles/git-user/tasks/main.yml
index aeea0e0..8c271a1 100644
--- a/deploy/openshift-clusters/roles/git-user/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/git-user/tasks/main.yml
@@ -1,3 +1,4 @@
+---
 - name: Install git
   dnf:
     name: git
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/deploy.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/deploy.yml
index 391a87f..73f8102 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/deploy.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/deploy.yml
@@ -16,7 +16,7 @@
 - name: Deploy OpenShift cluster with kcli
   command: "{{ kcli_cmd }}"
   register: kcli_deploy_result
-  async: 7200  # 2 hours timeout
+  async: 7200 # 2 hours timeout
   poll: 30
   environment:
     KUBECONFIG: "{{ kubeconfig_path }}"
@@ -24,4 +24,4 @@
 - name: Check deployment status
   fail:
     msg: "kcli deployment failed: {{ kcli_deploy_result.stderr }}"
-  when: kcli_deploy_result.rc != 0
\ No newline at end of file
+  when: kcli_deploy_result.rc != 0
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/ensure_pool_active.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/ensure_pool_active.yml
index 6c1479c..c663556 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/ensure_pool_active.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/ensure_pool_active.yml
@@ -11,10 +11,10 @@
 - name: Start pool if inactive
   command: virsh pool-start {{ pool_name }}
   become: true
-  when: 
+  when:
    - pool_status.rc == 0
    - "'inactive' in pool_status.stdout or 'State:' not in pool_status.stdout or 'running' not in pool_status.stdout"
   register: pool_start_result
   failed_when:
    - pool_start_result.rc != 0
-    - "'is already active' not in pool_start_result.stderr"
\ No newline at end of file
+    - "'is already active' not in pool_start_result.stderr"
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/kcli_install.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/kcli_install.yml
index 47de0ec..f44d172 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/kcli_install.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/kcli_install.yml
@@ -72,7 +72,7 @@
 - name: Verify kcli can see default pool
   fail:
     msg: "kcli cannot see the default storage pool. Available pools: {{ kcli_pools.stdout_lines }}"
-  when: 
+  when:
    - kcli_pools.rc == 0
    - "'default' not in kcli_pools.stdout"
@@ -84,4 +84,4 @@
 - name: Display available networks
   debug:
-    msg: "Available kcli networks: {{ kcli_networks.stdout_lines }}"
\ No newline at end of file
+    msg: "Available kcli networks: {{ kcli_networks.stdout_lines }}"
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/libvirt_setup.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/libvirt_setup.yml
index dcba779..5c165ed 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/libvirt_setup.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/libvirt_setup.yml
@@ -66,4 +66,4 @@
 - name: Fail if libvirt is not accessible
   fail:
     msg: "Libvirt is not accessible. Error: {{ libvirt_test.stderr }}"
-  when: libvirt_test.rc != 0
\ No newline at end of file
+  when: libvirt_test.rc != 0
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/main.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/main.yml
index 161c10d..00eb8e3 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/main.yml
@@ -24,4 +24,4 @@
     name: config
     tasks_from: copy_auth

-# Deployment completed
\ No newline at end of file
+# Deployment completed
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prepare.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prepare.yml
index 6e7cef3..6b4cbe9 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prepare.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prepare.yml
@@ -71,8 +71,8 @@
     path: "{{ item.path }}"
     mode: "{{ item.mode }}"
   loop:
-    - { path: "{{ ansible_user_dir }}/.ssh/id_ed25519", mode: "0600" }
-    - { path: "{{ ansible_user_dir }}/.ssh/id_ed25519.pub", mode: "0644" }
+    - {path: "{{ ansible_user_dir }}/.ssh/id_ed25519", mode: "0600"}
+    - {path: "{{ ansible_user_dir }}/.ssh/id_ed25519.pub", mode: "0644"}
   when: not ssh_key_check.stat.exists

 - name: Display authentication file locations
@@ -94,7 +94,7 @@
 - name: Set ksushy IP for BMC simulation for fencing topology
   set_fact:
     ksushy_ip: "192.168.122.1"
-  when: 
+  when:
    - topology == "fencing"
    - ksushy_ip is not defined
@@ -125,4 +125,4 @@
 - name: Show kcli deployment parameters
   debug:
-    msg: "{{ params_content.content | b64decode | from_yaml }}"
\ No newline at end of file
+    msg: "{{ params_content.content | b64decode | from_yaml }}"
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prerequisites.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prerequisites.yml
index 9757dfd..1ce23f3 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prerequisites.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/prerequisites.yml
@@ -1,7 +1,7 @@
 ---
 # Prerequisites check and installation for kcli-install

-# Setup libvirt virtualization infrastructure 
+# Setup libvirt virtualization infrastructure
 - name: Setup libvirt infrastructure
   include_tasks: libvirt_setup.yml
@@ -14,4 +14,4 @@
   include_role:
     name: config
   vars:
-    ansible_skip_tags: git-setup
\ No newline at end of file
+    ansible_skip_tags: git-setup
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/storage_pool_setup.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/storage_pool_setup.yml
index e4aecdc..ba8afe2 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/storage_pool_setup.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/storage_pool_setup.yml
@@ -47,12 +47,11 @@
 - name: Define storage pool
   command: >
-    virsh pool-define-as {{ pool_name }} dir
-    --target {{ pool_path }}
+    virsh pool-define-as {{ pool_name }} dir --target {{ pool_path }}
   become: true
   when: "pool_name not in existing_pools.stdout_lines"
   register: pool_define_result
-  failed_when: 
+  failed_when:
    - pool_define_result.rc != 0
    - "'already exists' not in pool_define_result.stderr"
@@ -83,4 +82,4 @@
 - name: Fail if storage pool is not active
   fail:
     msg: "Storage pool {{ pool_name }} is not active. Pool info: {{ pool_info.stdout }}"
-  when: pool_info.rc != 0 or 'running' not in pool_info.stdout
\ No newline at end of file
+  when: pool_info.rc != 0 or 'running' not in pool_info.stdout
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/validate.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/validate.yml
index 1166602..4fdc8cf 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/validate.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/tasks/validate.yml
@@ -64,6 +64,6 @@
 - name: Fail if cluster exists and force_cleanup is false
   fail:
     msg: "Cluster {{ test_cluster_name }} already exists. Set force_cleanup=true to remove existing cluster first."
-  when: 
+  when:
    - cluster_vms_exist
-    - not force_cleanup
\ No newline at end of file
+    - not force_cleanup
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-install/vars/main.yml b/deploy/openshift-clusters/roles/kcli/kcli-install/vars/main.yml
index 993f7c0..3932a1e 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-install/vars/main.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-install/vars/main.yml
@@ -1,3 +1,4 @@
+---
 # Computed variables for kcli-install role
 # These are derived from user-configurable defaults and should not be overridden
@@ -13,4 +14,4 @@ kubeadmin_password_path: "{{ ansible_user_dir }}/.kcli/clusters/{{ test_cluster_
 # Topology-based computed values
 feature_set: "{{ 'TechPreviewNoUpgrade' if topology == 'arbiter' else 'DevPreviewNoUpgrade' }}"
-enable_arbiter: "{{ 'true' if topology == 'arbiter' else 'false' }}"
\ No newline at end of file
+enable_arbiter: "{{ 'true' if topology == 'arbiter' else 'false' }}"
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/defaults/main.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/defaults/main.yml
index 8f92469..feb5a8a 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 # kcli-redfish role default variables

 # SSL configuration for ksushy (simulated BMC)
@@ -7,13 +8,13 @@
 ssl_insecure_param: "ssl_insecure=1"
 # These will be set to reasonable defaults if not provided

 # Cluster configuration (uses kcli-install defaults)
-test_cluster_name: ""  # Will be set to kcli-install default (tnt-cluster)
-ksushy_ip: ""  # Will be set to 192.168.122.1 (standard libvirt gateway)
-ksushy_port: 9000  # Standard ksushy systemd service port
+test_cluster_name: "" # Will be set to kcli-install default (tnt-cluster)
+ksushy_ip: "" # Will be set to 192.168.122.1 (standard libvirt gateway)
+ksushy_port: 9000 # Standard ksushy systemd service port

 # BMC credentials (uses kcli-install defaults)
-bmc_user: ""  # Will be set to kcli-install default (admin)
-bmc_password: ""  # Will be set to kcli-install default (admin123)
+bmc_user: "" # Will be set to kcli-install default (admin)
+bmc_password: "" # Will be set to kcli-install default (admin123)

-# Cluster node naming pattern for kcli deployments
-node_name_pattern: "{{ test_cluster_name }}-ctlplane"
\ No newline at end of file
+# Cluster node naming pattern for kcli deployments
+node_name_pattern: "{{ test_cluster_name }}-ctlplane"
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/configure_stonith.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/configure_stonith.yml
index 6470041..0ee928c 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/configure_stonith.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/configure_stonith.yml
@@ -73,4 +73,4 @@
   when:
     - pcs_stonith_status_result.rc != 0
     - pcs_stonith_create_result is defined
-    - pcs_stonith_create_result.rc != 0
\ No newline at end of file
+    - pcs_stonith_create_result.rc != 0
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/main.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/main.yml
index 639e2e1..f882e85 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/main.yml
@@ -22,7 +22,7 @@
       Hypervisor IP: {{ ksushy_ip }}
       BMC User: {{ bmc_user }}
       BMC Port: {{ ksushy_port }}
-      
+
       Override with: ansible-playbook kcli-redfish.yml -e "test_cluster_name=my-cluster" -e "ksushy_ip=X.X.X.X"

 - name: Start ksushy BMC simulator
@@ -41,4 +41,4 @@
       - Hypervisor IP: {{ ksushy_ip }}
       - BMC User: {{ bmc_user }}
       - BMC Password: [{{ 'SET' if bmc_password != '' else 'MISSING' }}]
-  delegate_to: localhost
\ No newline at end of file
+  delegate_to: localhost
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/prerequisites.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/prerequisites.yml
index 131a7c1..f46deaf 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/prerequisites.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/prerequisites.yml
@@ -21,4 +21,4 @@
     name:
       - python-kubernetes
     state: present
-  become: true
\ No newline at end of file
+  become: true
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/start_ksushy.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/start_ksushy.yml
index 396836b..878b537 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/start_ksushy.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/start_ksushy.yml
@@ -35,11 +35,11 @@
       {% else %}
       ksushy BMC simulator started successfully on {{ ksushy_ip }}:{{ ksushy_port }}
       {% endif %}
-      
+
       BMC endpoints for {{ test_cluster_name }}:
       - https://{{ ksushy_ip }}:{{ ksushy_port }}/redfish/v1/Systems/{{ test_cluster_name }}-ctlplane-0
       - https://{{ ksushy_ip }}:{{ ksushy_port }}/redfish/v1/Systems/{{ test_cluster_name }}-ctlplane-1
-      
+

 - name: Test ksushy BMC endpoint accessibility
   shell: "curl -s -k 'https://{{ ksushy_ip }}:{{ ksushy_port }}/redfish/v1/Systems/local' --connect-timeout 5 | head -1"
   register: ksushy_test
@@ -49,12 +49,12 @@
   fail:
     msg: |
       WARNING: ksushy BMC simulator is not accessible at https://{{ ksushy_ip }}:{{ ksushy_port }}
-      
+
       This will prevent STONITH fencing from working properly. Please check:
       1. ksushy service status: systemctl --user status ksushy.service
       2. Firewall configuration: firewall-cmd --list-ports --zone=libvirt
       3. Network connectivity from VMs to hypervisor IP
-      
+
       Test result: {{ ksushy_test.stdout | default('No response') }}
       Return code: {{ ksushy_test.rc }}
   when: ksushy_test.rc != 0
diff --git a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/verify_stonith.yml b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/verify_stonith.yml
index 3543dd1..55f341c 100644
--- a/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/verify_stonith.yml
+++ b/deploy/openshift-clusters/roles/kcli/kcli-redfish/tasks/verify_stonith.yml
@@ -47,6 +47,6 @@
 - name: Display final stonith configuration
   debug:
-    msg: |
+    msg: |-
       Final stonith configuration:
-      {{ pcs_stonith_list_result.stdout | default('No stonith resources found') }}
\ No newline at end of file
+      {{ pcs_stonith_list_result.stdout | default('No stonith resources found') }}
diff --git a/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml b/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
index cc2e7a4..5802c3b 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
@@ -12,10 +12,10 @@
 proxy_image: quay.io/openshifttest/squid-proxy:multiarch
 proxy_no_proxy_list: "static.redhat.com,redhat.io,quay.io,openshift.org,openshift.com,svc,amazonaws.com,r2.cloudflarestorage.com,github.com,githubusercontent.com,google.com,googleapis.com,fedoraproject.org,cloudfront.net,localhost,127.0.0.1"

 # Cluster access configuration
-cluster_domains: 
+cluster_domains:
   - ".metalkube.org"
-  - ".ocpci.eng.rdu2.redhat.com" 
+  - ".ocpci.eng.rdu2.redhat.com"
   - ".okd.on.massopen.cloud"
   - ".p1.openshiftapps.com"
   - "sso.redhat.com"
-  - ".lab.example.com"
\ No newline at end of file
+  - ".lab.example.com"
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/container.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/container.yml
index ffbf4a8..4f8c28e 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/container.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/container.yml
@@ -29,4 +29,4 @@
     restart_policy: always
     network: host
     volumes:
-      - "{{ proxy_home }}/squid.conf:/etc/squid/squid.conf:Z"
\ No newline at end of file
+      - "{{ proxy_home }}/squid.conf:/etc/squid/squid.conf:Z"
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
index 63c6c31..b622191 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
@@ -11,4 +11,4 @@
   fetch:
     src: "{{ kubeadmin_password_path }}"
     dest: ./kubeadmin-password
-    flat: true
\ No newline at end of file
+    flat: true
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
index 0240d57..693b203 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
@@ -16,17 +16,17 @@
       export http_proxy=http://${EC2_PUBLIC_IP}:${PROXYPORT}/
       export https_proxy=http://${EC2_PUBLIC_IP}:${PROXYPORT}/
       export no_proxy="{{ proxy_no_proxy_list }}"
-      
+
       # Set KUBECONFIG to the absolute path of kubeconfig file next to this proxy.env
       export KUBECONFIG="${PROXY_ENV_DIR}/kubeconfig"
-      
+
       # K8S_AUTH_PROXY for ansible kubernetes.core collection
       export K8S_AUTH_PROXY=http://${EC2_PUBLIC_IP}:${PROXYPORT}/

       # Display helpful information when sourced
       echo "Proxy environment loaded from: ${PROXY_ENV_DIR}"
       echo "KUBECONFIG set to: ${KUBECONFIG}"
-      
+
       # Verify kubeconfig exists
       if [[ ! -f "${KUBECONFIG}" ]]; then
         echo "Warning: KUBECONFIG file not found at ${KUBECONFIG}"
@@ -35,4 +35,4 @@
         echo "✓ KUBECONFIG file found"
       fi
   dest: "./proxy.env"
-  delegate_to: localhost
\ No newline at end of file
+  delegate_to: localhost
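The generated proxy.env is meant to be sourced from any shell before talking to
the cluster; a usage sketch, assuming the file sits next to the fetched
kubeconfig as written above:

    source ./proxy.env    # exports http(s)_proxy, no_proxy, KUBECONFIG, K8S_AUTH_PROXY
    oc get nodes          # traffic now goes through the squid proxy on the hypervisor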
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/infrastructure.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/infrastructure.yml
index cf08960..ed25297 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/infrastructure.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/infrastructure.yml
@@ -26,4 +26,4 @@
     permanent: true
     state: enabled
     immediate: true
-  become: true
\ No newline at end of file
+  become: true
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/main.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/main.yml
index 498845f..e9fc29a 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/main.yml
@@ -11,4 +11,4 @@
   include_tasks: infrastructure.yml

 - name: Setup and start proxy container
-  include_tasks: container.yml
\ No newline at end of file
+  include_tasks: container.yml
diff --git a/deploy/openshift-clusters/roles/redfish/defaults/main.yml b/deploy/openshift-clusters/roles/redfish/defaults/main.yml
index 1d7f095..cde146f 100644
--- a/deploy/openshift-clusters/roles/redfish/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/redfish/defaults/main.yml
@@ -6,4 +6,4 @@
 bmh_namespace: "openshift-machine-api"

 # SSL certificate verification setting
-ssl_insecure_param: ""
\ No newline at end of file
+ssl_insecure_param: ""
diff --git a/deploy/openshift-clusters/roles/redfish/tasks/main.yml b/deploy/openshift-clusters/roles/redfish/tasks/main.yml
index 89719c1..c64bafa 100644
--- a/deploy/openshift-clusters/roles/redfish/tasks/main.yml
+++ b/deploy/openshift-clusters/roles/redfish/tasks/main.yml
@@ -24,4 +24,4 @@
   loop: "{{ bmh_names }}"
   loop_control:
     loop_var: current_bmh_name # This will be used by process_bmh.yml
-  when: bmh_names is defined and bmh_names | length > 0
\ No newline at end of file
+  when: bmh_names is defined and bmh_names | length > 0
diff --git a/deploy/openshift-clusters/roles/redfish/tasks/process_bmh.yml b/deploy/openshift-clusters/roles/redfish/tasks/process_bmh.yml
index 1fe2a77..ff7456e 100644
--- a/deploy/openshift-clusters/roles/redfish/tasks/process_bmh.yml
+++ b/deploy/openshift-clusters/roles/redfish/tasks/process_bmh.yml
@@ -17,8 +17,8 @@
 - name: Extract node name from BMH status
   set_fact:
     node_name: "{{ bmh_detail.resources[0].status.hardware.hostname }}"
-  when: 
-    - bmh_detail.resources is defined 
+  when:
+    - bmh_detail.resources is defined
    - bmh_detail.resources | length > 0
    - bmh_detail.resources[0].status.hardware.hostname is defined
@@ -79,7 +79,6 @@
     var: pcs_stonith_status_result
   when: pcs_stonith_status_result is defined

-
 - name: Create PCS stonith resource on node {{ node_name }} if it doesn't exist
   # Using OCP debug command to access the node without requiring SSH
   ansible.builtin.shell: |
@@ -111,7 +110,6 @@
     var: pcs_stonith_create_result
   when: pcs_stonith_create_result is defined

-
 - name: Enable PCS stonith on node {{ node_name }}
   # Using OCP debug command to access the node without requiring SSH
   ansible.builtin.shell: |
@@ -134,5 +132,3 @@
     var: pcs_stonith_create_result
   when: pcs_stonith_create_result is defined
-
-
diff --git a/deploy/openshift-clusters/setup.yml b/deploy/openshift-clusters/setup.yml
index 5e2d7b1..8042b75 100644
--- a/deploy/openshift-clusters/setup.yml
+++ b/deploy/openshift-clusters/setup.yml
@@ -1,3 +1,4 @@
+---
 - hosts: metal_machine
   gather_facts: no
   force_handlers: yes
diff --git a/deploy/openshift-clusters/vars/init-host.yml b/deploy/openshift-clusters/vars/init-host.yml
index d68bda1..8f88345 100644
--- a/deploy/openshift-clusters/vars/init-host.yml
+++ b/deploy/openshift-clusters/vars/init-host.yml
@@ -1,3 +1,4 @@
+---
 # Default variables for init-host playbook
 # Copy this file to vars/init-host.yml.local to override defaults without committing secrets
diff --git a/hack/pre-commit b/hack/pre-commit
new file mode 100755
index 0000000..2ee8e56
--- /dev/null
+++ b/hack/pre-commit
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -e
+
+echo "Running pre-commit verification..."
+
+if ! make verify; then
+    echo ""
+    echo "❌ Pre-commit verification failed!"
+    echo ""
+    echo "To fix formatting issues, run:"
+    echo "  make yamlfmt"
+    echo ""
+    echo "To fix shell script issues, run:"
+    echo "  make shellcheck"
+    echo ""
+    echo "After fixing issues, stage your changes and commit again."
+    echo ""
+    echo "To bypass this hook (not recommended), use:"
+    echo "  git commit --no-verify"
+    echo ""
+    exit 1
+fi
+
+echo "✅ Pre-commit verification passed!"
+exit 0
+
diff --git a/hack/yamlfmt.sh b/hack/yamlfmt.sh
new file mode 100755
index 0000000..d645379
--- /dev/null
+++ b/hack/yamlfmt.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -e
+
+CONTAINER_ENGINE=${CONTAINER_ENGINE:-podman}
+CONTAINER_IMAGE="ghcr.io/google/yamlfmt:latest"
+FMT_CONFIG=".yamlfmt"
+VALIDATE_ONLY=${VALIDATE_ONLY:-false}
+VALIDATE_ONLY_FLAG_ARGS="--lint"
+EXTRA_FLAG_ARGS=""
+
+if [ "$VALIDATE_ONLY" != "false" ]; then
+    EXTRA_FLAG_ARGS="$VALIDATE_ONLY_FLAG_ARGS"
+fi
+
+
+if [ "$OPENSHIFT_CI" != "" ]; then
+    TOP_DIR="${1:-.}"
+    yamlfmt -conf "$FMT_CONFIG" "$EXTRA_FLAG_ARGS" "$TOP_DIR"
+else
+    $CONTAINER_ENGINE run --rm \
+        --env OPENSHIFT_CI=TRUE \
+        --env VALIDATE_ONLY="$VALIDATE_ONLY" \
+        --volume "${PWD}:/workdir:z" \
+        --entrypoint sh \
+        --workdir /workdir \
+        $CONTAINER_IMAGE \
+        hack/yamlfmt.sh "${@}"
+fi;
diff --git a/helpers/apply-rpm-patch.yml b/helpers/apply-rpm-patch.yml
index 423dfe2..baff5ae 100644
--- a/helpers/apply-rpm-patch.yml
+++ b/helpers/apply-rpm-patch.yml
@@ -27,8 +27,8 @@
 - name: Install RPM package using rpm-ostree
   command: rpm-ostree -C override replace /var/home/core/{{ rpm_package }}
   register: rpm_ostree_result
-  async: 600  # 10 minutes timeout
-  poll: 30  # check every 30 seconds
+  async: 600 # 10 minutes timeout
+  poll: 30 # check every 30 seconds
   become: yes

 - name: Display rpm-ostree command output
@@ -44,11 +44,11 @@
   reboot:
     msg: "Rebooting node to apply rpm-ostree changes"
     connect_timeout: 5
-    reboot_timeout: 600  # Extended timeout for ostree boot
+    reboot_timeout: 600 # Extended timeout for ostree boot
     pre_reboot_delay: 10
-    post_reboot_delay: 60  # Wait longer after reboot
+    post_reboot_delay: 60 # Wait longer after reboot
     test_command: podman exec etcd etcdctl member list
-  throttle: 1  # Reboot one node at a time
+  throttle: 1 # Reboot one node at a time
   become: yes

 - name: Gather package facts after reboot
diff --git a/helpers/build-and-patch-resource-agents.yml b/helpers/build-and-patch-resource-agents.yml
index 65f6155..63b3da8 100644
--- a/helpers/build-and-patch-resource-agents.yml
+++ b/helpers/build-and-patch-resource-agents.yml
@@ -153,7 +153,7 @@
   tasks:
     - name: Show completion message
       debug:
-        msg: |
+        msg: |-
          Resource-agents build and patch complete!
          RPM location: {{ playbook_dir }}/{{ hostvars[groups['metal_machine'][0]]['rpm_filename'] }}
          Cluster nodes have been patched and rebooted.
diff --git a/helpers/collect-tnf-logs.yml b/helpers/collect-tnf-logs.yml
index 5d221f3..7a777ce 100644
--- a/helpers/collect-tnf-logs.yml
+++ b/helpers/collect-tnf-logs.yml
@@ -48,7 +48,7 @@
 - name: Collect etcd podman logs
   shell: podman logs etcd
   register: etcd_logs
-  async: 30  # 30 sec timeout
+  async: 30 # 30 sec timeout
   poll: 10
   failed_when: false
   become: yes
@@ -77,7 +77,7 @@
 - name: Display collection summary
   debug:
-    msg: |
+    msg: |-
      Log collection complete for {{ inventory_hostname }}:
      - Pacemaker logs: {{ 'collected' if pacemaker_logs.rc == 0 else 'failed' }}
      - Etcd logs: {{ 'collected' if etcd_logs.rc == 0 else 'failed/timeout' }}
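Usage notes for hack/yamlfmt.sh: outside of CI it re-executes itself inside the
yamlfmt container image, so only podman (or another engine) is needed locally.
Example invocations, based on the environment variables the script reads:

    ./hack/yamlfmt.sh                          # format the whole repo via podman
    VALIDATE_ONLY=true ./hack/yamlfmt.sh       # lint only, non-zero exit on drift
    CONTAINER_ENGINE=docker ./hack/yamlfmt.sh  # use docker instead of podman
    make verify                                # shellcheck + yamlfmt in lint mode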