diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ce740cc..888fc67 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,3 +6,5 @@ updates: directory: "/" schedule: interval: "weekly" + cooldown: + default-days: 7 diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml index 43908d7..0a5bd14 100644 --- a/.github/workflows/ansible-lint.yml +++ b/.github/workflows/ansible-lint.yml @@ -10,11 +10,11 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: persist-credentials: false - name: Lint Ansible Playbook - uses: ansible/ansible-lint@43e758bad47344f1ce7b699c0020299f486a8026 + uses: ansible/ansible-lint@7f6abc5ef97d0fb043a0f3d416dfbc74399fbda0 with: setup_python: "true" diff --git a/.github/workflows/ansible-sanitytest.yml b/.github/workflows/ansible-sanitytest.yml index 0675adf..d2be96f 100644 --- a/.github/workflows/ansible-sanitytest.yml +++ b/.github/workflows/ansible-sanitytest.yml @@ -16,13 +16,13 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: path: ansible_collections/rhvp/cluster_utils persist-credentials: false - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/ansible-unittest.yml b/.github/workflows/ansible-unittest.yml index c195aee..fa9f1fd 100644 --- a/.github/workflows/ansible-unittest.yml +++ b/.github/workflows/ansible-unittest.yml @@ -16,13 +16,13 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: path: ansible_collections/rhvp/cluster_utils persist-credentials: false - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/jsonschema.yaml b/.github/workflows/jsonschema.yaml index 53310d4..b9c1424 100644 --- a/.github/workflows/jsonschema.yaml +++ b/.github/workflows/jsonschema.yaml @@ -15,12 +15,12 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: persist-credentials: false - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/superlinter.yml b/.github/workflows/superlinter.yml index d772de5..9492da0 100644 --- a/.github/workflows/superlinter.yml +++ b/.github/workflows/superlinter.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout Code - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: # Full git history is needed to get a proper list of changed files within `super-linter` fetch-depth: 0 @@ -22,7 +22,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: super-linter/super-linter/slim@2bdd90ed3262e023ac84bf8fe35dc480721fc1f2 + uses: super-linter/super-linter/slim@61abc07d755095a68f4987d1c2c3d1d64408f1f9 # v8.5.0 env: VALIDATE_ALL_CODEBASE: true DEFAULT_BRANCH: main @@ -36,6 +36,7 @@ jobs: 
VALIDATE_JSON_PRETTIER: false VALIDATE_MARKDOWN_PRETTIER: false VALIDATE_KUBERNETES_KUBECONFORM: false + VALIDATE_PYTHON_BLACK: false VALIDATE_PYTHON_PYLINT: false VALIDATE_PYTHON_PYINK: false VALIDATE_PYTHON_RUFF_FORMAT: false diff --git a/.github/workflows/trigger-utility-imperative-container-builds.yml b/.github/workflows/trigger-utility-imperative-container-builds.yml index b26be07..ba4ae2b 100644 --- a/.github/workflows/trigger-utility-imperative-container-builds.yml +++ b/.github/workflows/trigger-utility-imperative-container-builds.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Generate GitHub App token id: generate-token - uses: actions/create-github-app-token@v2 + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2 with: app-id: ${{ secrets.GH_WORKFLOW_AUTOMATION_CLIENT_ID }} private-key: ${{ secrets.GH_WORKFLOW_AUTOMATION_PRIVATE_KEY }} diff --git a/playbooks/argo_healthcheck.yml b/playbooks/argo_healthcheck.yml index fa812fc..2259876 100644 --- a/playbooks/argo_healthcheck.yml +++ b/playbooks/argo_healthcheck.yml @@ -3,6 +3,6 @@ hosts: localhost connection: local gather_facts: false - roles: - - role: oc_check - - role: argo_healthcheck + tasks: + - name: Check health of argo applications + ansible.builtin.include_tasks: tasks/check_argo_health.yml diff --git a/playbooks/display_secrets_info.yml b/playbooks/display_secrets_info.yml index ca900eb..8ce8619 100644 --- a/playbooks/display_secrets_info.yml +++ b/playbooks/display_secrets_info.yml @@ -27,7 +27,7 @@ # This will allow us to determine schema version and which backend to use - name: Determine how to load secrets ansible.builtin.set_fact: - secrets_yaml: '{{ values_secrets_data | from_yaml }}' + secrets_yaml: "{{ values_secrets_data if values_secrets_data is not string else values_secrets_data | from_yaml }}" - name: Parse secrets data no_log: '{{ hide_sensitive_output }}' diff --git a/playbooks/install.yml b/playbooks/install.yml index d793a28..2ff805a 100644 --- a/playbooks/install.yml +++ b/playbooks/install.yml @@ -2,7 +2,7 @@ - name: Install the pattern via pattern-install chart ansible.builtin.import_playbook: operator_deploy.yml -- name: Load secrets (if not explicity disabled in values-global.yaml) +- name: Load secrets (if not explicitly disabled in values-global.yaml) ansible.builtin.import_playbook: load_secrets.yml - name: Wait for pattern to finish installation (all Argo apps should be healthy/synced) @@ -11,18 +11,14 @@ gather_facts: false vars: - max_retries: 30 + max_retries: 60 retry_count: 0 retry_delay: 60 tasks: - name: Print start message - ansible.builtin.shell: | - printf "==> Waiting for all argo applications to be healthy/synced.\n" > /dev/tty - - - name: Ensure oc is installed - ansible.builtin.include_role: - name: oc_check + ansible.builtin.debug: + msg: "Waiting for all argo applications to be healthy/synced." 
- name: Wait for all Argo applications to be healthy and synced with retry logic ansible.builtin.include_tasks: tasks/retry_argo_healthcheck.yml diff --git a/playbooks/load_secrets.yml b/playbooks/load_secrets.yml index 49b2670..a8d7a6a 100644 --- a/playbooks/load_secrets.yml +++ b/playbooks/load_secrets.yml @@ -7,22 +7,17 @@ - role: pattern_settings tasks: - - name: Check values-global to see if secret loading is explicity disabled + - name: Check values-global to see if secret loading is explicitly disabled ansible.builtin.set_fact: secret_loader_disabled: "{{ values_global.global.secretLoader.disabled | default(false) | bool }}" - name: Load secrets (when enabled) + ansible.builtin.include_role: + name: load_secrets when: not secret_loader_disabled - block: - - name: Announce secrets loading - ansible.builtin.shell: | - printf "==> Loading secrets (this may take several minutes)...\n" > /dev/tty - - - name: Process secrets via role - ansible.builtin.include_role: - name: load_secrets - name: Print secret loading disabled message - ansible.builtin.shell: | - printf "==> Secrets loading is currently disabled. To enable, update the value of .global.secretLoader.disabled in your values-global.yaml to false.\n" > /dev/tty + ansible.builtin.debug: + msg: | + Secrets loading is currently disabled. To enable, update the value of '.global.secretLoader.disabled' in 'values-global.yaml' to 'false'. when: secret_loader_disabled diff --git a/playbooks/operator_deploy.yml b/playbooks/operator_deploy.yml index adc7718..9758b4d 100644 --- a/playbooks/operator_deploy.yml +++ b/playbooks/operator_deploy.yml @@ -7,7 +7,7 @@ roles: - role: pattern_settings # set general pattern vars - role: install_settings # set pattern-install specific vars - - role: validate_prereq # ensure installation depencies are present + - role: validate_prereq # ensure installation dependencies are present - role: validate_cluster # ensure a cluster is connected and has a default storage class - role: pattern_install_template # render the pattern-install helm chart @@ -18,10 +18,9 @@ ansible.builtin.set_fact: disable_validate_origin: >- {{ - ( - disable_validate_origin - | default(lookup('env', 'DISABLE_VALIDATE_ORIGIN'), true) - | default('false', false) + disable_validate_origin | default( + lookup('env', 'DISABLE_VALIDATE_ORIGIN') or 'false', + true ) | bool }} @@ -30,29 +29,25 @@ name: validate_origin when: not disable_validate_origin - - name: Apply rendered pattern-install chart manifests (with retry) + - name: Apply rendered pattern-install chart manifests block: - - name: Preview manifest that will be applied - ansible.builtin.shell: | - printf "==> Applying the following manifest to the cluster:\n\n" > /dev/tty - printf "%s\n" "{{ pattern_install_rendered_yaml }}" > /dev/tty - - - name: Apply via oc with retry - ansible.builtin.command: oc apply -f - - args: - stdin: "{{ pattern_install_rendered_yaml }}" - stdin_add_newline: false + - name: Apply manifests via native k8s module + kubernetes.core.k8s: + definition: "{{ pattern_install_rendered_yaml }}" + state: present register: _apply retries: 10 delay: 15 - until: _apply.rc == 0 + until: not _apply.failed - name: Print success message - ansible.builtin.shell: printf "==> Installation succeeded!\n" > /dev/tty + ansible.builtin.debug: + msg: | + Installation of {{ pattern_name }} succeeded! rescue: - - name: Print failure summary and abort - ansible.builtin.shell: | - printf "==> Installation failed. 
Error:\n" > /dev/tty - printf "%s\n" "{{ _apply.stderr | default(_apply.stdout) | default('') }}" > /dev/tty - exit 1 + - name: Print failure summary + ansible.builtin.fail: + msg: | + Failed to install pattern after 10 retries. + Error: {{ _apply.error | default(_apply.msg) | default('Unknown error') }} diff --git a/playbooks/process_secrets.yml b/playbooks/process_secrets.yml index 6379111..6329dda 100644 --- a/playbooks/process_secrets.yml +++ b/playbooks/process_secrets.yml @@ -21,7 +21,7 @@ # This will allow us to determine schema version and which backend to use - name: Determine how to load secrets ansible.builtin.set_fact: - secrets_yaml: '{{ values_secrets_data | from_yaml }}' + secrets_yaml: "{{ values_secrets_data if values_secrets_data is not string else values_secrets_data | from_yaml }}" - name: Parse secrets data no_log: '{{ hide_sensitive_output | default(true) }}' diff --git a/playbooks/show.yml b/playbooks/show.yml index 8315b5d..f24efff 100644 --- a/playbooks/show.yml +++ b/playbooks/show.yml @@ -3,12 +3,16 @@ hosts: localhost connection: local gather_facts: false + vars: + include_crds: false + roles: - role: pattern_settings # set general pattern vars - role: install_settings # set pattern-install specific vars - role: pattern_install_template # render the pattern-install helm chart tasks: - - name: Print rendered pattern-install chart manifests - ansible.builtin.shell: | - printf "\n%s\n" "{{ pattern_install_rendered_yaml }}" > /dev/tty + - name: Print rendered pattern-install chart + ansible.builtin.debug: + msg: | + {{ pattern_install_rendered_yaml }} diff --git a/playbooks/tasks/check_argo_health.yml b/playbooks/tasks/check_argo_health.yml index f4ada55..6e40b95 100644 --- a/playbooks/tasks/check_argo_health.yml +++ b/playbooks/tasks/check_argo_health.yml @@ -1,62 +1,37 @@ --- -- name: Get all Argo CD applications as JSON - ansible.builtin.command: oc get applications.argoproj.io -A -o json - register: apps_raw - changed_when: false +- name: Get all Argo CD applications + kubernetes.core.k8s_info: + api_version: argoproj.io/v1alpha1 + kind: Application + register: argo_apps -- name: Extract and analyze applications +- name: Process Application Statuses ansible.builtin.set_fact: - apps_items: >- - {{ - (apps_raw.stdout | default('{}')) - | from_json - | json_query('items') - | default([]) - }} - -- name: Reset applications summary - ansible.builtin.set_fact: - apps_summary: [] - -- name: Build applications summary - ansible.builtin.set_fact: - apps_summary: >- - {{ apps_summary + [ - { - 'namespace': (item.metadata.namespace | default('')), - 'name': (item.metadata.name | default('')), - 'sync': (item.status.sync.status | default('')), - 'health': (item.status.health.status | default('')), - 'bad': ((item.status.sync.status | default('')) != 'Synced') - or ((item.status.health.status | default('')) != 'Healthy') - } - ] - }} - loop: "{{ apps_items }}" - loop_control: - label: "{{ item.metadata.namespace }}:{{ item.metadata.name }}" - -- name: Filter unhealthy or unsynced applications - ansible.builtin.set_fact: - unhealthy_apps: "{{ apps_summary | default([]) | selectattr('bad') | list }}" - -- name: Print unhealthy/unsynced applications to /dev/tty - when: unhealthy_apps | length > 0 - ansible.builtin.shell: - cmd: | - printf "==> Unhealthy or unsynced applications:\n" > /dev/tty - {% for app in unhealthy_apps %} - printf " {{ app.namespace }}/{{ app.name }} -> Sync: {{ app.sync }} - Health: {{ app.health }}\n" > /dev/tty + apps_summary: "{{ (summary_yaml | 
from_yaml) | default([], true) }}" + vars: + summary_yaml: | + {% for item in argo_apps.resources -%} + - namespace: {{ item.metadata.namespace }} + name: {{ item.metadata.name }} + sync: {{ item.status.sync.status | default('Unknown') }} + health: {{ item.status.health.status | default('Unknown') }} + bad: {{ (item.status.sync.status | default('Unknown') != 'Synced' or item.status.health.status | default('Unknown') != 'Healthy') | lower }} {% endfor %} - printf "==> Retrying in 60 seconds...\n" > /dev/tty -- name: Fail if any applications are not healthy/synced - when: unhealthy_apps | length > 0 - ansible.builtin.fail: - msg: "{{ unhealthy_apps | length }} application(s) are not healthy/synced" - -- name: Print success message - when: unhealthy_apps | length == 0 - ansible.builtin.shell: - cmd: | - printf "==> All {{ apps_summary | length }} Argo applications are healthy and synced.\n" > /dev/tty +- name: Validate Cluster Health + vars: + bad_apps: "{{ apps_summary | selectattr('bad') | list }}" + ansible.builtin.assert: + that: + - apps_summary | length > 0 + - bad_apps | length == 0 + fail_msg: | + {% if apps_summary | length == 0 %} + No ArgoCD applications found in the cluster. + {% else %} + The following ArgoCD applications are out-of-sync or unhealthy: + {% for app in bad_apps %} + - {{ app.namespace }}/{{ app.name }} (Sync: {{ app.sync }}, Health: {{ app.health }}) + {% endfor %} + {% endif %} + quiet: true diff --git a/plugins/callback/readable.py b/plugins/callback/readable.py new file mode 100644 index 0000000..5884339 --- /dev/null +++ b/plugins/callback/readable.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2023, Al Bowles <@akatch> +# Copyright (c) 2012-2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: readable +type: stdout +author: Al Bowles (@akatch), tweaked by Michele Baldessari & Drew Minnear +short_description: condensed Ansible output specific to Validated Patterns +description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. 
+extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +""" + +from ansible import constants as C +from ansible.module_utils.common.text.converters import to_text +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + +NO_STDOUT_TASKS = ( + "debug", + "ansible.builtin.debug", + "assert", + "ansible.builtin.assert", + "command", + "ansible.builtin.command", + "shell", + "ansible.builtin.shell", + "pause", + "ansible.builtin.pause", + "kubernetes.core.k8s_info", + "kubernetes.core.k8s_exec", + "ansible.builtin.pip", +) + + +class CallbackModule(CallbackModule_default): + """ + Design goals: + - Print consolidated output that looks like a *NIX startup log + - Defaults should avoid displaying unnecessary information wherever possible + """ + + CALLBACK_VERSION = 1.0 + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "rhvp.cluster_utils.readable" + + def __init__(self): + super().__init__() + self._loop_item_count = 0 + self._loop_had_change = False + + def _finalize_loop_if_needed(self): + """If we were processing loop items, finalize with ok/done.""" + if self._loop_item_count > 0: + if self._loop_had_change: + self._display.display("done", C.COLOR_CHANGED) + else: + self._display.display("ok", C.COLOR_OK) + self._loop_item_count = 0 + self._loop_had_change = False + + def _run_is_verbose(self, result): + return ( + self._display.verbosity > 0 or "_ansible_verbose_always" in result._result + ) and "_ansible_verbose_override" not in result._result + + def _get_task_display_name(self, task): + """Return task display name, or None for include tasks.""" + name = task.get_name().strip().split(" : ")[-1] + return None if name.startswith("include") else name + + def _preprocess_result(self, result): + self.delegated_vars = result._result.get("_ansible_delegated_vars", None) + self._handle_exception( + result._result, use_stderr=self.get_option("display_failed_stderr") + ) + self._handle_warnings(result._result) + + def _process_result_output(self, result, msg): + # task_host = f"{result._host.get_name()} " + task_host = "" + task_result = f"{task_host}{msg}" + + if self._run_is_verbose(result): + task_result = ( + f"{task_host}{msg}: {self._dump_results(result._result, indent=4)}" + ) + return task_result + + if self.delegated_vars: + task_delegate_host = self.delegated_vars["ansible_host"] + task_result = f"{task_host}-> {task_delegate_host} {msg}" + + if ( + result._result.get("msg") + and result._result.get("msg") != "All items completed" + ): + task_result += f" | msg: {to_text(result._result.get('msg'))}" + + if result._result.get("stdout"): + task_result += f" | stdout: {result._result.get('stdout')}" + + if result._result.get("stderr"): + task_result += f" | stderr: {result._result.get('stderr')}" + + return task_result + + def _display_task_start(self, task, suffix=""): + """Display task start message with optional suffix (e.g., 'via handler').""" + name = self._get_task_display_name(task) + if name is None: + return + check_mode = ( + " (check mode)" + if task.check_mode and self.get_option("check_mode_markers") + else "" + ) + suffix_str = f" ({suffix})" if suffix else "" + self._display.display(f"{name}{suffix_str}{check_mode}...", newline=False) + + def v2_playbook_on_play_start(self, play): + self._finalize_loop_if_needed() + name = play.get_name().strip() + check_mode = play.check_mode and self.get_option("check_mode_markers") + + if name and play.hosts: + check_str = " (in check mode)" if check_mode else "" + 
msg = f"\n- {name}{check_str} on hosts: {','.join(play.hosts)} -" + else: + msg = "- check mode -" if check_mode else "---" + + self._display.display(msg) + + def v2_runner_on_skipped(self, result, ignore_errors=False): + # Suppress all skipped task output + return + + def _build_msg_with_item(self, base_msg, result): + """Build message with optional item label.""" + item_value = self._get_item_label(result._result) + return f"{base_msg} | item: {item_value}" if item_value else base_msg + + def _is_fail_task(self, result): + """Check if this is a fail task that should use simple message output.""" + return result._task.action in ("fail", "ansible.builtin.fail") + + def _is_quiet_assert_task(self, result): + """Check if this is an assert task with quiet: true.""" + return ( + result._task.action in ("assert", "ansible.builtin.assert") + and result._task.args.get("quiet") is True + ) + + def _handle_exception(self, result, use_stderr=None): + """Override exception handling to suppress for fail tasks and quiet assert tasks.""" + # Skip exception handling for fail tasks - we just want to show the msg + if hasattr(self, "_current_task") and self._current_task: + if self._current_task.action in ("fail", "ansible.builtin.fail"): + return + # Skip exception handling for quiet assert tasks + if ( + self._current_task.action in ("assert", "ansible.builtin.assert") + and self._current_task.args.get("quiet") is True + ): + return + + super()._handle_exception(result, use_stderr) + + def v2_playbook_on_task_start(self, task, is_conditional): + # Store current task for exception handling + self._current_task = task + self._finalize_loop_if_needed() + self._display_task_start(task) + + def v2_playbook_on_handler_task_start(self, task): + # Store current task for exception handling + self._current_task = task + self._finalize_loop_if_needed() + self._display_task_start(task, suffix="via handler") + + def v2_runner_on_failed(self, result, ignore_errors=False): + if ignore_errors: + self._display.display(" error (ignored)", C.COLOR_WARN) + return + + # For quiet assert tasks, just display the fail_msg in normal color + if self._is_quiet_assert_task(result): + if result._result.get("warnings"): + for warning in result._result["warnings"]: + self._display.warning(warning) + msg = result._result.get("msg", "Assertion failed") + self._display.display(f" {msg}") + return + + # For fail tasks, just display the message cleanly + if self._is_fail_task(result): + if result._result.get("warnings"): + for warning in result._result["warnings"]: + self._display.warning(warning) + msg = result._result.get("msg", "Task failed") + self._display.display(f" {msg}") + return + + # Full detailed output for other failures + self._preprocess_result(result) + msg = self._build_msg_with_item("failed", result) + task_result = self._process_result_output(result, msg) + self._display.display( + f" {task_result}", + C.COLOR_ERROR, + stderr=self.get_option("display_failed_stderr"), + ) + + def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): + self._preprocess_result(result) + + # Skip aggregated loop results - items were already handled + if result._result.get("results"): + return + + # Handle fail tasks that succeeded due to failed_when=false + if self._is_fail_task(result): + # For fail tasks that didn't actually fail, just show ok + self._display.display(" ok", C.COLOR_OK) + return + + # Handle debug tasks specially + if result._task.action in NO_STDOUT_TASKS: + debug_msg = result._result.get("msg", "") + if 
debug_msg: + self._display.display(f"\n{debug_msg}", C.COLOR_VERBOSE) + return + + if result._result.get("changed"): + msg = self._build_msg_with_item("done", result) + display_color = C.COLOR_CHANGED + elif not self.get_option("display_ok_hosts"): + return + + task_result = self._process_result_output(result, msg) + self._display.display(f" {task_result}", display_color) + + def v2_runner_item_on_skipped(self, result): + self.v2_runner_on_skipped(result) + + def v2_runner_item_on_failed(self, result): + # Reset loop state - failure message will be printed instead + self._loop_item_count = 0 + self._loop_had_change = False + self.v2_runner_on_failed(result, ignore_errors=result._task.ignore_errors) + + def v2_runner_item_on_ok(self, result): + self._preprocess_result(result) + + # Handle debug tasks specially + if result._task.action in NO_STDOUT_TASKS: + debug_msg = result._result.get("msg", "") + if debug_msg: + self._display.display(f"\n{debug_msg}", C.COLOR_VERBOSE) + return + + if result._result.get("changed"): + self._loop_had_change = True + + self._loop_item_count += 1 + self._display.display(".", newline=False) + + def v2_runner_on_unreachable(self, result): + self._preprocess_result(result) + task_result = self._process_result_output(result, "unreachable") + self._display.display( + f" {task_result}", + C.COLOR_UNREACHABLE, + stderr=self.get_option("display_failed_stderr"), + ) + + def v2_on_file_diff(self, result): + return + + def v2_playbook_on_include(self, included_file): + """Suppress 'included:' messages.""" + return + + def v2_playbook_on_stats(self, stats): + self._finalize_loop_if_needed() + return + + def v2_playbook_on_no_hosts_matched(self): + self._display.display(" No hosts found!", color=C.COLOR_DEBUG) + + def v2_playbook_on_no_hosts_remaining(self): + self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) + + def v2_playbook_on_start(self, playbook): + return + + def v2_runner_retry(self, result): + msg = f" Retrying... 
({result._result['attempts']} of {result._result['retries']})" + if self._run_is_verbose(result): + msg += f"Result was: {self._dump_results(result._result)}" + self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/module_utils/load_secrets_common.py b/plugins/module_utils/load_secrets_common.py index f8ebe5d..9186736 100644 --- a/plugins/module_utils/load_secrets_common.py +++ b/plugins/module_utils/load_secrets_common.py @@ -312,13 +312,13 @@ def _validate_generate_mode(self, f): path = self._get_field_path(f) vault_policy = f.get("vaultPolicy", None) - if value is not None: + if value is not None and value != "": return ( False, "Secret has onMissingValue set to 'generate' but has a value set", ) - if path is not None: + if path is not None and path != "": return ( False, "Secret has onMissingValue set to 'generate' but has a path set", diff --git a/plugins/module_utils/load_secrets_v1.py b/plugins/module_utils/load_secrets_v1.py index ff0b0c9..9701c06 100644 --- a/plugins/module_utils/load_secrets_v1.py +++ b/plugins/module_utils/load_secrets_v1.py @@ -16,6 +16,7 @@ """ Module that implements V1 of the values-secret.yaml spec """ + from __future__ import absolute_import, division, print_function __metaclass__ = type @@ -38,7 +39,6 @@ class LoadSecretsV1: - def __init__( self, module, @@ -125,7 +125,7 @@ def sanitize_values(self): self.check_for_missing_secrets() secrets = self.syaml.get("secrets", {}) - # We need to explicitely check for None because the file might contain the + # We need to explicitly check for None because the file might contain the # top-level 'secrets:' or 'files:' key but have nothing else under it which will # return None and not {} if secrets is None: diff --git a/plugins/modules/vault_load_parsed_secrets.py b/plugins/modules/vault_load_parsed_secrets.py index 0c36b4c..cd06f8c 100644 --- a/plugins/modules/vault_load_parsed_secrets.py +++ b/plugins/modules/vault_load_parsed_secrets.py @@ -22,6 +22,7 @@ version: 2.0 """ + from __future__ import absolute_import, division, print_function __metaclass__ = type @@ -41,7 +42,7 @@ - Martin Jackson (@mhjacks) description: - Takes parsed secrets objects and vault policies (as delivered by parse_secrets_info) and runs the commands to - load them into a vault instance. The relevent metadata will exist in the parsed secrets object. Returns count + load them into a vault instance. The relevant metadata will exist in the parsed secrets object. Returns count of secrets injected. 
options: parsed_secrets: @@ -98,7 +99,6 @@ class VaultSecretLoader: - def __init__( self, module, diff --git a/requirements.yml b/requirements.yml index 29f0724..fb6b6f2 100644 --- a/requirements.yml +++ b/requirements.yml @@ -1,4 +1,5 @@ --- collections: + - name: community.general - name: kubernetes.core - name: community.okd diff --git a/roles/argo_healthcheck/tasks/main.yml b/roles/argo_healthcheck/tasks/main.yml deleted file mode 100644 index fe9abb3..0000000 --- a/roles/argo_healthcheck/tasks/main.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: Print start message - ansible.builtin.shell: - cmd: | - printf "==> Checking argo applications\n" > /dev/tty - -- name: Get all Argo CD applications as JSON - ansible.builtin.command: oc get applications.argoproj.io -A -o json - register: apps_raw - failed_when: false - -- name: Extract items list - ansible.builtin.set_fact: - apps_items: >- - {{ - (apps_raw.stdout | default('{}')) - | from_json - | json_query('items') - | default([]) - }} - -- name: Build applications summary - ansible.builtin.set_fact: - apps_summary: >- - {{ (apps_summary | default([])) + [ - { - 'namespace': (item.metadata.namespace | default('')), - 'name': (item.metadata.name | default('')), - 'sync': (item.status.sync.status | default('')), - 'health': (item.status.health.status | default('')), - 'bad': ((item.status.sync.status | default('')) != 'Synced') - or ((item.status.health.status | default('')) != 'Healthy') - } - ] - }} - loop: "{{ apps_items }}" - -- name: Print status lines - ansible.builtin.shell: - cmd: | - printf " {{ item.namespace }} {{ item.name }} -> Sync: {{ item.sync }} - Health: {{ item.health }}\n" > /dev/tty - loop: "{{ apps_summary | default([]) }}" - loop_control: - label: "{{ item.namespace }}:{{ item.name }}" - -- name: Determine if any app is not healthy/synced - ansible.builtin.set_fact: - any_bad: "{{ (apps_summary | default([])) | selectattr('bad') | list | length > 0 }}" - -- name: Fail if any app is not healthy/synced - when: any_bad | bool - ansible.builtin.shell: - cmd: | - printf "Some applications are not synced or are unhealthy\n" > /dev/tty - exit 1 diff --git a/roles/find_vp_secrets/tasks/main.yml b/roles/find_vp_secrets/tasks/main.yml index 2c031f1..9c99590 100644 --- a/roles/find_vp_secrets/tasks/main.yml +++ b/roles/find_vp_secrets/tasks/main.yml @@ -15,12 +15,13 @@ register: custom_file_values_secret when: custom_env_values_secret | default('') | length > 0 -- name: Set values-secret yaml file to {{ custom_file_values_secret.stat.path }} +- name: Set values-secret yaml file to {{ custom_file_values_secret.stat.path | default('unset') }} ansible.builtin.set_fact: found_file: "{{ custom_file_values_secret.stat.path }}" when: - - custom_env_values_secret | default('') | length > 0 + - custom_file_values_secret.stat is defined - custom_file_values_secret.stat.exists + - custom_env_values_secret | default('') | length > 0 # FIXME(bandini): Eventually around end of 2023(?) we should drop # ~/values-secret-{{ pattern_name }}.yaml and ~/values-secret.yaml diff --git a/roles/helm_check/tasks/main.yaml b/roles/helm_check/tasks/main.yaml index 35e8542..8342398 100644 --- a/roles/helm_check/tasks/main.yaml +++ b/roles/helm_check/tasks/main.yaml @@ -1,22 +1,18 @@ --- -- name: Announce helm check - ansible.builtin.shell: | - printf "==> Checking Helm availability... 
" > /dev/tty - -- name: Probe helm version +- name: Check Helm availability ansible.builtin.command: helm version --short register: _helm_probe failed_when: false -- name: Report Helm version to TTY (if installed) - ansible.builtin.shell: | - printf "OK (%s)\n" "{{ _helm_probe.stdout }}" > /dev/tty - when: _helm_probe.rc == 0 - - name: Print Helm missing/error message and exit (if not installed) - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " Helm is not installed or not on PATH.\n" > /dev/tty - printf " Hint: Install Helm 3 and ensure 'helm' is resolvable.\n" > /dev/tty - exit 1 - when: _helm_probe.rc != 0 + ansible.builtin.assert: + that: _helm_probe is not failed + fail_msg: | + Helm is not installed or not on PATH. + Try running your command again with the './pattern.sh' script, inside the + utility-container, or locally after installing Helm 3 and verifying it is available on PATH. + +- name: Report Helm version + ansible.builtin.debug: + msg: | + {{ _helm_probe.stdout | trim }} diff --git a/roles/iib_ci/README.md b/roles/iib_ci/README.md index c2017ce..ac216cb 100644 --- a/roles/iib_ci/README.md +++ b/roles/iib_ci/README.md @@ -50,7 +50,7 @@ make EXTRA_HELM_OPTS="--set main.gitops.operatorSource=iib-${IIB} --set main.git ### ACM operator The advanced-cluster-management operator is a little bit more complex than the others because it -also installes another operator called MCE multicluster-engine. So to install ACM you typically +also installs another operator called MCE multicluster-engine. So to install ACM you typically need two IIBs (one for acm and one for mce). With those two at hand, do the following (the ordering must be consistent: the first IIB corresponds to the first OPERATOR, etc). The following operation needs to be done on both hub *and* spokes: @@ -91,7 +91,7 @@ make EXTRA_HELM_OPTS="--set main.extraParameters[0].name=clusterGroup.subscripti ## Useful commands -* List IIBs for an operator: +- List IIBs for an operator: ```sh ansible-playbook common/ansible/playbooks/iib-ci/lookup.yml @@ -104,7 +104,7 @@ ok: [localhost] => (item=v4.13) => { Override the `operator` value with the desired bundle name to figure out the last IIBs for it. -* List all images uploaded to the internal registry: +- List all images uploaded to the internal registry: ```sh oc exec -it -n openshift-image-registry $(oc get pods -n openshift-image-registry -o json | jq -r '.items[].metadata.name | select(. | test("^image-registry-"))' | head -n1) -- bash -c "curl -k -u kubeadmin:$(oc whoami -t) https://localhost:5000/v2/_catalog" diff --git a/roles/iib_ci/tasks/mirror-related-images.yml b/roles/iib_ci/tasks/mirror-related-images.yml index 74a0bc3..0e2996c 100644 --- a/roles/iib_ci/tasks/mirror-related-images.yml +++ b/roles/iib_ci/tasks/mirror-related-images.yml @@ -131,7 +131,7 @@ mode: "0644" # NOTE(bandini): mirror.map *must* have a tag (we use the IIB number) on the image on the right side -# otherwise, the image will be uplaoded and will exist in S3 but it won't exist in the registry's catalog!! +# otherwise, the image will be uploaded and will exist in S3 but it won't exist in the registry's catalog!! 
- name: Mirror all the needed images ansible.builtin.shell: | set -o pipefail diff --git a/roles/install_settings/tasks/main.yml b/roles/install_settings/tasks/main.yml index 5184989..b5e0e8b 100644 --- a/roles/install_settings/tasks/main.yml +++ b/roles/install_settings/tasks/main.yml @@ -2,39 +2,35 @@ - name: Resolve defaults with ansible var/env var overrides ansible.builtin.import_tasks: resolve_overrides.yml -- name: Validate target_origin is usable - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " No Git remote found for branch '%s' in '%s'.\n" "{{ target_branch }}" "{{ pattern_dir }}" > /dev/tty - printf " Set TARGET_ORIGIN/TARGET_BRANCH or configure a remote for the branch.\n" > /dev/tty - exit 1 - when: (target_origin | default("") | trim) == "" +- name: Resolve target_remote_url + block: + - name: Derive remote URL via git + ansible.builtin.command: "git remote get-url {{ target_origin }}" # noqa: command-instead-of-module + args: + chdir: "{{ pattern_dir }}" + register: _repo_raw + failed_when: false -- name: Get remote URL for {{ target_origin }} # noqa: command-instead-of-module - ansible.builtin.command: "git remote get-url {{ target_origin }}" - args: - chdir: "{{ pattern_dir }}" - register: _repo_raw - failed_when: false + - name: Set target_remote_url fact + ansible.builtin.set_fact: + target_remote_url: "{{ _repo_raw.stdout | trim }}" + when: _repo_raw is not failed and _repo_raw.stdout | trim != "" -- name: Fail if remote URL cannot be determined - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " Could not resolve URL for remote '%s' in '%s'.\n" "{{ target_origin }}" "{{ pattern_dir }}" > /dev/tty - exit 1 - when: (_repo_raw.rc != 0) or ((_repo_raw.stdout | default("") | trim) == "") + - name: Ensure target_remote_url is set + ansible.builtin.assert: + that: + - target_remote_url is defined + - target_remote_url != "" + fail_msg: | + Could not resolve URL for remote '{{ target_origin }}' in '{{ pattern_dir }}'. + Ensure the remote is configured correctly in git. 
-- name: Compute _target_repo (convert SSH→HTTPS if no token_secret) - ansible.builtin.set_fact: - _target_repo: >- - {{ - (_repo_raw.stdout | trim) - if (token_secret | default("") | trim != "") - else ( - (_repo_raw.stdout | trim) - | regex_replace('^git@([^:]+):(.+)$', 'https://\1/\2') - ) - }} + - name: Convert SSH git remote URL to HTTPS equivalent when token_secret is not given + ansible.builtin.set_fact: + target_remote_url: "{{ target_remote_url | regex_replace('^git@([^:]+):(.+)$', 'https://\\1/\\2') }}" + when: + - (token_secret | default("") | trim) == "" + - target_remote_url is search('^git@') - name: Build secret_opts ansible.builtin.set_fact: @@ -52,14 +48,34 @@ else "--set main.clusterGroupName=%s" | format(target_clustergroup) }} +- name: Build include_crds_opt + ansible.builtin.set_fact: + include_crds_opt: >- + {{ + "--include-crds" if (include_crds | default(true) | bool) + else "" + }} -- name: Assemble _install_helm_opts (string) +- name: Assemble _install_helm_opts ansible.builtin.set_fact: - _install_helm_opts: >- + install_helm_opts: >- -f values-global.yaml - --set main.git.repoURL="{{ _target_repo }}" + --set main.git.repoURL="{{ target_remote_url }}" --set main.git.revision={{ target_branch }} {{ secret_opts }} {{ clustergroup_opt }} {{ uuid_helm_opts }} {{ extra_helm_opts }} + {{ include_crds_opt }} + +- name: Assemble helm template command + ansible.builtin.set_fact: + helm_template_command: >- + helm template {{ pattern_install_chart }} + --name-template {{ pattern_name }} + {{ install_helm_opts }} + +- name: Print helm template command + ansible.builtin.debug: + msg: | + {{ helm_template_command }} diff --git a/roles/install_settings/tasks/resolve_overrides.yml b/roles/install_settings/tasks/resolve_overrides.yml index 8fb3271..10c788e 100644 --- a/roles/install_settings/tasks/resolve_overrides.yml +++ b/roles/install_settings/tasks/resolve_overrides.yml @@ -5,70 +5,74 @@ pattern_install_chart | default(lookup("env", "PATTERN_INSTALL_CHART"), true) | default("oci://quay.io/validatedpatterns/pattern-install", true) + | trim }} - name: Resolve target_branch block: - - name: Seed from CLI/env + - name: Check CLI/env for target_branch override ansible.builtin.set_fact: target_branch: >- {{ target_branch | default(lookup("env", "TARGET_BRANCH"), true) | default("", false) + | trim }} - - name: Derive via git (when unset) - ansible.builtin.command: "git rev-parse --abbrev-ref HEAD" + - name: Derive target_branch via git (when not overridden) + ansible.builtin.command: "git branch --show-current" # noqa: command-instead-of-module args: chdir: "{{ pattern_dir }}" register: _br - when: target_branch | trim == "" + when: target_branch == "" failed_when: false - - name: Fail if unable to determine target_branch - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " Could not determine target branch in '%s'.\n" "{{ pattern_dir }}" > /dev/tty - exit 1 - when: (target_branch | trim == "") and (_br.rc != 0 or (_br.stdout | default('') | trim == "")) - - - name: Set from git + - name: Set target_branch using git ansible.builtin.set_fact: target_branch: "{{ _br.stdout | trim }}" - when: target_branch | trim == "" + when: target_branch == "" and _br.rc == 0 + + - name: Ensure target_branch is set + ansible.builtin.assert: + that: target_branch != "" + fail_msg: | + Could not determine target branch in '{{ pattern_dir }}'. + Ensure that you are on a git branch or pass explicitly via + the 'target_branch' variable or 'TARGET_BRANCH' environment variable. 
- name: Resolve target_origin block: - - name: Seed from CLI/env + - name: Check CLI/env for target_origin override ansible.builtin.set_fact: target_origin: >- {{ target_origin | default(lookup("env", "TARGET_ORIGIN"), true) | default("", false) + | trim }} - - name: Derive via git (when unset) # noqa: command-instead-of-module - ansible.builtin.command: "git config branch.{{ target_branch }}.remote" + - name: Derive target_origin via git (when not overridden) + ansible.builtin.command: "git config branch.{{ target_branch }}.remote" # noqa: command-instead-of-module args: chdir: "{{ pattern_dir }}" register: _origin - when: target_origin | trim == "" + when: target_origin == "" failed_when: false - - name: Fail if unable to determine target_origin - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " Could not determine target_origin for branch '%s' in '%s'.\n" "{{ target_branch }}" "{{ pattern_dir }}" > /dev/tty - printf " Ensure the branch has a remote configured or pass TARGET_ORIGIN explicitly.\n" > /dev/tty - exit 1 - when: (target_origin | trim == "") and (_origin.rc != 0 or (_origin.stdout | default('') | trim == "")) - - - name: Set from git + - name: Set target_origin using git ansible.builtin.set_fact: target_origin: "{{ _origin.stdout | trim }}" - when: target_origin | trim == "" + when: target_origin == "" and _origin is not failed + + - name: Ensure target_origin is set + ansible.builtin.assert: + that: target_origin != "" + fail_msg: | + Could not determine target origin for branch '{{ target_branch }}' in '{{ pattern_dir }}'. + Ensure branch '{{ target_branch }}' has a remote configured or pass explicitly via + the 'target_origin' variable or 'TARGET_ORIGIN' environment variable. - name: Resolve target_clustergroup ansible.builtin.set_fact: @@ -77,6 +81,7 @@ target_clustergroup | default(lookup("env", "TARGET_CLUSTERGROUP"), true) | default(main_clustergroup, true) + | trim }} - name: Resolve token_secret @@ -86,6 +91,7 @@ token_secret | default(lookup("env", "TOKEN_SECRET"), true) | default("", false) + | trim }} - name: Resolve token_namespace @@ -95,6 +101,7 @@ token_namespace | default(lookup("env", "TOKEN_NAMESPACE"), true) | default("", false) + | trim }} - name: Resolve extra_helm_opts @@ -104,6 +111,7 @@ extra_helm_opts | default(lookup("env", "EXTRA_HELM_OPTS"), true) | default("", false) + | trim }} - name: Resolve uuid_helm_opts (from UUID_FILE) @@ -115,6 +123,7 @@ uuid_file | default(lookup('env','UUID_FILE'), true) | default((lookup('env','HOME') | default('')) ~ '/.config/validated-patterns/pattern-uuid', true) + | trim }} - name: Stat UUID file diff --git a/roles/load_secrets/tasks/main.yml b/roles/load_secrets/tasks/main.yml index 33e31bc..b7a0f2f 100644 --- a/roles/load_secrets/tasks/main.yml +++ b/roles/load_secrets/tasks/main.yml @@ -12,16 +12,16 @@ - name: Fail if values_secrets_data is missing ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " values_secrets_data was not found.\n" > /dev/tty - printf " The find_vp_secrets role should set it.\n" > /dev/tty - printf " Ensure your values/secret files are present and readable.\n" > /dev/tty + printf "ERROR\n" + printf " values_secrets_data was not found.\n" + printf " The find_vp_secrets role should set it.\n" + printf " Ensure your values/secret files are present and readable.\n" exit 1 when: values_secrets_data is not defined - name: Determine how to load secrets ansible.builtin.set_fact: - secrets_yaml: "{{ values_secrets_data | from_yaml }}" + secrets_yaml: "{{ 
values_secrets_data if values_secrets_data is not string else values_secrets_data | from_yaml }}" - name: Parse secrets data no_log: "{{ hide_sensitive_output | default(true) }}" diff --git a/roles/oc_check/tasks/main.yml b/roles/oc_check/tasks/main.yml index 9cd275e..7bae8c5 100644 --- a/roles/oc_check/tasks/main.yml +++ b/roles/oc_check/tasks/main.yml @@ -1,27 +1,22 @@ --- -- name: Announce oc check - ansible.builtin.shell: | - printf "==> Checking oc availability... " > /dev/tty - -- name: Probe oc version (YAML output) +- name: Check oc availability ansible.builtin.command: oc version --client=true -o yaml register: _oc_probe failed_when: false +- name: Print oc missing/error message and exit (if not installed) + ansible.builtin.assert: + that: _oc_probe is not failed + fail_msg: | + oc is not installed or not on PATH. + Try running your command again with the './pattern.sh' script, inside the + utility-container, or locally after installing oc and verifying it is available on PATH. + - name: Parse oc version output ansible.builtin.set_fact: _oc_version: "{{ _oc_probe.stdout | from_yaml }}" - when: _oc_probe.rc == 0 -- name: Report oc version to TTY (if installed) - ansible.builtin.shell: | - printf "OK (%s)\n" "{{ _oc_version.releaseClientVersion | default('unknown') }}" > /dev/tty - when: _oc_probe.rc == 0 - -- name: Print oc missing/error message and exit (if not installed) - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " oc is not installed or not on PATH.\n" > /dev/tty - printf " Hint: Install oc and ensure 'oc' is resolvable.\n" > /dev/tty - exit 1 - when: _oc_probe.rc != 0 +- name: Report oc version + ansible.builtin.debug: + msg: | + {{ _oc_version.releaseClientVersion | default('unknown') | trim }} diff --git a/roles/pattern_install_template/tasks/main.yml b/roles/pattern_install_template/tasks/main.yml index 09c3012..6990e2a 100644 --- a/roles/pattern_install_template/tasks/main.yml +++ b/roles/pattern_install_template/tasks/main.yml @@ -3,34 +3,13 @@ ansible.builtin.include_role: name: helm_check -- name: Print helm template command to /dev/tty - ansible.builtin.shell: | - printf "==> Running: helm template %s --name-template %s %s\n" \ - "{{ pattern_install_chart }}" "{{ pattern_name }}" "{{ _install_helm_opts }}" > /dev/tty - -- name: Run helm template - ansible.builtin.command: > - helm template --include-crds {{ pattern_install_chart }} - --name-template {{ pattern_name }} - {{ _install_helm_opts }} +- name: Run helm template command + ansible.builtin.command: | + {{ helm_template_command }} args: chdir: "{{ pattern_dir }}" register: _helm_template - failed_when: false - -- name: Fail if helm template failed - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " Helm template failed in: %s\n" "{{ pattern_dir }}" > /dev/tty - printf " Chart: %s\n" "{{ pattern_install_chart }}" > /dev/tty - printf " Name: %s\n" "{{ pattern_name }}" > /dev/tty - printf " Exit code: %s\n" "{{ _helm_template.rc }}" > /dev/tty - printf " Command output:\n" > /dev/tty - printf "%s\n" "{{ _helm_template.stderr | default(_helm_template.stdout) }}" > /dev/tty - exit 1 - when: _helm_template.rc != 0 - name: Set rendered YAML fact ansible.builtin.set_fact: pattern_install_rendered_yaml: "{{ _helm_template.stdout }}" - when: _helm_template.rc == 0 diff --git a/roles/pattern_settings/tasks/main.yml b/roles/pattern_settings/tasks/main.yml index 93bc71a..211d51d 100644 --- a/roles/pattern_settings/tasks/main.yml +++ b/roles/pattern_settings/tasks/main.yml @@ -6,13 +6,12 
@@ ansible.builtin.set_fact: values_global: "{{ lookup('file', pattern_dir + '/values-global.yaml') | from_yaml }}" -- name: Fail if global.pattern is missing - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " values-global.yaml does not define .global.pattern.\n" > /dev/tty - printf " Please set a value for pattern under the 'global' key.\n" > /dev/tty - exit 1 - when: (values_global.global.pattern | default('') | string | trim) == "" +- name: Ensure .global.pattern is set + ansible.builtin.assert: + that: (values_global.global.pattern | default('') | string | trim) != "" + fail_msg: | + values-global.yaml does not define .global.pattern. + Please set a value for pattern under the 'global' key. - name: Resolve pattern_name ansible.builtin.set_fact: @@ -23,13 +22,12 @@ | default((values_global.global.pattern | string | trim), true) }} -- name: Fail if main.clusterGroupName is missing - ansible.builtin.shell: | - printf "ERROR\n" > /dev/tty - printf " values-global.yaml does not define .main.clusterGroupName.\n" > /dev/tty - printf " Please set a value for clusterGroupName under the 'main' key.\n" > /dev/tty - exit 1 - when: (values_global.main.clusterGroupName | default('') | string | trim) == "" +- name: Ensure main.clusterGroupName is set + ansible.builtin.assert: + that: (values_global.main.clusterGroupName | default('') | string | trim) != "" + fail_msg: | + values-global.yaml does not define .main.clusterGroupName. + Please set a value for clusterGroupName under the 'main' key. - name: Set fact for main clustergroup ansible.builtin.set_fact: diff --git a/roles/pattern_settings/tasks/resolve_overrides.yml b/roles/pattern_settings/tasks/resolve_overrides.yml index c372e1e..fe63bf1 100644 --- a/roles/pattern_settings/tasks/resolve_overrides.yml +++ b/roles/pattern_settings/tasks/resolve_overrides.yml @@ -7,4 +7,5 @@ | default(lookup("env","PATTERN_DIR"), true) | default(lookup("env","PWD"), true) | default(lookup("pipe","pwd"), true) + | trim }} diff --git a/roles/validate_cluster/tasks/main.yml b/roles/validate_cluster/tasks/main.yml index fa64fb0..c808c97 100644 --- a/roles/validate_cluster/tasks/main.yml +++ b/roles/validate_cluster/tasks/main.yml @@ -1,29 +1,24 @@ --- -- name: Ensure oc is available - ansible.builtin.include_role: - name: oc_check +- name: Verify cluster connectivity and authentication + kubernetes.core.k8s_cluster_info: + register: cluster_info + failed_when: false -- name: Print cluster validation header - ansible.builtin.shell: | - printf "==> Checking cluster:\n" > /dev/tty +- name: Assert cluster is reachable + ansible.builtin.assert: + that: cluster_info.version is defined + fail_msg: | + Could not connect to the cluster. + Ensure your KUBECONFIG is set or you are logged in (e.g., 'oc login'). 
+ quiet: true -- name: Check that we are logged into a cluster - ansible.builtin.shell: | - printf " cluster-info: " > /dev/tty - if oc cluster-info >/dev/null 2>&1; then - printf "OK\n" > /dev/tty - else - printf "Error\n" > /dev/tty - exit 1 - fi +- name: Fetch StorageClasses + kubernetes.core.k8s_info: + kind: StorageClass + register: sc_list -- name: Ensure we have storage classes defined - ansible.builtin.shell: | - set -o pipefail - printf " storageclass: " > /dev/tty - count="$(oc get storageclass -o name 2>/dev/null | wc -l | tr -d ' ')" - if [ "${count}" -eq 0 ]; then - printf "WARNING: No storageclass found\n" > /dev/tty - else - printf "OK\n" > /dev/tty - fi +- name: StorageClass Status + ansible.builtin.debug: + msg: >- + {{ 'OK: StorageClass found.' if (sc_list.resources | default([]) | length > 0) + else 'WARNING: No storageclass found!' }} diff --git a/roles/validate_origin/tasks/main.yml b/roles/validate_origin/tasks/main.yml index 0e42c0a..1a52ff6 100644 --- a/roles/validate_origin/tasks/main.yml +++ b/roles/validate_origin/tasks/main.yml @@ -1,50 +1,29 @@ --- -- name: Announce repository check - ansible.builtin.shell: | - printf "Checking origin reachability:\n" > /dev/tty - -- name: Set upstream_url from values-global.yaml +- name: Check if upstream_url is set in values-global.yaml ansible.builtin.set_fact: _upstream_url: "{{ values_global.main.git.repoUpstreamURL | default('') | string | trim }}" -- name: Select URL to validate +- name: Select origin URL to validate ansible.builtin.set_fact: _repo_to_check: >- {{ - (_upstream_url if (_upstream_url | trim != '') else _target_repo) | trim + _upstream_url if _upstream_url + else target_remote_url }} -- name: Print upstream notice (if present) - ansible.builtin.shell: | - printf "Upstream URL set to: %s\n" "{{ _upstream_url }}" > /dev/tty - when: _upstream_url | trim != '' - -- name: Fail if repo URL is empty - ansible.builtin.shell: | - printf " (no repository URL available)\n" > /dev/tty - printf "ERROR\n" > /dev/tty - printf " Could not determine repository URL to validate.\n" > /dev/tty - printf " Ensure _target_repo is resolved (install_settings role) or set an upstream URL in values-global.yaml.\n" > /dev/tty - exit 1 - when: _repo_to_check == '' +- name: Print validation message + ansible.builtin.debug: + msg: | + Validating that branch '{{ target_branch }}' is reachable on remote repo '{{ _repo_to_check }}' -- name: Print URL/branch header - ansible.builtin.shell: | - printf " %s - branch '%s': " "{{ _repo_to_check }}" "{{ target_branch }}" > /dev/tty - -- name: Validate remote branch exists # noqa: command-instead-of-module - ansible.builtin.command: > - git ls-remote --exit-code --heads {{ _repo_to_check }} {{ target_branch }} +- name: Validate remote branch is reachable on remote repo + ansible.builtin.command: git ls-remote --exit-code --heads {{ _repo_to_check }} {{ target_branch }} # noqa: command-instead-of-module register: _lsremote failed_when: false -- name: Report OK - ansible.builtin.shell: | - printf "OK\n" > /dev/tty - when: _lsremote.rc == 0 - -- name: Report NOT FOUND and exit - ansible.builtin.shell: | - printf "NOT FOUND\n" > /dev/tty - exit 1 - when: _lsremote.rc != 0 +- name: Print error message on failure + ansible.builtin.assert: + that: _lsremote is not failed + fail_msg: | + Branch '{{ target_branch }}' is not reachable on remote repo '{{ _repo_to_check }}'. + Ensure that your branch is pushed to the origin repo. 
diff --git a/roles/validate_prereq/tasks/main.yml b/roles/validate_prereq/tasks/main.yml index a8ef04a..255ded6 100644 --- a/roles/validate_prereq/tasks/main.yml +++ b/roles/validate_prereq/tasks/main.yml @@ -1,14 +1,6 @@ --- -- name: Announce prerequisite checks (host) - ansible.builtin.shell: | - printf "==> Checking prerequisites...\n" > /dev/tty - - name: Pattern name and clustergroup name length validation block: - - name: Print start message - ansible.builtin.shell: | - printf " Ensuring pattern and clustergroup names are within bounds... " > /dev/tty - - name: Get length of pattern name ansible.builtin.set_fact: _pattern_name_len: "{{ pattern_name | length }}" @@ -17,67 +9,28 @@ ansible.builtin.set_fact: _clustergroup_name_len: "{{ target_clustergroup | length }}" - - name: Calculate DNS part length for ArgoCD deploy - ansible.builtin.set_fact: - _name_valid: >- - {{ (_pattern_name_len | int) + 2 * (_clustergroup_name_len | int) < 47 }} - - - name: Print success message - ansible.builtin.shell: | - printf "OK\n" > /dev/tty - when: _name_valid - - - name: Print failure message - ansible.builtin.shell: | - {% raw %}printf "FAIL\n\n" > /dev/tty - printf " Validation Explanation:\n" > /dev/tty - printf " A DNS-compatible name is constructed in the 'clustergroup' Helm chart using the following pattern:\n" > /dev/tty - printf " -> {{ .Values.clusterGroup.name }}-gitops-server-{{ .Values.global.pattern }}-{{ .Values.clusterGroup.name }}\n\n" > /dev/tty - printf " The total length is calculated as:\n" > /dev/tty - printf " (2 * length of 'clusterGroup.name') + length of 'global.pattern' + 15 (for '-gitops-server-') + 1 (for the namespace separator '-')\n\n" > /dev/tty - printf " To stay under the 63-character limit, the variable part of the name must be less than 47 characters:\n" > /dev/tty - printf " (2 * length of 'clusterGroup.name') + length of 'global.pattern' < 47\n" > /dev/tty - exit 1{% endraw %} - when: not _name_valid - -- name: Detect whether we are running inside a container - ansible.builtin.stat: - path: /run/.containerenv - register: _containerenv - -- name: Host validations (prerequisites on the local machine) - when: not _containerenv.stat.exists - block: - - name: Check python-kubernetes (host) - ansible.builtin.shell: | - printf " Looking for python-kubernetes module... " > /dev/tty - if {{ (ansible_python_interpreter | default('/usr/bin/python3')) }} -c 'import kubernetes' >/dev/null 2>&1; then - printf "OK\n" > /dev/tty - else - printf "Not found\n" > /dev/tty - exit 1 - fi - - - name: Check kubernetes.core collection (host) - ansible.builtin.shell: | - set -o pipefail - printf " Looking for kubernetes.core collection... 
" > /dev/tty - if ansible-galaxy collection list | grep -q 'kubernetes.core'; then - printf "OK\n" > /dev/tty - else - printf "Not found\n" > /dev/tty - exit 1 - fi - -- name: Container validations - when: _containerenv.stat.exists - block: - - name: Compute multiSourceConfig.enabled (default false) - ansible.builtin.set_fact: - _msc_enabled: "{{ values_global.main.multiSourceConfig.enabled | default(false) | bool }}" - - - name: Enforce multiSourceConfig is enabled (container) - ansible.builtin.shell: | - printf "You must set \".main.multiSourceConfig.enabled: true\" in 'values-global.yaml'.\n" > /dev/tty - exit 1 - when: not _msc_enabled + - name: Ensure ArgoCD will have a valid DNS hostname based on pattern name and clustergroup lengths + ansible.builtin.assert: + that: (_pattern_name_len | int) + 2 * (_clustergroup_name_len | int) < 47 + fail_msg: | + A DNS-compatible name is constructed in the 'clustergroup' Helm chart using the following pattern: + -> {%raw%}{{ .Values.clusterGroup.name }}-gitops-server-{{ .Values.global.pattern }}-{{ .Values.clusterGroup.name }}{%endraw%} + To stay under the 63-character DNS limit, the variable part of the name must be less than 47 characters: + (2 * length of 'clusterGroup.name') + length of 'global.pattern' < 47 + With your current values this DNS part is '{{ target_clustergroup }}-gitops-server-{{ pattern_name }}-{{ target_clustergroup }}' and, together with the namespace separator, exceeds the 63-character limit. + +- name: Ensure kubernetes python module is installed + ansible.builtin.pip: + name: kubernetes + state: present + extra_args: "-q" + +- name: Ensure kubernetes.core collection is installed + community.general.ansible_galaxy_install: + name: kubernetes.core + type: collection + +- name: Ensure multiSourceConfig is enabled + ansible.builtin.assert: + that: values_global.main.multiSourceConfig.enabled | default(false) | bool + fail_msg: "You must set '.main.multiSourceConfig.enabled' to 'true' in 'values-global.yaml'." 
diff --git a/roles/validate_schema/tasks/main.yml b/roles/validate_schema/tasks/main.yml index 3d3e63a..d0b595f 100644 --- a/roles/validate_schema/tasks/main.yml +++ b/roles/validate_schema/tasks/main.yml @@ -6,31 +6,16 @@ file_type: file register: _values_files -- name: Sort file list +- name: Sort values files and determine clustergroup version cli arg ansible.builtin.set_fact: - values_files_sorted: >- - {{ - _values_files.files - | map(attribute='path') - | list - | sort - }} - -- name: Print start message - ansible.builtin.shell: - cmd: | - printf "==> Validating clustergroup schema of: " > /dev/tty - for f in {{ values_files_sorted | map('basename') | list | join(' ') }}; do printf " $f" > /dev/tty; done - printf "\n" > /dev/tty - -- name: Determine clustergroup chart CLI extras - ansible.builtin.set_fact: - clustergroup_version: "{{ values_global.main.multiSourceConfig.clusterGroupChartVersion | default('') }}" + values_files_sorted: "{{ _values_files.files | map(attribute='path') | sort }}" clustergroup_version_cli: >- - {{ - (values_global.main.multiSourceConfig.clusterGroupChartVersion | default('')) - | ternary('--version ' + values_global.main.multiSourceConfig.clusterGroupChartVersion, '') - }} + {{ ('--version ' + values_global.main.multiSourceConfig.clusterGroupChartVersion) + if (values_global.main.multiSourceConfig.clusterGroupChartVersion | default('')) else '' }} + +- name: Log files being validated + ansible.builtin.debug: + msg: "Validating clustergroup schema against: {{ values_files_sorted | map('basename') | join(', ') }}" - name: Validate each file with helm template ansible.builtin.command: >- @@ -43,16 +28,16 @@ loop_control: label: "{{ item | basename }}" -- name: Exit with error if any validation failed - when: (helm_validate.results | selectattr('rc', 'ne', 0) | list | length) > 0 - ansible.builtin.shell: - cmd: | - printf "Schema validation failed for the following values files:\n" > /dev/tty - {% for result in helm_validate.results %} - {% if result.rc != 0 %} - printf " - {{ result.item | basename }} (exit code: {{ result.rc }})\n" > /dev/tty - printf " To reproduce: cd {{ pattern_dir }} && helm template {{ clustergroup_chart }} {{ clustergroup_version_cli }} {{ extra_helm_opts }} -f \"{{ result.item }}\"\n" > /dev/tty - {% endif %} +- name: Check validation results + vars: + failed_validations: "{{ helm_validate.results | selectattr('rc', 'ne', 0) | list }}" + ansible.builtin.assert: + that: failed_validations | length == 0 + fail_msg: | + Schema validation failed for the following files: + {% for error in failed_validations %} + - {{ error.item | basename }} (RC: {{ error.rc }}) + Reproduction: cd {{ pattern_dir }} && helm template {{ clustergroup_chart }} {{ clustergroup_version_cli }} {{ extra_helm_opts }} -f "{{ error.item | basename }}" + Error: {{ error.stderr }} {% endfor %} - printf "\nRe-run the above commands to see the detailed error output.\n" > /dev/tty - exit 1 + quiet: true diff --git a/roles/vault_utils/tasks/vault_spokes_init.yaml b/roles/vault_utils/tasks/vault_spokes_init.yaml index 40651dd..ae0215c 100644 --- a/roles/vault_utils/tasks/vault_spokes_init.yaml +++ b/roles/vault_utils/tasks/vault_spokes_init.yaml @@ -37,7 +37,7 @@ ansible.builtin.set_fact: clusters: "{{ clusters | default({}) | combine({item.metadata.name: {'server_api': item.spec.managedClusterClientConfigs[0].url, - 'cluster_fqdn': _cluster_fqdn }}, recursive=True) }}" + 'cluster_fqdn': _cluster_fqdn}}, recursive=True) }}" loop: "{{ resources }}" vars: _cluster_fqdn: "{{ 
item.status.clusterClaims | selectattr('name', 'equalto', 'consoleurl.cluster.open-cluster-management.io')