From 0a0fabf27748a46a654846fbfd4fd509fe7466dc Mon Sep 17 00:00:00 2001 From: sauragar Date: Wed, 18 Feb 2026 11:15:40 +0530 Subject: [PATCH] [test_operator] Add stackviz integration for Tempest test visualization Add comprehensive stackviz integration for Tempest test visualization with support for multi-stage test execution. This generates interactive HTML reports with test results, timing analysis, and failure details. Key features: - Multi-stage stackviz report generation for main and worker jobs - Dark/light mode toggle for better viewing experience - Separate stackviz reports for tempest retry results - Support for both compressed and uncompressed subunit files - Auto-install of RPM dependencies (python3-subunit, python3-testtools) - Summary index page with links to all generated reports Implementation details: - Uses the cifmw_repo variable instead of playbook_dir to reference the stackviz generation script, fixing failures in adoption jobs where playbooks are executed from the rdo-jobs repository - Simplifies dependency installation using a single task with RPM packages - Removes unnecessary step-X directory wrapper in log collection - Uses FQCN (ansible.builtin.include_tasks) for ansible-lint compliance - All stackviz variables follow cifmw_test_operator_stackviz_* naming Configuration variables (all in defaults/main.yml): - cifmw_test_operator_stackviz_generate: Enable/disable report generation - cifmw_test_operator_stackviz_auto_install_deps: Auto-install RPM packages - cifmw_test_operator_stackviz_create_index: Create summary index page - cifmw_test_operator_stackviz_debug: Enable debug output Reviewer feedback addressed: Round 1 - Ansible Tasks: - Remove redundant Python script existence check - Remove dependency verification tasks (let Python fail with ImportError) - Move inline HTML content to proper template file (stackviz-index.html.j2) - Remove unused subunit_index from loop control and debug messages - Add comment explaining compressed .subunit.gz files - 
Add explicit fail task for missing subunit files with a clear error message - Remove redundant HTML report verification (Ansible fails on non-zero exit) Round 1 - Python Script Improvements: - Add shebang: #!/usr/bin/env python3 - Remove questionable comments and dead code for 'inprogress' status - Fix bare except clause to catch specific exceptions - Add warning to stderr for tests with missing status - Refactor timestamp formatting into a helper function - Move the 800-line HTML template to an external file (stackviz-report-template.html) Round 2 - Refactoring: - Merge duplicate set_fact tasks in worker file - Move variable definitions from worker to main file using vars block - Define variables at include_tasks level instead of multiple set_fact tasks - Keep only final set_fact for output paths in worker file Files created: - roles/test_operator/tasks/generate-stackviz-main.yml - roles/test_operator/tasks/generate-stackviz-worker.yml - roles/test_operator/templates/stackviz-index.html.j2 - scripts/generate-stackviz-report.py - scripts/stackviz-report-template.html All changes improve code maintainability while maintaining backward compatibility with a fallback to the embedded template if the external file is missing. 
Signed-off-by: Saurabh Agarwal Co-Authored-By: Claude Sonnet 4.5 --- docs/dictionary/en-custom.txt | 3 + roles/test_operator/README.md | 4 + roles/test_operator/defaults/main.yml | 4 + roles/test_operator/tasks/collect-logs.yaml | 4 +- .../tasks/generate-stackviz-main.yml | 102 ++ .../tasks/generate-stackviz-worker.yml | 72 ++ .../tasks/run-test-operator-job.yml | 7 + .../templates/stackviz-index.html.j2 | 86 ++ scripts/generate-stackviz-report.py | 988 ++++++++++++++++++ scripts/stackviz-report-template.html | 806 ++++++++++++++ 10 files changed, 2074 insertions(+), 2 deletions(-) create mode 100644 roles/test_operator/tasks/generate-stackviz-main.yml create mode 100644 roles/test_operator/tasks/generate-stackviz-worker.yml create mode 100644 roles/test_operator/templates/stackviz-index.html.j2 create mode 100644 scripts/generate-stackviz-report.py create mode 100644 scripts/stackviz-report-template.html diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 32efa5e905..a9dca12dbf 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -536,6 +536,7 @@ shiftstack shiftstackclient sig Sinha +Stackviz sizepercent skbg skiplist @@ -556,6 +557,7 @@ str stricthostkeychecking submodule submodules +subunit subnet subnets sudo @@ -573,6 +575,7 @@ tempestconf testcases testenv testproject +testtools timestamper timesync tldca diff --git a/roles/test_operator/README.md b/roles/test_operator/README.md index a66df11c52..4863588201 100644 --- a/roles/test_operator/README.md +++ b/roles/test_operator/README.md @@ -4,6 +4,10 @@ Execute tests via the [test-operator](https://openstack-k8s-operators.github.io/ ## Parameters * `cifmw_test_operator_artifacts_basedir`: (String) Directory where we will have all test-operator related files. 
Default value: `{{ cifmw_basedir }}/tests/test_operator` which defaults to `~/ci-framework-data/tests/test_operator` +* `cifmw_test_operator_stackviz_generate`: (Boolean) Enable automatic generation of Stackviz HTML reports from Tempest subunit test results. When enabled, generates interactive visualizations of test results that can be viewed in a browser. When `cifmw_test_operator_tempest_rerun_failed_tests` is enabled, stackviz will generate both `tempest-viz.html` (original test run) and `tempest_retry_viz.html` (retry test run) reports for easy comparison. Default value: `true` +* `cifmw_test_operator_stackviz_debug`: (Boolean) Enable debug mode for Stackviz report generation. When enabled, displays detailed information about the generation process. Default value: `false` +* `cifmw_test_operator_stackviz_auto_install_deps`: (Boolean) Automatically install required RPM packages (python3-subunit, python3-testtools) for Stackviz generation. When disabled, the role will fail if the packages are not already installed. Default value: `true` +* `cifmw_test_operator_stackviz_create_index`: (Boolean) Create a summary index page (index.html) when multiple test stages are run. The index provides links to all individual Stackviz reports. Only applicable when using workflows with multiple test stages. Default value: `true` * `cifmw_test_operator_namespace`: (String) Namespace inside which all the resources are created. Default value: `openstack` * `cifmw_test_operator_controller_namespace`: (String) Namespace inside which the test-operator-controller-manager is created. Default value: `openstack-operators` * `cifmw_test_operator_controller_priv_key_file_path`: (String) Specifies the path to the CIFMW private key file. Note: Please ensure this file is available in the environment where the ci-framework test-operator role is executed. 
Default value: `~/.ssh/id_cifw` diff --git a/roles/test_operator/defaults/main.yml b/roles/test_operator/defaults/main.yml index ecfdc3b4fb..da853a38a5 100644 --- a/roles/test_operator/defaults/main.yml +++ b/roles/test_operator/defaults/main.yml @@ -24,6 +24,10 @@ cifmw_test_operator_stages: type: tempest cifmw_test_operator_fail_on_test_failure: true cifmw_test_operator_artifacts_basedir: "{{ cifmw_basedir }}/tests/test_operator" +cifmw_test_operator_stackviz_generate: true +cifmw_test_operator_stackviz_debug: false +cifmw_test_operator_stackviz_auto_install_deps: true +cifmw_test_operator_stackviz_create_index: true cifmw_test_operator_namespace: openstack cifmw_test_operator_controller_namespace: openstack-operators cifmw_test_operator_bundle: "" diff --git a/roles/test_operator/tasks/collect-logs.yaml b/roles/test_operator/tasks/collect-logs.yaml index 5d847624e8..22acd6ba5b 100644 --- a/roles/test_operator/tasks/collect-logs.yaml +++ b/roles/test_operator/tasks/collect-logs.yaml @@ -75,10 +75,10 @@ KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" PATH: "{{ cifmw_path }}" vars: - pod_path: mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} + pod_path: /mnt/logs-{{ test_operator_instance_name }}-step-{{ index }} ansible.builtin.shell: > oc cp -n {{ stage_vars_dict.cifmw_test_operator_namespace }} - test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }} + test-operator-logs-pod-{{ run_test_fw }}-{{ test_operator_instance_name }}:{{ pod_path }}/. 
{{ cifmw_test_operator_artifacts_basedir }} loop: "{{ logsPVCs.resources }}" loop_control: diff --git a/roles/test_operator/tasks/generate-stackviz-main.yml b/roles/test_operator/tasks/generate-stackviz-main.yml new file mode 100644 index 0000000000..76d7b49575 --- /dev/null +++ b/roles/test_operator/tasks/generate-stackviz-main.yml @@ -0,0 +1,102 @@ +--- +# Main: Generate stackviz HTML reports from tempest subunit results +# This is the main orchestrator that coordinates stackviz report generation +# Included after log collection when tempest tests complete +# Handles multiple test stages by generating separate reports for each +# Calls generate-stackviz-worker.yml for each subunit file found + +- name: Find all tempest subunit files (results and retries, compressed or uncompressed) + ansible.builtin.find: + paths: "{{ cifmw_test_operator_artifacts_basedir }}" + patterns: + - "tempest_results.subunit*" + - "tempest_retry.subunit*" + recurse: true + register: subunit_gz_files + +- name: Display found subunit files + ansible.builtin.debug: + msg: "Found {{ subunit_gz_files.files | length }} subunit file(s) (results + retries)" + +- name: Process stackviz reports + when: subunit_gz_files.files | length > 0 + block: + - name: Install required RPM packages for stackviz + ansible.builtin.package: + name: + - python3-subunit + - python3-testtools + state: present + become: true + when: cifmw_test_operator_stackviz_auto_install_deps + + # Initialize list to track generated reports + - name: Initialize stackviz reports list + ansible.builtin.set_fact: + _stackviz_generated_reports: [] + + # Process each subunit file + - name: Process each subunit file and generate individual reports + vars: + _current_subunit_source: "{{ subunit_file.path }}" + _current_dir: "{{ subunit_file.path | dirname }}" + _current_stage_name: "{{ subunit_file.path | dirname | basename }}" + _current_is_compressed: "{{ subunit_file.path.endswith('.gz') }}" + _is_retry_file: "{{ 'tempest_retry' in 
subunit_file.path }}" + _base_filename: "{{ 'tempest_retry' if 'tempest_retry' in subunit_file.path else 'tempest_results' }}" + _html_filename: "{{ 'tempest_retry_viz.html' if 'tempest_retry' in subunit_file.path else 'tempest-viz.html' }}" + ansible.builtin.include_tasks: generate-stackviz-worker.yml + loop: "{{ subunit_gz_files.files }}" + loop_control: + loop_var: subunit_file + + # Create summary index page + - name: Create stackviz summary index + when: + - _stackviz_generated_reports | length > 0 + - cifmw_test_operator_stackviz_create_index + block: + - name: Create stackviz directory + ansible.builtin.file: + path: "{{ cifmw_test_operator_artifacts_basedir }}/stackviz" + state: directory + mode: '0755' + + - name: Generate summary index HTML + ansible.builtin.template: + src: stackviz-index.html.j2 + dest: "{{ cifmw_test_operator_artifacts_basedir }}/stackviz/index.html" + mode: '0644' + + - name: Display stackviz summary + ansible.builtin.debug: + msg: | + =================================================== + Stackviz Report Generation Complete! + + Total Reports Generated: {{ _stackviz_generated_reports | length }} + + {% for report in _stackviz_generated_reports %} + - {{ report.report_label }} + {{ report.html_path }} + {% endfor %} + + {% if cifmw_test_operator_stackviz_create_index %} + Summary Index: {{ cifmw_test_operator_artifacts_basedir }}/stackviz/index.html + + To view all reports: + {% if ansible_os_family == 'Darwin' %} + open {{ cifmw_test_operator_artifacts_basedir }}/stackviz/index.html + {% else %} + xdg-open {{ cifmw_test_operator_artifacts_basedir }}/stackviz/index.html + {% endif %} + {% endif %} + =================================================== + when: _stackviz_generated_reports | length > 0 + +- name: Display warning when no subunit files found + ansible.builtin.debug: + msg: > + WARNING: No tempest subunit files (tempest_results.subunit* or tempest_retry.subunit*) found in {{ cifmw_test_operator_artifacts_basedir }}. 
+ Stackviz report generation skipped. + when: subunit_gz_files.files | length == 0 diff --git a/roles/test_operator/tasks/generate-stackviz-worker.yml b/roles/test_operator/tasks/generate-stackviz-worker.yml new file mode 100644 index 0000000000..2f5e6a045c --- /dev/null +++ b/roles/test_operator/tasks/generate-stackviz-worker.yml @@ -0,0 +1,72 @@ +--- +# Worker: Process a single subunit.gz file and generate HTML report +# This file is included in a loop from generate-stackviz-main.yml +# Variables available: +# - subunit_file: the file object from find results +# - All path and naming variables are passed as vars from the main file + +- name: Set output paths for decompressed file and HTML report + ansible.builtin.set_fact: + _current_subunit: "{{ _current_dir }}/{{ _base_filename }}.subunit" + _current_html: "{{ _current_dir }}/{{ _html_filename }}" + +# Tempest can produce compressed .subunit.gz files to save disk space, +# especially for large test runs. We decompress them for processing. +- name: Decompress subunit file if compressed + ansible.builtin.shell: | + gunzip -c "{{ _current_subunit_source }}" > "{{ _current_subunit }}" + args: + creates: "{{ _current_subunit }}" + when: _current_is_compressed | bool + +- name: Use uncompressed file directly if not compressed + ansible.builtin.set_fact: + _current_subunit: "{{ _current_subunit_source }}" + when: not (_current_is_compressed | bool) + +- name: Verify subunit file exists + ansible.builtin.stat: + path: "{{ _current_subunit }}" + register: _current_subunit_stat + +- name: Fail if subunit file is missing + ansible.builtin.fail: + msg: | + ERROR: Subunit file not found: {{ _current_subunit }} + Expected file after decompression or direct usage. + This may indicate an issue with test execution or file handling. 
+ when: not _current_subunit_stat.stat.exists + +- name: Generate stackviz HTML report for this stage + ansible.builtin.command: + cmd: > + python3 {{ cifmw_repo }}/scripts/generate-stackviz-report.py + {{ _current_subunit }} + {{ _current_html }} + register: _current_stackviz_generation + +- name: Display generation output + ansible.builtin.debug: + var: _current_stackviz_generation.stdout_lines + when: + - _current_stackviz_generation is defined + - _current_stackviz_generation is not skipped + - cifmw_test_operator_stackviz_debug | default(false) + +- name: Track generated report with type metadata + ansible.builtin.set_fact: + _stackviz_generated_reports: >- + {{ + _stackviz_generated_reports + [{ + 'stage_name': _current_stage_name, + 'html_path': _current_html, + 'subunit_path': _current_subunit, + 'directory': _current_dir, + 'is_retry': _is_retry_file | default(false), + 'report_label': ('Retry Results: ' if (_is_retry_file | default(false)) else 'Original Results: ') + _current_stage_name + }] + }} + +- name: Display success for this report + ansible.builtin.debug: + msg: "✓ Generated report for {{ _current_stage_name }}: {{ _current_html }}" diff --git a/roles/test_operator/tasks/run-test-operator-job.yml b/roles/test_operator/tasks/run-test-operator-job.yml index 66e8adaec9..f569b07f62 100644 --- a/roles/test_operator/tasks/run-test-operator-job.yml +++ b/roles/test_operator/tasks/run-test-operator-job.yml @@ -79,6 +79,13 @@ - not testpod_timed_out ansible.builtin.include_tasks: collect-logs.yaml + - name: Generate stackviz HTML report + when: + - not testpod_timed_out + - cifmw_test_operator_stackviz_generate + - run_test_fw == 'tempest' + ansible.builtin.include_tasks: generate-stackviz-main.yml + - name: Get list of all pods kubernetes.core.k8s_info: kubeconfig: "{{ cifmw_openshift_kubeconfig }}" diff --git a/roles/test_operator/templates/stackviz-index.html.j2 b/roles/test_operator/templates/stackviz-index.html.j2 new file mode 100644 index 
0000000000..1aeeae8ee9 --- /dev/null +++ b/roles/test_operator/templates/stackviz-index.html.j2 @@ -0,0 +1,86 @@ + + + + + + Tempest Test Reports - Summary + + + +

Tempest Test Reports

+

Total Reports Generated: {{ _stackviz_generated_reports | length }}

+ +

Individual Test Stage Reports

+ + +
+ Generated: {{ ansible_date_time.iso8601 }} +
+ + diff --git a/scripts/generate-stackviz-report.py b/scripts/generate-stackviz-report.py new file mode 100644 index 0000000000..832d79bfd7 --- /dev/null +++ b/scripts/generate-stackviz-report.py @@ -0,0 +1,988 @@ +#!/usr/bin/env python3 +import sys +import json +import datetime +import argparse +from pathlib import Path +from subunit import ByteStreamToStreamResult +from testtools import StreamResult + +# Path to the HTML template file (relative to this script) +TEMPLATE_PATH = Path(__file__).parent / "stackviz-report-template.html" + +REPORT_TEMPLATE_FALLBACK = r""" + + + + + Test Execution Report + + + + +
+

Test Execution Report

+ +
+ +
+
+
-
+
Total Tests
+
+
+
-
+
Passed
+
+
+
-
+
Failed
+
+
+
-
+
Skipped
+
+
+
-
+
Total Duration
+
+
+ +
+
+
+

Execution Timeline (by Worker)

+ Drag Navigator below to Zoom/Scroll +
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + + +
StatusTest NameDuration (s)Worker
+
+
+
+ + + +
+ + + + +""" + + +def format_timestamp(timestamp): + """Convert Unix timestamp to formatted datetime string.""" + return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S.%f") + + +class ReportAccumulator(StreamResult): + def __init__(self): + super().__init__() + self.tests = {} # key: test_id + self.events = [] + + def status( + self, + test_id=None, + test_status=None, + test_tags=None, + runnable=True, + file_name=None, + file_bytes=None, + eof=False, + mime_type=None, + route_code=None, + timestamp=None, + ): + if not test_id: + return + + if test_id not in self.tests: + self.tests[test_id] = { + "id": test_id, + "status": None, + "start_time": None, + "end_time": None, + "worker": "unknown", + "tags": set(), + "details": [], + } + + test = self.tests[test_id] + + if timestamp: + ts = timestamp.timestamp() + if test_status == "inprogress": + test["start_time"] = ts + elif test_status in ("success", "fail", "skip", "xfail", "uxsuccess"): + test["end_time"] = ts + + if test_status and test_status != "inprogress": + test["status"] = test_status + + if test_tags: + test["tags"].update(test_tags) + # Try to extract worker from tags + for tag in test_tags: + if tag.startswith("worker-"): + test["worker"] = tag + + if file_bytes: + # Accumulate output/tracebacks + try: + content = bytes(file_bytes).decode("utf-8", errors="replace") + test["details"].append(content) + except (UnicodeDecodeError, AttributeError, TypeError): + test["details"].append(str(file_bytes)) + + def get_results(self): + # Convert to list and clean up + final_tests = [] + + summary = {"total": 0, "passed": 0, "failed": 0, "skipped": 0, "duration": 0.0} + + # Calculate global duration (min start to max end) + all_starts = [] + all_ends = [] + + for test_id, data in self.tests.items(): + if not data["status"]: + # Tests without status indicate incomplete data or stream parsing issues + print( + f"WARNING: Test '{test_id}' has no status - skipping", + file=sys.stderr, + ) + 
continue + + entry = { + "id": data["id"], + "status": data["status"], + "worker": data["worker"], + "start_ts": data["start_time"], + "end_ts": data["end_time"], + "duration": 0.0, + "details": "".join(data["details"]), + } + + # Timestamp formatting + if data["start_time"]: + entry["start_time"] = format_timestamp(data["start_time"]) + all_starts.append(data["start_time"]) + + if data["end_time"]: + entry["end_time"] = format_timestamp(data["end_time"]) + all_ends.append(data["end_time"]) + + if data["start_time"] and data["end_time"]: + entry["duration"] = data["end_time"] - data["start_time"] + + # Summary counts + summary["total"] += 1 + if data["status"] == "success": + summary["passed"] += 1 + elif data["status"] == "fail": + summary["failed"] += 1 + elif data["status"] == "skip": + summary["skipped"] += 1 + + final_tests.append(entry) + + if all_starts and all_ends: + summary["duration"] = max(all_ends) - min(all_starts) + + return {"summary": summary, "tests": final_tests} + + +def process_file(input_file, output_file): + # Depending on how the file is opened (binary) + # subunit v2 stream is binary + + accumulator = ReportAccumulator() + + with open(input_file, "rb") as f: + parser = ByteStreamToStreamResult(f) + parser.run(accumulator) + + data = accumulator.get_results() + + # Load template from external file, fallback to embedded if not found + try: + with open(TEMPLATE_PATH, "r") as template_file: + template = template_file.read() + except FileNotFoundError: + print( + f"WARNING: Template file not found at {TEMPLATE_PATH}, using embedded fallback", + file=sys.stderr, + ) + template = REPORT_TEMPLATE_FALLBACK + + # Embed data + json_data = json.dumps(data) + html_content = template.replace("{{ REPORT_DATA }}", json_data) + + with open(output_file, "w") as f: + f.write(html_content) + + print(f"Generated {output_file} with {data['summary']['total']} tests.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + 
parser.add_argument("input", help="Input .subunit file") + parser.add_argument( + "output", + nargs="?", + default="tempest-viz.html", + help="Output .html file (default: tempest-viz.html)", + ) + + args = parser.parse_args() + process_file(args.input, args.output) diff --git a/scripts/stackviz-report-template.html b/scripts/stackviz-report-template.html new file mode 100644 index 0000000000..e6a5b3fc04 --- /dev/null +++ b/scripts/stackviz-report-template.html @@ -0,0 +1,806 @@ + + + + + + Test Execution Report + + + + +
+

Test Execution Report

+ +
+ +
+
+
-
+
Total Tests
+
+
+
-
+
Passed
+
+
+
-
+
Failed
+
+
+
-
+
Skipped
+
+
+
-
+
Total Duration
+
+
+ +
+
+
+

Execution Timeline (by Worker)

+ Drag Navigator below to Zoom/Scroll +
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + + +
StatusTest NameDuration (s)Worker
+
+
+
+ + + +
+ + + + +