From f2ec7fd3a477d46d68cedf06ea5d756d0be0dddc Mon Sep 17 00:00:00 2001
From: Emma Foley
Date: Wed, 9 Jul 2025 18:11:56 +0100
Subject: [PATCH 1/2] [zuul] Add a CloudKitty CI job to run tempest tests

Add a pre_deploy hook to install loki-operator for CloudKitty deployment.
Add a pre_deploy hook to create the OpenStackControlPlane (OSCP)
kustomization that enables CloudKitty:
* Add the s3StorageConfig secret
* Enable metricStorage in telemetry to provide Prometheus for storage
* Configure storageClass for CloudKitty in the configure-cloudkitty hook

Set the storageClass to crc-csi-hostpath-provisioner for the CloudKitty
deployment in CRC-based CI jobs. This avoids having to increase the PVC
allocations in install_yamls, by using the default storage class
available in CRC, which allocates storage as required.
---
 ci/cloudkitty-pre_deploy-install_loki.yml |  30 +++++
 ci/configure-cloudkitty.yml               |  44 +++++++
 ci/deploy-loki-for-ck.yaml                | 136 ++++++++++++++++++++++
 ci/vars-cloudkitty-tempest.yml            |  65 +++++++++++
 zuul.d/projects.yaml                      |  26 ++++-
 5 files changed, 299 insertions(+), 2 deletions(-)
 create mode 100644 ci/cloudkitty-pre_deploy-install_loki.yml
 create mode 100644 ci/configure-cloudkitty.yml
 create mode 100644 ci/deploy-loki-for-ck.yaml
 create mode 100644 ci/vars-cloudkitty-tempest.yml

diff --git a/ci/cloudkitty-pre_deploy-install_loki.yml b/ci/cloudkitty-pre_deploy-install_loki.yml
new file mode 100644
index 000000000..08bd280e8
--- /dev/null
+++ b/ci/cloudkitty-pre_deploy-install_loki.yml
@@ -0,0 +1,30 @@
+---
+- name: "Install loki for cloudkitty"
+  hosts: "{{ cifmw_target_hook_host | default('localhost') }}"
+  gather_facts: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig | default(ansible_env.HOME + '/.kube/config') }}"
+    PATH: "{{ cifmw_path | default(ansible_env.PATH) }}"
+  tasks:
+    - name: Set zuul
+      when: not zuul is defined
+      ansible.builtin.set_fact:
+        zuul:
+          projects:
+            github.com/openstack-k8s-operators/telemetry-operator:
+              src_dir: "{{ telemetry_operator_dir | default('telemetry-operator/') }}"
+
+    - name: Deploy loki operator
+      ansible.builtin.shell:
+        cmd: |
+          oc apply -f {{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/deploy-loki-for-ck.yaml
+
+    - name: Wait up to 5 minutes until the Loki CSV is Succeeded
+      ansible.builtin.shell:
+        cmd: |
+          oc get csv | grep loki-operator
+      ignore_errors: true
+      register: output
+      until: output.stdout_lines | length == 1 and "Succeeded" in output.stdout
+      retries: 30
+      delay: 10
diff --git a/ci/configure-cloudkitty.yml b/ci/configure-cloudkitty.yml
new file mode 100644
index 000000000..5b82500f5
--- /dev/null
+++ b/ci/configure-cloudkitty.yml
@@ -0,0 +1,44 @@
+---
+- name: "Create the kustomization for deploying CloudKitty"
+  hosts: "{{ cifmw_target_hook_host | default('localhost') }}"
+  gather_facts: false
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  tasks:
+    - name: Copy controlplane kustomization
+      ansible.builtin.copy:
+        dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/90-kustomize-controlplane-cloudkitty.yaml"
+        content: |-
+          apiVersion: kustomize.config.k8s.io/v1beta1
+          kind: Kustomization
+          namespace: openstack
+          patches:
+            - patch: |-
+                apiVersion: core.openstack.org/v1beta1
+                kind: OpenStackControlPlane
+                metadata:
+                  name: unused
+                spec:
+                  # Set the overall storage class so we don't need to increase the
+                  # number of PVCs that install_yamls creates.
+                  # This is only applicable to crc-based jobs; it is not in
+                  # openshift by default, but is included in the crc distribution.
+                  storageClass: crc-csi-hostpath-provisioner
+                  telemetry:
+                    enabled: true
+                    template:
+                      logging:
+                        enabled: false
+                      autoscaling:
+                        enabled: false
+                      cloudkitty:
+                        enabled: true
+                        s3StorageConfig:
+                          secret:
+                            name: cloudkitty-loki-s3
+                            type: s3
+                      metricStorage:
+                        enabled: true
+              target:
+                kind: OpenStackControlPlane
diff --git a/ci/deploy-loki-for-ck.yaml b/ci/deploy-loki-for-ck.yaml
new file mode 100644
index 000000000..7045434fb
--- /dev/null
+++ b/ci/deploy-loki-for-ck.yaml
@@ -0,0 +1,136 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-operators-redhat
+  labels:
+    name: openshift-operators-redhat
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: loki-operator
+  namespace: openshift-operators-redhat
+spec:
+  upgradeStrategy: Default
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: loki-operator
+  namespace: openshift-operators-redhat
+spec:
+  channel: stable-6.3
+  name: loki-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+---
+# Deploys a new Namespace for the MinIO Pod
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: minio-dev # Change this value if you want a different namespace name
+  labels:
+    name: minio-dev # Change this value to match metadata.name
+---
+# Deploys a new MinIO Pod into the metadata.namespace Kubernetes namespace
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: minio
+  name: minio
+  namespace: minio-dev # Change this value to match the namespace metadata.name
+spec:
+  containers:
+    - name: minio
+      image: quay.io/minio/minio:latest
+      command:
+        - /bin/bash
+        - -c
+        - |
+          mkdir -p /data/loki && \
+          minio server /data
+      env:
+        - name: MINIO_ACCESS_KEY
+          value: minio
+        - name: MINIO_SECRET_KEY
+          value: minio123
+      volumeMounts:
+        - mountPath: /data
+          name: storage # Corresponds to the `spec.volumes` Persistent Volume
+  volumes:
+    - name: storage
+      persistentVolumeClaim:
+        claimName: minio-pvc
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: minio-pvc
+  namespace: minio-dev
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  storageClassName: crc-csi-hostpath-provisioner
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: minio
+  namespace: minio-dev
+spec:
+  selector:
+    app: minio
+  ports:
+    - name: api
+      protocol: TCP
+      port: 9000
+    - name: console
+      protocol: TCP
+      port: 9090
+---
+kind: Route
+apiVersion: route.openshift.io/v1
+metadata:
+  name: minio-console
+  namespace: minio-dev
+spec:
+  host: console-minio-dev.apps-crc.testing
+  to:
+    kind: Service
+    name: minio
+    weight: 100
+  port:
+    targetPort: console
+  wildcardPolicy: None
+---
+kind: Route
+apiVersion: route.openshift.io/v1
+metadata:
+  name: minio-api
+  namespace: minio-dev
+spec:
+  host: api-minio-dev.apps-crc.testing
+  to:
+    kind: Service
+    name: minio
+    weight: 100
+  port:
+    targetPort: api
+  wildcardPolicy: None
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cloudkitty-loki-s3
+  namespace: openstack
+stringData:
+  access_key_id: minio
+  access_key_secret: minio123
+  bucketnames: loki
+  endpoint: http://minio.minio-dev.svc.cluster.local:9000
diff --git a/ci/vars-cloudkitty-tempest.yml b/ci/vars-cloudkitty-tempest.yml
new file mode 100644
index 000000000..140b80d58
--- /dev/null
+++ b/ci/vars-cloudkitty-tempest.yml
@@ -0,0 +1,65 @@
+---
+cifmw_deploy_obs: true
+cifmw_openshift_obs_definition:
+  apiVersion: operators.coreos.com/v1alpha1
+  kind: Subscription
+  metadata:
+    name: observability-operator
+    namespace: openshift-operators
+  spec:
+    channel: stable
+    installPlanApproval: Automatic
+    name: cluster-observability-operator
+    source: redhat-operators
+    sourceNamespace: openshift-marketplace
+
+pre_deploy_kustomize_cloudkitty:
+  source: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/configure-cloudkitty.yml"
+  type: playbook
+
+pre_deploy_loki_setup:
+  source: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/cloudkitty-pre_deploy-install_loki.yml"
+  type: playbook
+# test cloudkitty
+cifmw_run_tests: true
+cifmw_run_test_role: test_operator
+# TODO: Consider switching to podified-master-centos10 for features that patch master
+cifmw_test_operator_tempest_namespace: podified-antelope-centos9
+# cloudkitty tempest plugin is not part of the tempest rpm.
+# https://review.rdoproject.org/cgit/openstack/tempest-distgit/tree/openstack-tempest.spec
+# We need to add the cloudkitty-tempest-plugin package to RDO, same as TTTP
+# https://review.rdoproject.org/cgit/openstack/telemetry-tempest-plugin-distgit/#
+# For now, we can force install using the cifmw_test_operator_tempest_external_plugin below.
+cifmw_test_operator_tempest_container: openstack-tempest-all
+cifmw_test_operator_tempest_image_tag: 'current-podified'
+# This value is used to populate the `tempestconfRun` parameter of the Tempest CR: https://openstack-k8s-operators.github.io/test-operator/crds.html#tempest-custom-resource
+# https://github.com/openstack-k8s-operators/ci-framework/blob/main/roles/test_operator/defaults/main.yml
+# TODO: Refine this tempest config
+tempest_conf:
+  overrides: |
+    validation.run_validation true
+    identity.v3_endpoint_type public
+    service_available.ceilometer true
+    service_available.sg_core true
+    service_available.aodh false
+    service_available.cinder false
+    telemetry.sg_core_service_url "https://ceilometer-internal.openstack.svc.cluster.local:3000"
+    telemetry.prometheus_service_url "https://metric-storage-prometheus.openstack.svc.cluster.local:9090"
+    telemetry.ceilometer_polling_interval 120
+    telemetry.prometheus_scrape_interval 30
+    telemetry.alarm_threshold 50000000000
+
+cifmw_test_operator_tempest_tempestconf_config: "{{ tempest_conf }}"
+cifmw_test_operator_tempest_include_list: |
+  ^tempest.*\[.*\bsmoke\b.*\]
+  cloudkitty_tempest_plugin.*
+  telemetry_tempest_plugin.*
+cifmw_test_operator_tempest_exclude_list: |
+  telemetry_tempest_plugin.scenario.test_telemetry_integration_prometheus.PrometheusGabbiTest.test_autoscaling
+
+# TODO: update this to allow multiple external plugins to be listed with Depends-On.
+# Potentially, this can be done via the meta content provider, by adding the tempest images to the list.
+external_plugin: "opendev.org/openstack/cloudkitty-tempest-plugin"
+change_item: "{{ zuul['items'] | selectattr('project.canonical_name', 'equalto', external_plugin) }}"
+# WORKAROUND: CloudKitty tempest is not packaged in RDO. Typically, the default would be [], since we would not require an external installation.
+cifmw_test_operator_tempest_external_plugin: "{{ [ {'repository': 'https://' + external_plugin + '.git'} ] if change_item | length < 1 else [ { 'repository': 'https://' + external_plugin + '.git', 'changeRepository': 'https://review.' + external_plugin, 'changeRefspec': [ 'refs/changes', change_item[0].change[-2:], change_item[0].change, change_item[0].patchset ] | join('/') } ] }}"
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 95ded5fa2..9469c869f 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -105,6 +105,27 @@
       - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/vars-power-monitoring.yml"
     irrelevant-files: *irrelevant_files
 
+- job:
+    name: telemetry-operator-multinode-cloudkitty
+    dependencies: ["telemetry-openstack-meta-content-provider-master"]
+    parent: telemetry-operator-multinode-autoscaling
+    description: |
+      Deploy CloudKitty and run tempest tests
+    required-projects:
+      - name: infrawatch/feature-verification-tests
+        override-checkout: master
+    extra-vars: *mcp_extra_vars
+    vars:
+      #patch_observabilityclient: true
+      cifmw_update_containers: false
+      cifmw_extras:
+        - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/scenarios/centos-9/multinode-ci.yml"
+        - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/vars-cloudkitty-tempest.yml"
+        - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/infrawatch/feature-verification-tests'].src_dir }}/ci/vars-use-master-containers.yml"
+    roles:
+      - zuul: github.com/openstack-k8s-operators/ci-framework
+    irrelevant-files: *irrelevant_files
+
 - project-template:
     name: rdo-telemetry-tempest-plugin-jobs
     openstack-experimental:
@@ -134,13 +155,14 @@
 
 - project:
     name: openstack-k8s-operators/telemetry-operator
-    templates:
-      - podified-multinode-edpm-pipeline
     github-check:
       jobs:
         - openstack-k8s-operators-content-provider:
            vars:
              cifmw_install_yamls_sdk_version: v1.41.1
+        - telemetry-operator-multinode-cloudkitty:
+            dependencies:
+              - telemetry-openstack-meta-content-provider-master
         - telemetry-openstack-meta-content-provider-master
         - telemetry-operator-multinode-default-telemetry
         - functional-tests-osp18: &fvt_jobs_config
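
For reviewers trying this locally: the playbook below is an illustrative verification hook in the same style as the ones added above. It is a sketch, not part of this series; it only reuses names that patch 1 defines (the openshift-operators-redhat namespace, the minio Service in minio-dev, and the cloudkitty-loki-s3 Secret in openstack), and it assumes oc is on PATH and KUBECONFIG is already exported.

---
# Sketch only: check the objects created by ci/deploy-loki-for-ck.yaml.
- name: "Verify CloudKitty prerequisites"
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Check the Loki CSV, the MinIO service and the CloudKitty S3 secret
      ansible.builtin.shell:
        cmd: |
          set -e
          oc get csv -n openshift-operators-redhat | grep loki-operator
          oc get svc minio -n minio-dev
          oc get secret cloudkitty-loki-s3 -n openstack
      register: cloudkitty_prereqs
      changed_when: false
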
From 54fb11ea8809fd8bea493d01956f071b4451c61b Mon Sep 17 00:00:00 2001
From: Emma Foley
Date: Mon, 24 Nov 2025 14:40:12 -0500
Subject: [PATCH 2/2] [ci] Pin loki-operator version to 6.3.0

Loki-operator needs to be pinned to 6.3.0, because 6.3.1 has this issue:
https://issues.redhat.com/browse/LOG-7752
---
 ci/cloudkitty-pre_deploy-install_loki.yml | 27 +++++++++++++++++++++++
 ci/deploy-loki-for-ck.yaml                |  2 ++
 2 files changed, 29 insertions(+)

diff --git a/ci/cloudkitty-pre_deploy-install_loki.yml b/ci/cloudkitty-pre_deploy-install_loki.yml
index 08bd280e8..21be07c99 100644
--- a/ci/cloudkitty-pre_deploy-install_loki.yml
+++ b/ci/cloudkitty-pre_deploy-install_loki.yml
@@ -14,11 +14,38 @@
             github.com/openstack-k8s-operators/telemetry-operator:
               src_dir: "{{ telemetry_operator_dir | default('telemetry-operator/') }}"
 
+    # NOTE: The value doesn't get used unless deploy-loki-for-ck.yaml is converted into a template and rendered.
+    # TODO: Update the yaml to a template
+    - name: Set the loki-operator version to pin the version
+      ansible.builtin.set_fact:
+        loki_operator_version: "v6.3.0"
+
     - name: Deploy loki operator
       ansible.builtin.shell:
         cmd: |
           oc apply -f {{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/deploy-loki-for-ck.yaml
 
+    - name: Get and approve the installplan when the version is pinned
+      when: loki_operator_version is defined
+      block:
+        - name: Get the installplan from the loki-operator subscription
+          ansible.builtin.shell:
+            cmd: |
+              oc get installplan -n openshift-operators-redhat | grep "loki-operator.{{ loki_operator_version }}" | awk '{print $1}'
+          retries: 10
+          delay: 10
+          register: loki_installplan
+          until: loki_installplan.stdout_lines | length != 0
+
+        - name: Show the loki_installplan from oc get installplan
+          ansible.builtin.debug:
+            var: loki_installplan
+
+        - name: Approve the installation
+          ansible.builtin.shell:
+            cmd: |
+              oc patch -n openshift-operators-redhat installplan {{ loki_installplan.stdout }} --type='json' -p='[{"op": "replace", "path": "/spec/approved", "value":true}]'
+
     - name: Wait up to 5 minutes until the Loki CSV is Succeeded
       ansible.builtin.shell:
         cmd: |
diff --git a/ci/deploy-loki-for-ck.yaml b/ci/deploy-loki-for-ck.yaml
index 7045434fb..8bf23f2b0 100644
--- a/ci/deploy-loki-for-ck.yaml
+++ b/ci/deploy-loki-for-ck.yaml
@@ -21,6 +21,8 @@ metadata:
   namespace: openshift-operators-redhat
 spec:
   channel: stable-6.3
+  installPlanApproval: Manual
+  startingCSV: loki-operator.v6.3.0
   name: loki-operator
   source: redhat-operators
   sourceNamespace: openshift-marketplace
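
As a reading aid for the Jinja expression at the end of ci/vars-cloudkitty-tempest.yml: when no Depends-On change for cloudkitty-tempest-plugin is in the Zuul change list, it renders to a single-entry list carrying only the repository URL; when such a change is present, it also points the test-operator tempest run at the corresponding Gerrit refspec. A rough rendering for a hypothetical change 934512, patchset 3 (both values made up for illustration) would be:

# Illustrative rendering only; the change number and patchset are hypothetical.
cifmw_test_operator_tempest_external_plugin:
  - repository: "https://opendev.org/openstack/cloudkitty-tempest-plugin.git"
    changeRepository: "https://review.opendev.org/openstack/cloudkitty-tempest-plugin"
    changeRefspec: "refs/changes/12/934512/3"  # refs/changes/<last two digits>/<change>/<patchset>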