diff --git a/ansible/init-data-gzipper.yaml b/ansible/init-data-gzipper.yaml
index 20459b84..1d22dfea 100644
--- a/ansible/init-data-gzipper.yaml
+++ b/ansible/init-data-gzipper.yaml
@@ -1,4 +1,4 @@
-- name: Gzip initdata
+- name: Gzip initdata and register init data
   become: false
   connection: local
   hosts: localhost
@@ -7,6 +7,7 @@
     kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
     cluster_platform: "{{ global.clusterPlatform | default('none') | lower }}"
     hub_domain: "{{ global.hubClusterDomain | default('none') | lower}}"
+    security_policy_flavour: "{{ global.coco.securityPolicyFlavour | default('insecure') }}"
     template_src: "initdata-default.toml.tpl"
   tasks:
     - name: Create temporary working directory
@@ -55,6 +56,22 @@
         path: "{{ gz_path }}"
       register: gz_slurped
 
+    # This block runs a shell script that calculates a hash value (PCR8_HASH) derived from the contents of 'initdata.toml'.
+    # The script performs the following steps:
+    # 1. hash=$(sha256sum initdata.toml | cut -d' ' -f1): Computes the sha256 hash of 'initdata.toml' and assigns it to $hash.
+    # 2. initial_pcr=000000000000000000000000000000000000000000000000000000000000000: Initializes a string of zeros as the initial PCR value.
+    # 3. PCR8_HASH=$(echo -n "$initial_pcr$hash" | xxd -r -p | sha256sum | cut -d' ' -f1): Concatenates initial_pcr and $hash, converts from hex to binary, computes its sha256 hash, and stores the result as PCR8_HASH.
+    # 4. echo $PCR8_HASH: Outputs the PCR hash value.
+    # The important part: 'register: pcr8_hash' registers the stdout of the command, i.e. the value printed by 'echo $PCR8_HASH', as 'pcr8_hash.stdout' in Ansible.
+    # It does NOT register an environment variable, but rather the value actually printed by 'echo'.
+    - name: Register init data pcr into a var
+      ansible.builtin.shell: |
+        hash=$(sha256sum initdata.toml | cut -d' ' -f1)
+        initial_pcr=000000000000000000000000000000000000000000000000000000000000000
+        PCR8_HASH=$(echo -n "$initial_pcr$hash" | xxd -r -p | sha256sum | cut -d' ' -f1) && echo $PCR8_HASH
+      register: pcr8_hash
+
+
     - name: Create/update ConfigMap with gzipped+base64 content
       kubernetes.core.k8s:
         kubeconfig: "{{ kubeconfig | default(omit) }}"
@@ -67,3 +84,4 @@
           namespace: "imperative"
         data:
           INITDATA: "{{ gz_slurped.content }}"
+          PCR8_HASH: "{{ pcr8_hash.stdout }}"
diff --git a/ansible/initdata-default.toml.tpl b/ansible/initdata-default.toml.tpl
index 9cadbc1c..47246798 100644
--- a/ansible/initdata-default.toml.tpl
+++ b/ansible/initdata-default.toml.tpl
@@ -24,6 +24,10 @@ url = "https://kbs.{{ hub_domain }}"
 kbs_cert = """
 {{ trustee_cert }}
 """
+
+
+[image]
+image_security_policy_uri = 'kbs:///default/security-policy/{{ security_policy_flavour }}'
 '''
 
 "policy.rego" = '''
@@ -36,7 +40,6 @@ default CopyFileRequest := true
 default CreateContainerRequest := true
 default CreateSandboxRequest := true
 default DestroySandboxRequest := true
-default ExecProcessRequest := false
 default GetMetricsRequest := true
 default GetOOMEventRequest := true
 default GuestDetailsRequest := true
@@ -52,7 +55,6 @@ default RemoveStaleVirtiofsShareMountsRequest := true
 default ReseedRandomDevRequest := true
 default ResumeContainerRequest := true
 default SetGuestDateTimeRequest := true
-default SetPolicyRequest := true
 default SignalProcessRequest := true
 default StartContainerRequest := true
 default StartTracingRequest := true
@@ -64,5 +66,20 @@ default UpdateEphemeralMountsRequest := true
 default UpdateInterfaceRequest := true
 default UpdateRoutesRequest := true
 default WaitProcessRequest := true
-default WriteStreamRequest := true
+default ExecProcessRequest := false
+default SetPolicyRequest := false
+default WriteStreamRequest := false
+
+ExecProcessRequest if {
+    input_command = concat(" ", input.process.Args)
+    some allowed_command in policy_data.allowed_commands
+    input_command == allowed_command
+}
+
+policy_data := {
+    "allowed_commands": [
+        "curl http://127.0.0.1:8006/cdh/resource/default/attestation-status/status",
+        "curl http://127.0.0.1:8006/cdh/resource/default/attestation-status/random"
+    ]
+}
 '''
\ No newline at end of file
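The PCR8 pre-computation in the Ansible task above can be reproduced standalone. A minimal sketch, assuming `initdata.toml` sits in the current working directory; a SHA-256 PCR bank starts from 32 zero bytes (64 hex characters), and each extend is `sha256(old_value || new_measurement)`:

```bash
#!/usr/bin/env bash
# Sketch: recompute the expected PCR8 value for a given initdata.toml.
# Assumes initdata.toml is in the current directory.
set -euo pipefail

hash=$(sha256sum initdata.toml | cut -d' ' -f1)

# A SHA-256 PCR starts as 32 zero bytes (64 hex zeros); extend = sha256(old || new).
initial_pcr=$(printf '0%.0s' {1..64})

pcr8=$(echo -n "${initial_pcr}${hash}" | xxd -r -p | sha256sum | cut -d' ' -f1)
echo "expected PCR8: ${pcr8}"
```

Comparing this against the `PCR8_HASH` key published in the imperative ConfigMap is a quick sanity check; note the initial PCR string must be exactly 64 hex characters so that `xxd -r -p` decodes the concatenation cleanly.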
diff --git a/overrides/values-trustee.yaml b/overrides/values-trustee.yaml
index ee42e416..03dd120a 100644
--- a/overrides/values-trustee.yaml
+++ b/overrides/values-trustee.yaml
@@ -6,4 +6,9 @@ kbs:
     - name: "kbsres1" # name is the name of the k8s secret that will be presented to trustee and accessible via the CDH
       key: "secret/data/hub/kbsres1" # this is the path to the secret in vault.
     - name: "passphrase"
-      key: "secret/data/hub/passphrase"
\ No newline at end of file
+      key: "secret/data/hub/passphrase"
+# Override the default values for the coco pattern: needed when testing against a branch,
+# where the defaults are not picked up reliably.
+# FIXME: Don't commit this to main
+global:
+  coco:
+    secured: true # If true, the cluster will be secured; if false, it will be insecure.
\ No newline at end of file
diff --git a/rhdp/rhdp-cluster-define.py b/rhdp/rhdp-cluster-define.py
index 522c0bb3..b65cb4b3 100644
--- a/rhdp/rhdp-cluster-define.py
+++ b/rhdp/rhdp-cluster-define.py
@@ -13,8 +13,22 @@
 from typing_extensions import Annotated
 
 
-def get_default_cluster_configs() -> List[Dict]:
-    """Get default cluster configurations"""
+def get_default_cluster_configs(prefix: str = "") -> List[Dict]:
+    """Get default cluster configurations
+
+    Args:
+        prefix: Optional prefix to add to cluster name and directory
+    """
+    if prefix:
+        return [
+            {
+                "name": f"coco-{prefix}",
+                "directory": f"openshift-install-{prefix}",
+                "cluster_network_cidr": "10.128.0.0/14",
+                "machine_network_cidr": "10.0.0.0/16",
+                "service_network_cidr": "172.30.0.0/16",
+            }
+        ]
     return [
         {
             "name": "coco",
@@ -135,6 +149,9 @@ def run(
     multicluster: Annotated[
         bool, typer.Option("--multicluster", help="Deploy hub and spoke clusters")
     ] = False,
+    prefix: Annotated[
+        str, typer.Option("--prefix", help="Prefix for cluster name and directory")
+    ] = "",
 ):
     """
     Region flag requires an azure region key which can be (authoritatively)
@@ -142,16 +159,25 @@
 
     Use --multicluster flag to deploy both hub (coco-hub) and spoke (coco-spoke)
     clusters.
+
+    Use --prefix to add a prefix to cluster name and install directory, enabling
+    multiple cluster deployments (e.g., --prefix cluster1 creates coco-cluster1
+    in openshift-install-cluster1).
     """
     validate_dir()
 
     # Choose cluster configurations based on multicluster flag
     if multicluster:
+        if prefix:
+            rprint("WARNING: --prefix is ignored when using --multicluster")
         cluster_configs = get_multicluster_configs()
         rprint("Setting up multicluster deployment (hub and spoke)")
     else:
-        cluster_configs = get_default_cluster_configs()
-        rprint("Setting up single cluster deployment")
+        cluster_configs = get_default_cluster_configs(prefix)
+        if prefix:
+            rprint(f"Setting up single cluster deployment with prefix: {prefix}")
+        else:
+            rprint("Setting up single cluster deployment")
 
     cleanup(pathlib.Path.cwd(), cluster_configs)
     setup_install(
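A hedged smoke test for the ExecProcessRequest allow-list added to initdata-default.toml.tpl above: the agent policy compares `concat(" ", input.process.Args)` against the `allowed_commands` entries for exact equality, so only those exact command strings should exec successfully. Pod and namespace names below are placeholders:

```bash
# Placeholders: a running CoCo workload pod in some namespace.
POD=my-coco-workload
NS=my-namespace

# Should be allowed: matches an allowed_commands entry exactly.
oc exec -n "$NS" "$POD" -- \
  curl http://127.0.0.1:8006/cdh/resource/default/attestation-status/status

# Should be rejected by the agent policy: not in the allow-list
# (even an extra flag such as "curl -s ..." changes the joined string).
oc exec -n "$NS" "$POD" -- ls /
```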
""" validate_dir() # Choose cluster configurations based on multicluster flag if multicluster: + if prefix: + rprint("WARNING: --prefix is ignored when using --multicluster") cluster_configs = get_multicluster_configs() rprint("Setting up multicluster deployment (hub and spoke)") else: - cluster_configs = get_default_cluster_configs() - rprint("Setting up single cluster deployment") + cluster_configs = get_default_cluster_configs(prefix) + if prefix: + rprint(f"Setting up single cluster deployment with prefix: {prefix}") + else: + rprint("Setting up single cluster deployment") cleanup(pathlib.Path.cwd(), cluster_configs) setup_install( diff --git a/rhdp/wrapper.sh b/rhdp/wrapper.sh index 5fbf6994..5bc1f992 100755 --- a/rhdp/wrapper.sh +++ b/rhdp/wrapper.sh @@ -14,13 +14,56 @@ get_python_cmd() { fi } -if [ "$#" -ne 1 ]; then - echo "Error: Exactly one argument is required." - echo "Usage: $0 {azure-region-code}" +# Parse arguments +AZUREREGION="" +PREFIX="" + +while [[ $# -gt 0 ]]; do + case $1 in + --prefix) + PREFIX="$2" + shift 2 + ;; + --prefix=*) + PREFIX="${1#*=}" + shift + ;; + -*) + echo "Error: Unknown option $1" + echo "Usage: $0 [--prefix ] {azure-region-code}" + echo "Example: $0 eastasia" + echo "Example: $0 --prefix cluster1 eastasia" + exit 1 + ;; + *) + if [ -z "$AZUREREGION" ]; then + AZUREREGION="$1" + else + echo "Error: Too many positional arguments." + echo "Usage: $0 [--prefix ] {azure-region-code}" + exit 1 + fi + shift + ;; + esac +done + +if [ -z "$AZUREREGION" ]; then + echo "Error: Azure region is required." + echo "Usage: $0 [--prefix ] {azure-region-code}" echo "Example: $0 eastasia" + echo "Example: $0 --prefix cluster1 eastasia" exit 1 fi -AZUREREGION=$1 + +# Set install directory based on prefix +if [ -n "$PREFIX" ]; then + INSTALL_DIR="openshift-install-${PREFIX}" + echo "Using prefix: $PREFIX" + echo "Install directory: $INSTALL_DIR" +else + INSTALL_DIR="openshift-install" +fi echo "---------------------" echo "Validating configuration" @@ -113,7 +156,11 @@ echo "---------------------" echo "defining cluster" echo "---------------------" PYTHON_CMD=$(get_python_cmd) -$PYTHON_CMD rhdp/rhdp-cluster-define.py ${AZUREREGION} +if [ -n "$PREFIX" ]; then + $PYTHON_CMD rhdp/rhdp-cluster-define.py --prefix "${PREFIX}" ${AZUREREGION} +else + $PYTHON_CMD rhdp/rhdp-cluster-define.py ${AZUREREGION} +fi echo "---------------------" echo "cluster defined" echo "---------------------" @@ -121,7 +168,7 @@ sleep 10 echo "---------------------" echo "openshift-install" echo "---------------------" -openshift-install create cluster --dir=./openshift-install +openshift-install create cluster --dir=./${INSTALL_DIR} echo "openshift-install done" echo "---------------------" echo "setting up secrets" @@ -133,7 +180,7 @@ sleep 60 echo "---------------------" echo "pattern install" echo "---------------------" -export KUBECONFIG="$(pwd)/openshift-install/auth/kubeconfig" +export KUBECONFIG="$(pwd)/${INSTALL_DIR}/auth/kubeconfig" ./pattern.sh make install diff --git a/scripts/gen-secrets.sh b/scripts/gen-secrets.sh index 25c4713a..1196b51d 100755 --- a/scripts/gen-secrets.sh +++ b/scripts/gen-secrets.sh @@ -4,8 +4,6 @@ echo "Creating secrets as required" echo COCO_SECRETS_DIR="${HOME}/.coco-pattern" -SECURITY_POLICY_FILE="${COCO_SECRETS_DIR}/security-policy-config.json" -SSH_KEY_FILE="${COCO_SECRETS_DIR}/id_rsa" KBS_PRIVATE_KEY="${COCO_SECRETS_DIR}/kbsPrivateKey" KBS_PUBLIC_KEY="${COCO_SECRETS_DIR}/kbsPublicKey" SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" @@ -13,19 
diff --git a/scripts/gen-secrets.sh b/scripts/gen-secrets.sh
index 25c4713a..1196b51d 100755
--- a/scripts/gen-secrets.sh
+++ b/scripts/gen-secrets.sh
@@ -4,8 +4,6 @@
 echo "Creating secrets as required"
 echo
 COCO_SECRETS_DIR="${HOME}/.coco-pattern"
-SECURITY_POLICY_FILE="${COCO_SECRETS_DIR}/security-policy-config.json"
-SSH_KEY_FILE="${COCO_SECRETS_DIR}/id_rsa"
 KBS_PRIVATE_KEY="${COCO_SECRETS_DIR}/kbsPrivateKey"
 KBS_PUBLIC_KEY="${COCO_SECRETS_DIR}/kbsPublicKey"
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
@@ -13,19 +11,6 @@ VALUES_FILE="${HOME}/values-secret-coco-pattern.yaml"
 
 mkdir -p ${COCO_SECRETS_DIR}
 
-if [ ! -f "${SECURITY_POLICY_FILE}" ]; then
-echo "Creating security policy"
-cat > ${SECURITY_POLICY_FILE} < /dev/null; then
+  echo "ERROR: yq is required but not installed"
+  echo "Please install yq: https://github.com/mikefarah/yq#install"
+  exit 1
+fi
+
+# 3. Check values-global.yaml exists
+if [ ! -f "values-global.yaml" ]; then
+  echo "ERROR: values-global.yaml not found in current directory"
+  echo "Please run this script from the root directory of the project"
+  exit 1
+fi
+
+# 4. Get the active clusterGroupName from values-global.yaml
+CLUSTER_GROUP_NAME=$(yq eval '.main.clusterGroupName' values-global.yaml)
+
+if [ -z "$CLUSTER_GROUP_NAME" ] || [ "$CLUSTER_GROUP_NAME" == "null" ]; then
+  echo "ERROR: Could not determine clusterGroupName from values-global.yaml"
+  echo "Expected: main.clusterGroupName to be set"
+  exit 1
+fi
+
+echo "Active clusterGroup: $CLUSTER_GROUP_NAME"
+
+# 5. Locate the values file for the active clusterGroup
+VALUES_FILE="values-${CLUSTER_GROUP_NAME}.yaml"
+
+if [ ! -f "$VALUES_FILE" ]; then
+  echo "ERROR: Values file for clusterGroup not found: $VALUES_FILE"
+  exit 1
+fi
+
+# 6. Get the sandboxed container operator CSV from the clusterGroup values
+SANDBOX_CSV=$(yq eval '.clusterGroup.subscriptions.sandbox.csv' "$VALUES_FILE")
+
+if [ -z "$SANDBOX_CSV" ] || [ "$SANDBOX_CSV" == "null" ]; then
+  echo "WARNING: No sandboxed container operator CSV found in $VALUES_FILE"
+  echo "The subscription clusterGroup.subscriptions.sandbox.csv is not defined"
+  exit 0
+fi
+
+# Extract version from CSV (e.g., "sandboxed-containers-operator.v1.11.0" -> "1.11.0")
+# Remove everything up to and including ".v"
+SANDBOX_VERSION="${SANDBOX_CSV##*.v}"
+
+echo "Sandboxed container operator CSV: $SANDBOX_CSV"
+echo "Version: $SANDBOX_VERSION"
+# alternatively, use the operator-version tag.
+# OSC_VERSION=1.11.1
+VERITY_IMAGE=registry.redhat.io/openshift-sandboxed-containers/osc-dm-verity-image
+
+TAG=$(skopeo inspect --authfile $PULL_SECRET_PATH docker://${VERITY_IMAGE}:${SANDBOX_VERSION} | jq -r .Digest)
+
+IMAGE=${VERITY_IMAGE}@${TAG}
+
+echo "IMAGE: $IMAGE"
+
+curl -L https://tuf-default.apps.rosa.rekor-prod.2jng.p3.openshiftapps.com/targets/rekor.pub -o rekor.pub
+curl -L https://security.access.redhat.com/data/63405576.txt -o cosign-pub-key.pem
+# export REGISTRY_AUTH_FILE=${PULL_SECRET_PATH}
+# echo "REGISTRY_AUTH_FILE: $REGISTRY_AUTH_FILE"
+# export SIGSTORE_REKOR_PUBLIC_KEY=${PWD}/rekor.pub
+# echo "SIGSTORE_REKOR_PUBLIC_KEY: $SIGSTORE_REKOR_PUBLIC_KEY"
+# cosign verify --key cosign-pub-key.pem --output json --rekor-url=https://rekor-server-default.apps.rosa.rekor-prod.2jng.p3.openshiftapps.com $IMAGE > cosign_verify.log
+
+
+# Ensure output directory exists
+mkdir -p ~/.coco-pattern
+
+# Clean up any existing measurement files
+rm -f ~/.coco-pattern/measurements-raw.json ~/.coco-pattern/measurements.json
+
+# Download the measurements using podman cp (works on macOS with remote podman)
+podman pull --authfile $PULL_SECRET_PATH $IMAGE
+
+cid=$(podman create --entrypoint /bin/true $IMAGE)
+echo "CID: ${cid}"
+podman cp $cid:/image/measurements.json ~/.coco-pattern/measurements-raw.json
+podman rm $cid
+
+# Trim leading "0x" from all measurement values
+jq 'walk(if type == "string" and startswith("0x") then .[2:] else . end)' \
+  ~/.coco-pattern/measurements-raw.json > ~/.coco-pattern/measurements.json
+
+echo "Measurements saved to ~/.coco-pattern/measurements.json (0x prefixes removed)"
\ No newline at end of file
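A follow-up sanity check after running the measurements script, sketched under the same assumptions (PULL_SECRET_PATH set, the osc-dm-verity-image tag matching the pinned CSV version, and measurements.json containing hex strings):

```bash
# Re-derive the digest-pinned reference for a given version (the tag is an example).
VERITY_IMAGE=registry.redhat.io/openshift-sandboxed-containers/osc-dm-verity-image
DIGEST=$(skopeo inspect --authfile "$PULL_SECRET_PATH" \
  "docker://${VERITY_IMAGE}:1.11.0" | jq -r .Digest)
echo "pinned: ${VERITY_IMAGE}@${DIGEST}"

# Confirm the 0x trimming worked: no string anywhere in the file starts with 0x.
jq -e 'all(.. | strings; startswith("0x") | not)' ~/.coco-pattern/measurements.json \
  && echo "measurements.json contains no 0x-prefixed values"
```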
diff --git a/values-global.yaml b/values-global.yaml
index e91c7f0c..ceeb1241 100644
--- a/values-global.yaml
+++ b/values-global.yaml
@@ -11,6 +11,8 @@ global:
   # This defines whether or not to use upstream resources for CoCo.
   # Defines whether or not the hub cluster can be used for confidential containers
   coco:
+    securityPolicyFlavour: "insecure" # one of: insecure, signed, or reject.
+    secured: true # If true, the cluster will be secured; if false, it will be insecure.
   azure:
     defaultVMFlavour: "Standard_DC2as_v5"
     VMFlavours: "Standard_DC2as_v5,Standard_DC4as_v5,Standard_DC8as_v5,Standard_DC16as_v5"
@@ -24,6 +26,7 @@ main:
   clusterGroupChartVersion: 0.9.*
 
 # Common secret store configuration used across multiple charts
+# Warning: do not rely on this; it is not applied consistently.
 secretStore:
   name: vault-backend
   kind: ClusterSecretStore
diff --git a/values-secret.yaml.template b/values-secret.yaml.template
index fe410d42..b3df87dd 100644
--- a/values-secret.yaml.template
+++ b/values-secret.yaml.template
@@ -6,21 +6,80 @@ version: "2.0"
 # automatically generated inside the vault this should not really matter)
 secrets:
-  - name: 'sshKey'
+
+
+  - name: securityPolicyConfig
     vaultPrefixes:
-      - global
+      - hub
     fields:
-      - name: id_rsa.pub
-        path: ~/.coco-pattern/id_rsa.pub
-      - name: id_rsa
-        path: ~/.coco-pattern/id_rsa
+      # Accept all images without verification (INSECURE - dev/testing only)
+      - name: insecure
+        value: |
+          {
+            "default": [{"type": "insecureAcceptAnything"}],
+            "transports": {}
+          }
+      # Reject all images (useful for testing policy enforcement)
+      - name: reject
+        value: |
+          {
+            "default": [{"type": "reject"}],
+            "transports": {}
+          }
+      # Only accept signed images (production)
+      # Edit the transports section to add your signed images.
+      # Each image needs a corresponding cosign public key in the cosign-keys secret.
+      # The keys must line up with the keys below
+      - name: signed
+        value: |
+          {
+            "default": [{"type": "reject"}],
+            "transports": {
+              "docker": {
+                "registry.example.com/my-image": [
+                  {
+                    "type": "sigstoreSigned",
+                    "keyPath": "kbs:///default/cosign-keys/key-0"
+                  }
+                ]
+              }
+            }
+          }
+
+  # Cosign public keys for image signature verification
+  # Required when using the "signed" policy above.
+  # Add your cosign public key files here.
+  # Generate a cosign key pair: cosign generate-key-pair
+  #- name: cosign-keys
+  #  vaultPrefixes:
+  #    - hub
+  #  fields:
+  #    - name: key-0
+  #      path: ~/.coco-pattern/cosign-key-0.pub
+
+
+  # Reference PCR measurements for the sandboxed-containers dm-verity image.
+  # Required if PCR values are validated against stashed measurements.
+  # Populate ~/.coco-pattern/measurements.json first, e.g. with the
+  # dm-verity measurements script added in this change.
+  #- name: pcrStash
+  #  vaultPrefixes:
+  #    - hub
+  #  fields:
+  #    - name: json
+  #      path: ~/.coco-pattern/measurements.json
+
-  - name: 'securityPolicyConfig'
+  - name: attestationStatus
     vaultPrefixes:
       - hub
     fields:
-      - name: osc
-        path: ~/.coco-pattern/security-policy-config.json
+      - name: status
+        value: 'attested'
+      - name: random
+        value: ''
+        onMissingValue: generate
+        vaultPolicy: validatedPatternDefaultPolicy
 
   - name: kbsPublicKey
     vaultPrefixes:
@@ -29,12 +88,6 @@
       - name: publicKey
         path: ~/.coco-pattern/kbsPublicKey
 
-  - name: kbsPrivateKey
-    vaultPrefixes:
-      - global
-    fields:
-      - name: privateKey
-        path: ~/.coco-pattern/kbsPrivateKey
 
   - name: kbsres1
     vaultPrefixes:
@@ -61,3 +114,4 @@
         value: ''
         onMissingValue: generate
         vaultPolicy: validatedPatternDefaultPolicy
+
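For the "signed" flavour above, one possible workflow (paths and the registry.example.com image are placeholders carried over from the template) is to generate a cosign key pair, place the public key where the commented-out cosign-keys secret expects it, and sanity-check the policy JSON before loading secrets:

```bash
# Generate a cosign key pair (writes cosign.key / cosign.pub in the current directory).
cosign generate-key-pair

# Stage the public key where the cosign-keys secret template points.
mkdir -p ~/.coco-pattern
cp cosign.pub ~/.coco-pattern/cosign-key-0.pub

# Sanity-check that the signed policy is valid JSON before it lands in Vault.
jq . <<'EOF'
{
  "default": [{"type": "reject"}],
  "transports": {
    "docker": {
      "registry.example.com/my-image": [
        {"type": "sigstoreSigned", "keyPath": "kbs:///default/cosign-keys/key-0"}
      ]
    }
  }
}
EOF
```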
diff --git a/values-simple.yaml b/values-simple.yaml
index a6405a2d..4689805d 100644
--- a/values-simple.yaml
+++ b/values-simple.yaml
@@ -3,6 +3,36 @@ clusterGroup:
   name: simple
   isHubCluster: true
 
+  # Override health check for Subscriptions to treat UpgradePending as healthy
+  # Only applies to pinned CSV subscriptions (sandbox and trustee)
+  argoCD:
+    resourceHealthChecks:
+      - group: operators.coreos.com
+        kind: Subscription
+        check: |
+          local hs = {}
+          -- Only apply custom logic to pinned subscriptions
+          local isPinned = (obj.metadata.name == "sandboxed-containers-operator" or
+                            obj.metadata.name == "trustee-operator")
+          if obj.status ~= nil and obj.status.state ~= nil then
+            local state = obj.status.state
+            if state == "AtLatestKnown" then
+              hs.status = "Healthy"
+              hs.message = state
+              return hs
+            elseif state == "UpgradePending" and isPinned then
+              hs.status = "Healthy"
+              hs.message = "Pinned subscription at desired version"
+              return hs
+            elseif state == "UpgradePending" then
+              hs.status = "Progressing"
+              hs.message = "Upgrade pending approval"
+              return hs
+            end
+          end
+          hs.status = "Progressing"
+          hs.message = "Waiting for Subscription to be ready"
+          return hs
   namespaces:
     - open-cluster-management
     - vault
@@ -26,14 +56,14 @@ clusterGroup:
       source: redhat-operators
       channel: stable
       installPlanApproval: Manual
-      csv: sandboxed-containers-operator.v1.10.1
+      csv: sandboxed-containers-operator.v1.11.0
     trustee:
       name: trustee-operator
      namespace: trustee-operator-system
       source: redhat-operators
       channel: stable
       installPlanApproval: Manual
-      csv: trustee-operator.v0.4.1
+      csv: trustee-operator.v1.0.0
     cert-manager:
       name: openshift-cert-manager-operator
       namespace: cert-manager-operator
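The custom Subscription health check above keys on `status.state`; a quick way to see what the pinned subscriptions actually report (names and namespaces as used in this values file):

```bash
# Pinned subscriptions may legitimately sit in UpgradePending while staying at the
# approved CSV; the Lua check above maps that combination to Healthy.
oc get subscription sandboxed-containers-operator \
  -n openshift-sandboxed-containers-operator \
  -o jsonpath='{.status.state}{"  "}{.status.installedCSV}{"\n"}'

oc get subscription trustee-operator \
  -n trustee-operator-system \
  -o jsonpath='{.status.state}{"  "}{.status.installedCSV}{"\n"}'
```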
- extraValueFiles: - - '$patternref/overrides/values-trustee.yaml' + repoURL: https://github.com/butler54/trustee-chart.git + targetRevision: merge-certs + path: ./ + # Note: extraValueFiles with $patternref don't work for external repoURL (single-source app) + # Using overrides instead to pass values directly + overrides: + - name: global.coco.secured + value: "true" + - name: kbs.secretResources[0].name + value: kbsres1 + - name: kbs.secretResources[0].key + value: secret/data/hub/kbsres1 + - name: kbs.secretResources[1].name + value: passphrase + - name: kbs.secretResources[1].key + value: secret/data/hub/passphrase + + # sandbox: + # name: sandbox + # namespace: openshift-sandboxed-containers-operator #upstream config + # project: sandbox + # chart: sandboxed-containers + # chartVersion: 0.0.* sandbox: name: sandbox namespace: openshift-sandboxed-containers-operator #upstream config project: sandbox - chart: sandboxed-containers - chartVersion: 0.0.* + repoURL: https://github.com/butler54/sandboxed-containers-chart.git + targetRevision: remove-ssh + path: ./ + sandbox-policies: name: sandbox-policies namespace: openshift-sandboxed-containers-operator #upstream config chart: sandboxed-policies chartVersion: 0.0.* - +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: stable # Letsencrypt is not required anymore for trustee. # It's only here if you need it for your needs. letsencrypt: @@ -117,7 +169,6 @@ clusterGroup: project: workloads path: charts/coco-supported/kbs-access - imperative: # NOTE: We *must* use lists and not hashes. As hashes lose ordering once parsed by helm # The default schedule is every 10 minutes: imperative.schedule