From 6644ac7da6d78cbf7a0d8e04e5abda8a8d1788f4 Mon Sep 17 00:00:00 2001
From: Douglas Hensel
Date: Thu, 18 Dec 2025 13:09:39 -0500
Subject: [PATCH] suggested edits to deploy two instances

---
 CLAUDE.md                                     | 55 ++++++++++++++++-
 deploy/Makefile                               | 31 +++++++++++
 deploy/README.md                              | 59 ++++++++++++++++++++
 deploy/aws-hypervisor/.gitignore              |  1 +
 deploy/aws-hypervisor/instance.env.template   |  8 ++-
 deploy/aws-hypervisor/scripts/common.sh       |  6 +-
 deploy/aws-hypervisor/scripts/create.sh       |  4 ++
 deploy/aws-hypervisor/scripts/destroy.sh      |  2 +
 deploy/aws-hypervisor/scripts/inventory.sh    | 13 ++---
 deploy/aws-hypervisor/scripts/ssh.sh          |  2 +
 deploy/aws-hypervisor/scripts/start.sh        |  2 +
 deploy/openshift-clusters/.gitignore          |  3 +
 .../roles/proxy-setup/defaults/main.yml       |  7 ++-
 .../roles/proxy-setup/tasks/credentials.yml   | 11 +++-
 .../roles/proxy-setup/tasks/environment.yml   |  2 +-
 .../scripts/deploy-arbiter-ipi.sh             | 30 ++++++----
 .../scripts/deploy-fencing-ipi.sh             | 30 ++++++----
 17 files changed, 228 insertions(+), 38 deletions(-)

diff --git a/CLAUDE.md b/CLAUDE.md
index 9fc8463..10b4da8 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -12,17 +12,29 @@ Two-Node Toolbox (TNF) is a comprehensive deployment automation framework for Op
 
 ```bash
 # From the deploy/ directory:
-# Deploy AWS hypervisor and cluster in one command
-make deploy arbiter-ipi  # Deploy arbiter topology cluster
+# Single deployment (default)
+make deploy arbiter-ipi          # Deploy arbiter topology cluster
 make deploy fencing-ipi          # Deploy fencing topology cluster
 
-# Instance lifecycle management
+# Multi-deployment support (create separate parallel deployments)
+# Specify DEPLOYMENT_ID to manage multiple environments
+make deploy DEPLOYMENT_ID=dev1 arbiter-ipi   # First deployment
+make deploy DEPLOYMENT_ID=dev2 fencing-ipi   # Second deployment
+
+# List all deployments
+make list-deployments
+
+# Instance lifecycle management (use DEPLOYMENT_ID for specific deployment)
 make create                      # Create new EC2 instance
 make init                        # Initialize deployed instance
 make start                       # Start stopped EC2 instance
 make stop                        # Stop running EC2 instance
 make destroy                     # Destroy EC2 instance and resources
 
+# Example: Manage specific deployment
+make ssh DEPLOYMENT_ID=dev1      # SSH into dev1 deployment
+make destroy DEPLOYMENT_ID=dev2  # Destroy dev2 deployment
+
 # Cluster operations
 make redeploy-cluster            # Redeploy OpenShift cluster using dev-scripts
 make shutdown-cluster            # Shutdown cluster VMs
@@ -37,6 +49,43 @@ make info                        # Display instance information
 make inventory                   # Update inventory.ini with current instance IP
 ```
 
+### Multi-Deployment Workflows
+
+The toolbox supports managing multiple parallel deployments, useful for QE testing and comparison scenarios.
+
+```bash
+# Example: Compare RHEL 9.6 vs RHEL 10
+# Edit instance.env.template for each deployment to set RHEL_VERSION
+
+# Create first deployment (RHEL 9.6)
+DEPLOYMENT_ID=rhel96 make deploy fencing-ipi
+
+# Create second deployment (RHEL 10)
+DEPLOYMENT_ID=rhel10 make deploy fencing-ipi
+
+# Access specific deployment
+source deploy/openshift-clusters/deployments/rhel96/proxy.env
+export KUBECONFIG=deploy/openshift-clusters/deployments/rhel96/kubeconfig
+oc get nodes
+
+# Switch to other deployment
+source deploy/openshift-clusters/deployments/rhel10/proxy.env
+export KUBECONFIG=deploy/openshift-clusters/deployments/rhel10/kubeconfig
+oc get nodes
+
+# List all deployments
+make list-deployments
+
+# Cleanup specific deployment
+make destroy DEPLOYMENT_ID=rhel96
+```
+
+**Important Notes:**
+- Each deployment gets its own EC2 instance (separate hypervisors)
+- Deployment state stored in `instance-data-${DEPLOYMENT_ID}/`
+- Cluster configs stored in `deployments/${DEPLOYMENT_ID}/`
+- Default DEPLOYMENT_ID: `${USER}-dev`
+
 ### Ansible Deployment Methods
 
 #### Dev-scripts Method (Traditional)
diff --git a/deploy/Makefile b/deploy/Makefile
index a6fd992..76f41f3 100644
--- a/deploy/Makefile
+++ b/deploy/Makefile
@@ -1,3 +1,9 @@
+# Multi-deployment support: specify DEPLOYMENT_ID to manage multiple environments
+# Examples: make deploy DEPLOYMENT_ID=dev1
+#           make ssh DEPLOYMENT_ID=dev2
+DEPLOYMENT_ID ?= ${USER}-dev
+export DEPLOYMENT_ID
+
 create:
 	@./aws-hypervisor/scripts/create.sh
 
@@ -60,9 +66,34 @@ patch-nodes:
 get-tnf-logs:
 	@./openshift-clusters/scripts/get-tnf-logs.sh
 
+list-deployments:
+	@echo "Listing all deployments:"
+	@for dir in aws-hypervisor/instance-data-*; do \
+		if [ -d "$$dir" ]; then \
+			deployment=$$(basename $$dir | sed 's/instance-data-//'); \
+			echo "  - $$deployment"; \
+			if [ -f "$$dir/aws-instance-id" ]; then \
+				instance_id=$$(cat $$dir/aws-instance-id); \
+				echo "    Instance ID: $$instance_id"; \
+			fi; \
+			if [ -f "$$dir/public_address" ]; then \
+				ip=$$(cat $$dir/public_address); \
+				echo "    Public IP: $$ip"; \
+			fi; \
+		fi; \
+	done
+
 help:
 	@echo "Available commands:"
 	@echo ""
+	@echo "Multi-Deployment Support:"
+	@echo "  Use DEPLOYMENT_ID parameter to manage multiple parallel deployments"
+	@echo "  Example: make deploy DEPLOYMENT_ID=dev1"
+	@echo "           make ssh DEPLOYMENT_ID=dev1"
+	@echo "  Default: DEPLOYMENT_ID=${USER}-dev"
+	@echo ""
+	@echo "  list-deployments - List all existing deployments with their details"
+	@echo ""
 	@echo "Instance Lifecycle Management:"
 	@echo "  deploy           - Create, initialize, and update inventory for new EC2 instance"
 	@echo "  create           - Create new EC2 instance"
diff --git a/deploy/README.md b/deploy/README.md
index f910805..62466fe 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -59,6 +59,65 @@ $ make deploy
 
 This will create the instance, initialize it, and update the inventory in one command, placing you in a login shell for the EC2 instance.
 
+### Multi-Deployment Support
+
+The toolbox supports managing multiple parallel deployments, useful for QE testing and comparison scenarios (e.g., testing different RHEL versions or cluster topologies simultaneously).
+
+#### Creating Multiple Deployments
+
+Specify a `DEPLOYMENT_ID` parameter to create and manage separate deployments:
+
+```bash
+# Create first deployment
+$ make deploy DEPLOYMENT_ID=dev1
+
+# Create second deployment
+$ make deploy DEPLOYMENT_ID=dev2
+
+# List all deployments
+$ make list-deployments
+```
+
+#### Managing Specific Deployments
+
+All commands accept the `DEPLOYMENT_ID` parameter:
+
+```bash
+# SSH into specific deployment
+$ make ssh DEPLOYMENT_ID=dev1
+
+# Deploy cluster to specific deployment
+$ make arbiter-ipi DEPLOYMENT_ID=dev1
+$ make fencing-ipi DEPLOYMENT_ID=dev2
+
+# Destroy specific deployment
+$ make destroy DEPLOYMENT_ID=dev1
+```
+
+#### Accessing Cluster Credentials
+
+Each deployment has its own directory with cluster credentials:
+
+```bash
+# Access first deployment's cluster
+$ source openshift-clusters/deployments/dev1/proxy.env
+$ export KUBECONFIG=openshift-clusters/deployments/dev1/kubeconfig
+$ oc get nodes
+
+# Switch to second deployment's cluster
+$ source openshift-clusters/deployments/dev2/proxy.env
+$ export KUBECONFIG=openshift-clusters/deployments/dev2/kubeconfig
+$ oc get nodes
+```
+
+#### Important Notes
+
+- Each deployment creates a separate EC2 instance (separate hypervisors)
+- Default DEPLOYMENT_ID: `${USER}-dev` (maintains backward compatibility)
+- State stored in `aws-hypervisor/instance-data-${DEPLOYMENT_ID}/`
+- Cluster configs stored in `openshift-clusters/deployments/${DEPLOYMENT_ID}/`
+- Proxy containers are deployment-specific: `external-squid-${DEPLOYMENT_ID}`
+
 ### Recommended Instance Reuse Workflow
 
 For quickly reusing an existing instance with a fresh cluster deployment:
diff --git a/deploy/aws-hypervisor/.gitignore b/deploy/aws-hypervisor/.gitignore
index 8b0059a..df09294 100644
--- a/deploy/aws-hypervisor/.gitignore
+++ b/deploy/aws-hypervisor/.gitignore
@@ -1,3 +1,4 @@
 instance.env
 instance-data
+instance-data-*
 pull_secret.json
\ No newline at end of file
diff --git a/deploy/aws-hypervisor/instance.env.template b/deploy/aws-hypervisor/instance.env.template
index da7746d..e94025e 100644
--- a/deploy/aws-hypervisor/instance.env.template
+++ b/deploy/aws-hypervisor/instance.env.template
@@ -1,7 +1,11 @@
-export SHARED_DIR="instance-data"
+# Deployment identifier - change this to create separate deployments
+# Examples: dev1, dev2, qa1, rhel96, rhel10
+export DEPLOYMENT_ID="${DEPLOYMENT_ID:-${USER}-dev}"
+
+export SHARED_DIR="instance-data-${DEPLOYMENT_ID}"
 
 export AWS_PROFILE=microshift-dev
-export STACK_NAME=${USER}-dev
+export STACK_NAME="${DEPLOYMENT_ID}"
 export RHEL_HOST_ARCHITECTURE=x86_64
 export REGION=us-west-2
 export EC2_INSTANCE_TYPE="c5n.metal"
diff --git a/deploy/aws-hypervisor/scripts/common.sh b/deploy/aws-hypervisor/scripts/common.sh
index bbdb212..53b47e0 100755
--- a/deploy/aws-hypervisor/scripts/common.sh
+++ b/deploy/aws-hypervisor/scripts/common.sh
@@ -4,8 +4,10 @@ SCRIPT_DIR=$(dirname "$0")
 source "${SCRIPT_DIR}/../instance.env"
 
 # Set defaults
-export STACK_NAME="${STACK_NAME:-${USER}-dev}"
-export SHARED_DIR="${SHARED_DIR:-instance-data}"
+# Support multi-deployment via DEPLOYMENT_ID
+export DEPLOYMENT_ID="${DEPLOYMENT_ID:-${USER}-dev}"
+export STACK_NAME="${STACK_NAME:-${DEPLOYMENT_ID}}"
+export SHARED_DIR="${SHARED_DIR:-instance-data-${DEPLOYMENT_ID}}"
 export RHEL_HOST_ARCHITECTURE="${RHEL_HOST_ARCHITECTURE:-x86_64}"
 export EC2_INSTANCE_TYPE="${EC2_INSTANCE_TYPE:-c5n.metal}"
 export RHEL_VERSION="${RHEL_VERSION:-9.6}"
diff --git a/deploy/aws-hypervisor/scripts/create.sh b/deploy/aws-hypervisor/scripts/create.sh
index 99d5737..811b3e4 100755
--- a/deploy/aws-hypervisor/scripts/create.sh
+++ b/deploy/aws-hypervisor/scripts/create.sh
@@ -11,6 +11,10 @@ set -o pipefail
 #Save stacks events
 trap 'save_stack_events' EXIT TERM INT
 
+msg_info "Creating deployment: ${DEPLOYMENT_ID}"
+msg_info "Stack name: ${STACK_NAME}"
+msg_info "State directory: ${SHARED_DIR}"
+
 mkdir -p "${SCRIPT_DIR}/../${SHARED_DIR}"
 
 cf_tpl_file="${SCRIPT_DIR}/../${SHARED_DIR}/${STACK_NAME}-cf-tpl.yaml"
diff --git a/deploy/aws-hypervisor/scripts/destroy.sh b/deploy/aws-hypervisor/scripts/destroy.sh
index 9bd8d3d..719b64b 100755
--- a/deploy/aws-hypervisor/scripts/destroy.sh
+++ b/deploy/aws-hypervisor/scripts/destroy.sh
@@ -4,6 +4,8 @@ SCRIPT_DIR=$(dirname "$0")
 # shellcheck source=/dev/null
 source "${SCRIPT_DIR}/common.sh"
 
+msg_info "Destroying deployment: ${DEPLOYMENT_ID}"
+
 # Check if instance data directory exists and has the required files
 instance_data_dir="${SCRIPT_DIR}/../${SHARED_DIR}"
 public_address_file="${instance_data_dir}/public_address"
diff --git a/deploy/aws-hypervisor/scripts/inventory.sh b/deploy/aws-hypervisor/scripts/inventory.sh
index b186ab6..f7a9092 100755
--- a/deploy/aws-hypervisor/scripts/inventory.sh
+++ b/deploy/aws-hypervisor/scripts/inventory.sh
@@ -8,10 +8,14 @@ set -o nounset
 set -o errexit
 set -o pipefail
 
-# Paths
-INVENTORY_DIR="${SCRIPT_DIR}/../../openshift-clusters"
+# Paths - use deployment-specific directory
+INVENTORY_BASE_DIR="${SCRIPT_DIR}/../../openshift-clusters"
+INVENTORY_DIR="${INVENTORY_BASE_DIR}/deployments/${DEPLOYMENT_ID}"
 INVENTORY_FILE="${INVENTORY_DIR}/inventory.ini"
-INVENTORY_TEMPLATE="${INVENTORY_DIR}/inventory.ini.sample"
+INVENTORY_TEMPLATE="${INVENTORY_BASE_DIR}/inventory.ini.sample"
+
+# Create deployment-specific directory if it doesn't exist
+mkdir -p "${INVENTORY_DIR}"
 
 # Check if instance data exists
 if [[ ! -f "${SCRIPT_DIR}/../${SHARED_DIR}/public_address" ]]; then
@@ -28,9 +32,10 @@ fi
 PUBLIC_IP="$(< "${SCRIPT_DIR}/../${SHARED_DIR}/public_address" tr -d '\n')"
 SSH_USER="$(< "${SCRIPT_DIR}/../${SHARED_DIR}/ssh_user" tr -d '\n')"
 
-echo "Updating inventory with:"
+echo "Updating inventory for deployment '${DEPLOYMENT_ID}' with:"
 echo "  User: ${SSH_USER}"
 echo "  IP: ${PUBLIC_IP}"
+echo "  Inventory: ${INVENTORY_FILE}"
 
 # Function to update inventory file using Python ConfigParser
 function update_config() {
diff --git a/deploy/aws-hypervisor/scripts/ssh.sh b/deploy/aws-hypervisor/scripts/ssh.sh
index 30a94cf..7889772 100755
--- a/deploy/aws-hypervisor/scripts/ssh.sh
+++ b/deploy/aws-hypervisor/scripts/ssh.sh
@@ -3,6 +3,8 @@ SCRIPT_DIR=$(dirname "$0")
 # shellcheck source=/dev/null
 source "${SCRIPT_DIR}/common.sh"
 
+msg_info "Connecting to deployment: ${DEPLOYMENT_ID}"
+
 instance_ip="$(cat "${SCRIPT_DIR}/../${SHARED_DIR}/ssh_user")@$(cat "${SCRIPT_DIR}/../${SHARED_DIR}/public_address")"
 
 # Use the private key corresponding to the configured public key
diff --git a/deploy/aws-hypervisor/scripts/start.sh b/deploy/aws-hypervisor/scripts/start.sh
index b2445fc..96e4660 100755
--- a/deploy/aws-hypervisor/scripts/start.sh
+++ b/deploy/aws-hypervisor/scripts/start.sh
@@ -8,6 +8,8 @@ set -o nounset
 set -o errexit
 set -o pipefail
 
+msg_info "Starting deployment: ${DEPLOYMENT_ID}"
+
 # Check if the instance exists and get its ID
 if [[ ! -f "${SCRIPT_DIR}/../${SHARED_DIR}/aws-instance-id" ]]; then
     echo "Error: No instance found. Please run 'make deploy' first."
diff --git a/deploy/openshift-clusters/.gitignore b/deploy/openshift-clusters/.gitignore
index 7c14539..669dc3c 100644
--- a/deploy/openshift-clusters/.gitignore
+++ b/deploy/openshift-clusters/.gitignore
@@ -5,6 +5,9 @@ kubeadmin-password
 *.pyc
 *.pyo
 
+# Deployment-specific directories (multi-deployment support)
+deployments/
+
 # Local variable override files
 vars/*.local
 vars/*-local.yml
diff --git a/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml b/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
index cc2e7a4..c70d5f9 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/defaults/main.yml
@@ -1,11 +1,14 @@
 ---
 # Default variables for proxy-setup role
 
+# Deployment identifier for multi-deployment support
+deployment_id: "{{ lookup('env', 'DEPLOYMENT_ID') | default(lookup('env', 'USER') + '-dev', true) }}"
+
 # Proxy configuration
 proxy_port: 8213
 
-# Container configuration
-proxy_container_name: external-squid
+# Container configuration - deployment-specific to avoid conflicts
+proxy_container_name: "external-squid-{{ deployment_id }}"
 proxy_image: quay.io/openshifttest/squid-proxy:multiarch
 
 # Network configuration
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
index 63c6c31..9d172b1 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/credentials.yml
@@ -1,14 +1,21 @@
 ---
 # Fetch cluster credentials for proxy setup
 
+- name: Ensure deployment-specific directory exists
+  file:
+    path: "./deployments/{{ deployment_id }}"
+    state: directory
+    mode: '0755'
+  delegate_to: localhost
+
 - name: Get kubeconfig
   fetch:
     src: "{{ kubeconfig_path }}"
-    dest: ./kubeconfig
+    dest: "./deployments/{{ deployment_id }}/kubeconfig"
     flat: true
 
 - name: Get kubeadmin-password
   fetch:
     src: "{{ kubeadmin_password_path }}"
-    dest: ./kubeadmin-password
+    dest: "./deployments/{{ deployment_id }}/kubeadmin-password"
     flat: true
\ No newline at end of file
diff --git a/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml b/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
index 0240d57..e185956 100644
--- a/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
+++ b/deploy/openshift-clusters/roles/proxy-setup/tasks/environment.yml
@@ -34,5 +34,5 @@
       else
         echo "✓ KUBECONFIG file found"
       fi
-    dest: "./proxy.env"
+    dest: "./deployments/{{ deployment_id }}/proxy.env"
  delegate_to: localhost
\ No newline at end of file
diff --git a/deploy/openshift-clusters/scripts/deploy-arbiter-ipi.sh b/deploy/openshift-clusters/scripts/deploy-arbiter-ipi.sh
index 50b9fe2..1771219 100755
--- a/deploy/openshift-clusters/scripts/deploy-arbiter-ipi.sh
+++ b/deploy/openshift-clusters/scripts/deploy-arbiter-ipi.sh
@@ -9,19 +9,28 @@ set -o nounset
 set -o errexit
 set -o pipefail
 
+# Get deployment ID from environment or use default
+DEPLOYMENT_ID="${DEPLOYMENT_ID:-${USER}-dev}"
+INSTANCE_DATA_DIR="${DEPLOY_DIR}/aws-hypervisor/instance-data-${DEPLOYMENT_ID}"
+DEPLOYMENT_DIR="${DEPLOY_DIR}/openshift-clusters/deployments/${DEPLOYMENT_ID}"
+INVENTORY_FILE="${DEPLOYMENT_DIR}/inventory.ini"
+
+echo "Deployment ID: ${DEPLOYMENT_ID}"
+
 # Check if instance data exists
-if [[ ! -f "${DEPLOY_DIR}/aws-hypervisor/instance-data/aws-instance-id" ]]; then
-    echo "Error: No instance found. Please run 'make deploy' first."
+if [[ ! -f "${INSTANCE_DATA_DIR}/aws-instance-id" ]]; then
-f "${INSTANCE_DATA_DIR}/aws-instance-id" ]]; then + echo "Error: No instance found for deployment '${DEPLOYMENT_ID}'." + echo "Please run 'make deploy DEPLOYMENT_ID=${DEPLOYMENT_ID}' first." exit 1 fi -echo "Deploying arbiter IPI cluster..." +echo "Deploying arbiter IPI cluster for deployment '${DEPLOYMENT_ID}'..." -# Check if inventory.ini exists in the openshift-clusters directory -if [[ ! -f "${DEPLOY_DIR}/openshift-clusters/inventory.ini" ]]; then - echo "Error: inventory.ini not found in ${DEPLOY_DIR}/openshift-clusters/" +# Check if inventory.ini exists +if [[ ! -f "${INVENTORY_FILE}" ]]; then + echo "Error: inventory.ini not found at ${INVENTORY_FILE}" echo "Please ensure the inventory file is properly configured." - echo "You can run 'make inventory' to update it with current instance information." + echo "You can run 'make inventory DEPLOYMENT_ID=${DEPLOYMENT_ID}' to update it." exit 1 fi @@ -30,15 +39,14 @@ echo "Running Ansible setup playbook with arbiter topology in non-interactive mo cd "${DEPLOY_DIR}/openshift-clusters" # Run the setup playbook with arbiter topology and non-interactive mode -if ansible-playbook setup.yml -e "topology=arbiter" -e "interactive_mode=false" -i inventory.ini; +if ansible-playbook setup.yml -e "topology=arbiter" -e "interactive_mode=false" -i "${INVENTORY_FILE}"; then echo "" echo "✓ OpenShift arbiter cluster deployment completed successfully!" echo "" echo "Next steps:" - echo "1. Source the proxy environment from anywhere:" - echo " source ${DEPLOY_DIR}/openshift-clusters/proxy.env" - echo " (or from openshift-clusters directory: source proxy.env)" + echo "1. Source the proxy environment for deployment '${DEPLOYMENT_ID}':" + echo " source ${DEPLOYMENT_DIR}/proxy.env" echo "2. Verify cluster access: oc get nodes" echo "3. Access the cluster console if needed" else diff --git a/deploy/openshift-clusters/scripts/deploy-fencing-ipi.sh b/deploy/openshift-clusters/scripts/deploy-fencing-ipi.sh index 967da8c..2e2633c 100755 --- a/deploy/openshift-clusters/scripts/deploy-fencing-ipi.sh +++ b/deploy/openshift-clusters/scripts/deploy-fencing-ipi.sh @@ -9,19 +9,28 @@ set -o nounset set -o errexit set -o pipefail +# Get deployment ID from environment or use default +DEPLOYMENT_ID="${DEPLOYMENT_ID:-${USER}-dev}" +INSTANCE_DATA_DIR="${DEPLOY_DIR}/aws-hypervisor/instance-data-${DEPLOYMENT_ID}" +DEPLOYMENT_DIR="${DEPLOY_DIR}/openshift-clusters/deployments/${DEPLOYMENT_ID}" +INVENTORY_FILE="${DEPLOYMENT_DIR}/inventory.ini" + +echo "Deployment ID: ${DEPLOYMENT_ID}" + # Check if instance data exists -if [[ ! -f "${DEPLOY_DIR}/aws-hypervisor/instance-data/aws-instance-id" ]]; then - echo "Error: No instance found. Please run 'make deploy' first." +if [[ ! -f "${INSTANCE_DATA_DIR}/aws-instance-id" ]]; then + echo "Error: No instance found for deployment '${DEPLOYMENT_ID}'." + echo "Please run 'make deploy DEPLOYMENT_ID=${DEPLOYMENT_ID}' first." exit 1 fi -echo "Deploying fencing IPI cluster..." +echo "Deploying fencing IPI cluster for deployment '${DEPLOYMENT_ID}'..." -# Check if inventory.ini exists in the openshift-clusters directory -if [[ ! -f "${DEPLOY_DIR}/openshift-clusters/inventory.ini" ]]; then - echo "Error: inventory.ini not found in ${DEPLOY_DIR}/openshift-clusters/" +# Check if inventory.ini exists +if [[ ! -f "${INVENTORY_FILE}" ]]; then + echo "Error: inventory.ini not found at ${INVENTORY_FILE}" echo "Please ensure the inventory file is properly configured." - echo "You can run 'make inventory' to update it with current instance information." 
+ echo "You can run 'make inventory DEPLOYMENT_ID=${DEPLOYMENT_ID}' to update it." exit 1 fi @@ -30,15 +39,14 @@ echo "Running Ansible setup playbook with fencing topology in non-interactive mo cd "${DEPLOY_DIR}/openshift-clusters" # Run the setup playbook with fencing topology and non-interactive mode -if ansible-playbook setup.yml -e "topology=fencing" -e "interactive_mode=false" -i inventory.ini; +if ansible-playbook setup.yml -e "topology=fencing" -e "interactive_mode=false" -i "${INVENTORY_FILE}"; then echo "" echo "✓ OpenShift fencing cluster deployment completed successfully!" echo "" echo "Next steps:" - echo "1. Source the proxy environment from anywhere:" - echo " source ${DEPLOY_DIR}/openshift-clusters/proxy.env" - echo " (or from openshift-clusters directory: source proxy.env)" + echo "1. Source the proxy environment for deployment '${DEPLOYMENT_ID}':" + echo " source ${DEPLOYMENT_DIR}/proxy.env" echo "2. Verify cluster access: oc get nodes" echo "3. Access the cluster console if needed" else