Skip to content

Commit aea6c7f

Browse files
committed
Merge branch 'feat/enhanced-pipeline-cleanup-and-notifications' into 'develop'
Enhanced Pipeline Cleanup and Notification Improvements See merge request genaiic-reusable-assets/engagement-artifacts/genaiic-idp-accelerator!403
2 parents 0df559e + 65e6d1e commit aea6c7f

File tree

3 files changed

+80
-3
lines changed

3 files changed

+80
-3
lines changed

.gitlab-ci.yml

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,3 +136,43 @@ integration_tests:
136136

137137
# Run integration test deployment
138138
- python3 scripts/integration_test_deployment.py
139+
140+
after_script:
141+
# Capture CodeBuild logs using the tracked execution ID
142+
- |
143+
echo "=== IDP Pipeline Results ===" > pipeline_summary.txt
144+
echo "Branch: $CI_COMMIT_REF_NAME" >> pipeline_summary.txt
145+
echo "Commit: $CI_COMMIT_SHA" >> pipeline_summary.txt
146+
echo "Status: $CI_JOB_STATUS" >> pipeline_summary.txt
147+
echo "" >> pipeline_summary.txt
148+
149+
# Get CodeBuild logs using the exact execution ID from Python script
150+
if [ -f "pipeline_execution_id.txt" ]; then
151+
EXECUTION_ID=$(cat pipeline_execution_id.txt)
152+
echo "Pipeline Execution: $EXECUTION_ID" >> pipeline_summary.txt
153+
echo "" >> pipeline_summary.txt
154+
155+
# Get CodeBuild ID from the pipeline execution
156+
BUILD_ID=$(aws codepipeline list-action-executions --pipeline-name ${IDP_PIPELINE_NAME:-idp-sdlc-deploy-pipeline} --filter pipelineExecutionId=$EXECUTION_ID --query 'actionExecutionDetails[?actionName==`BuildAction`].output.executionResult.externalExecutionId' --output text 2>/dev/null || echo "")
157+
158+
if [ "$BUILD_ID" != "" ] && [ "$BUILD_ID" != "None" ]; then
159+
echo "CodeBuild ID: $BUILD_ID" >> pipeline_summary.txt
160+
# Extract just the build ID part (after the colon)
161+
LOG_STREAM_NAME="${BUILD_ID#*:}"
162+
echo "Log Stream: $LOG_STREAM_NAME" >> pipeline_summary.txt
163+
echo "" >> pipeline_summary.txt
164+
echo "=== CODEBUILD LOGS ===" >> pipeline_summary.txt
165+
aws logs get-log-events --log-group-name "/aws/codebuild/app-sdlc" --log-stream-name "$LOG_STREAM_NAME" --limit 100 --query 'events[].message' --output text 2>/dev/null >> pipeline_summary.txt || echo "Could not retrieve CodeBuild logs" >> pipeline_summary.txt
166+
else
167+
echo "Could not find CodeBuild execution" >> pipeline_summary.txt
168+
fi
169+
else
170+
echo "No pipeline execution ID found" >> pipeline_summary.txt
171+
fi
172+
173+
artifacts:
174+
when: always
175+
paths:
176+
- pipeline_summary.txt
177+
- pipeline_execution_id.txt
178+
expire_in: 1 week

scripts/codebuild_deployment.py

Lines changed: 35 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -60,8 +60,8 @@ def get_env_var(name, default=None):
6060

6161

6262
def generate_stack_prefix():
    """Return a unique stack prefix of the form ``idp-MMDD-HHMMSS``.

    The second-resolution timestamp keeps prefixes unique across pipeline
    runs that start within the same minute.
    """
    now = datetime.now()
    return f"idp-{now:%m%d-%H%M%S}"
6666

6767

@@ -222,7 +222,39 @@ def cleanup_stack(stack_name, pattern_name):
222222
"""Clean up a deployed stack"""
223223
print(f"[{pattern_name}] Cleaning up: {stack_name}")
224224
try:
225-
run_command(f"idp-cli delete --stack-name {stack_name} --force", check=False)
225+
# Check stack status first
226+
result = run_command(f"aws cloudformation describe-stacks --stack-name {stack_name} --query 'Stacks[0].StackStatus' --output text", check=False)
227+
stack_status = result.stdout.strip() if result.returncode == 0 else "NOT_FOUND"
228+
229+
print(f"[{pattern_name}] Stack status: {stack_status}")
230+
231+
# Delete the stack and wait for completion
232+
print(f"[{pattern_name}] Attempting stack deletion...")
233+
run_command(f"idp-cli delete --stack-name {stack_name} --force --empty-buckets --wait", check=False)
234+
235+
# Always clean up orphaned resources after deletion attempt
236+
print(f"[{pattern_name}] Cleaning up orphaned resources...")
237+
238+
# Set AWS retry configuration to handle throttling
239+
os.environ['AWS_MAX_ATTEMPTS'] = '10'
240+
os.environ['AWS_RETRY_MODE'] = 'adaptive'
241+
242+
# ECR repositories
243+
stack_name_lower = stack_name.lower()
244+
run_command(f"aws ecr describe-repositories --query 'repositories[?contains(repositoryName, `{stack_name_lower}`)].repositoryName' --output text | xargs -r -n1 aws ecr delete-repository --repository-name --force", check=False)
245+
246+
# S3 buckets (empty and delete orphaned buckets)
247+
run_command(f"aws s3api list-buckets --query 'Buckets[?contains(Name, `{stack_name}`)].Name' --output text | xargs -r -n1 -I {{}} sh -c 'aws s3 rm s3://{{}} --recursive && aws s3api delete-bucket --bucket {{}}'", check=False)
248+
249+
# CloudWatch log groups (single comprehensive search)
250+
run_command(f"aws logs describe-log-groups --query 'logGroups[?contains(logGroupName, `{stack_name}`)].logGroupName' --output text | xargs -r -n1 aws logs delete-log-group --log-group-name", check=False)
251+
252+
# AppSync logs (requires separate handling due to random API IDs)
253+
run_command(f"aws appsync list-graphql-apis --query 'graphqlApis[?contains(name, `{stack_name}`)].apiId' --output text | xargs -r -I {{}} aws logs delete-log-group --log-group-name '/aws/appsync/apis/{{}}'", check=False)
254+
255+
# Clean up CloudWatch Logs Resource Policy (ignore errors if policy doesn't exist)
256+
run_command(f"aws logs describe-resource-policies --query 'resourcePolicies[0].policyName' --output text | xargs -r aws logs delete-resource-policy --policy-name || true", check=False)
257+
226258
print(f"[{pattern_name}] ✅ Cleanup completed")
227259
except Exception as e:
228260
print(f"[{pattern_name}] ⚠️ Cleanup failed: {e}")

scripts/integration_test_deployment.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,11 @@ def monitor_pipeline(pipeline_name, version_id, max_wait=7200):
173173

174174
if not execution_id:
175175
return False
176+
177+
# Write execution ID to file for GitLab CI to use
178+
with open("pipeline_execution_id.txt", "w") as f:
179+
f.write(execution_id)
180+
print(f"Pipeline execution ID written to file: {execution_id}")
176181

177182
# Then monitor that specific execution
178183
return monitor_pipeline_execution(pipeline_name, execution_id, max_wait)

0 commit comments

Comments (0)