@@ -60,8 +60,8 @@ def get_env_var(name, default=None):
 
 
 def generate_stack_prefix():
-    """Generate unique stack prefix with timestamp"""
-    timestamp = datetime.now().strftime("%m%d-%H%M")  # Shorter format: MMDD-HHMM
+    """Generate unique stack prefix with timestamp including seconds"""
+    timestamp = datetime.now().strftime("%m%d-%H%M%S")  # Format: MMDD-HHMMSS
     return f"idp-{timestamp}"
 
 
@@ -222,7 +222,39 @@ def cleanup_stack(stack_name, pattern_name):
222222 """Clean up a deployed stack"""
223223 print (f"[{ pattern_name } ] Cleaning up: { stack_name } " )
224224 try :
225- run_command (f"idp-cli delete --stack-name { stack_name } --force" , check = False )
225+ # Check stack status first
226+ result = run_command (f"aws cloudformation describe-stacks --stack-name { stack_name } --query 'Stacks[0].StackStatus' --output text" , check = False )
227+ stack_status = result .stdout .strip () if result .returncode == 0 else "NOT_FOUND"
228+
229+ print (f"[{ pattern_name } ] Stack status: { stack_status } " )
230+
231+ # Delete the stack and wait for completion
232+ print (f"[{ pattern_name } ] Attempting stack deletion..." )
233+ run_command (f"idp-cli delete --stack-name { stack_name } --force --empty-buckets --wait" , check = False )
234+
235+ # Always clean up orphaned resources after deletion attempt
236+ print (f"[{ pattern_name } ] Cleaning up orphaned resources..." )
237+
238+ # Set AWS retry configuration to handle throttling
239+ os .environ ['AWS_MAX_ATTEMPTS' ] = '10'
240+ os .environ ['AWS_RETRY_MODE' ] = 'adaptive'
241+
242+ # ECR repositories
243+ stack_name_lower = stack_name .lower ()
244+ run_command (f"aws ecr describe-repositories --query 'repositories[?contains(repositoryName, `{ stack_name_lower } `)].repositoryName' --output text | xargs -r -n1 aws ecr delete-repository --repository-name --force" , check = False )
+
+        # S3 buckets (empty and delete orphaned buckets)
+        run_command(f"aws s3api list-buckets --query 'Buckets[?contains(Name, `{stack_name}`)].Name' --output text | xargs -r -n1 -I {{}} sh -c 'aws s3 rm s3://{{}} --recursive && aws s3api delete-bucket --bucket {{}}'", check=False)
+
+        # CloudWatch log groups (single comprehensive search)
+        run_command(f"aws logs describe-log-groups --query 'logGroups[?contains(logGroupName, `{stack_name}`)].logGroupName' --output text | xargs -r -n1 aws logs delete-log-group --log-group-name", check=False)
+
+        # AppSync logs (requires separate handling due to random API IDs)
+        run_command(f"aws appsync list-graphql-apis --query 'graphqlApis[?contains(name, `{stack_name}`)].apiId' --output text | xargs -r -I {{}} aws logs delete-log-group --log-group-name '/aws/appsync/apis/{{}}'", check=False)
+
+        # Clean up CloudWatch Logs Resource Policy (ignore errors if policy doesn't exist)
+        run_command(f"aws logs describe-resource-policies --query 'resourcePolicies[0].policyName' --output text | xargs -r aws logs delete-resource-policy --policy-name || true", check=False)
+
         print(f"[{pattern_name}] ✅ Cleanup completed")
     except Exception as e:
         print(f"[{pattern_name}] ⚠️ Cleanup failed: {e}")
@@ -232,7 +264,7 @@ def main():
232264 """Main execution function"""
233265 print ("Starting CodeBuild deployment process..." )
234266
235- admin_email = get_env_var ("IDP_ADMIN_EMAIL" , "strahanr @amazon.com" )
267+ admin_email = get_env_var ("IDP_ADMIN_EMAIL" , "tanimath @amazon.com" )
236268 stack_prefix = generate_stack_prefix ()
237269
238270 print (f"Stack Prefix: { stack_prefix } " )