diff --git a/.github/workflows/update-md-date.yml b/.github/workflows/update-md-date.yml
new file mode 100644
index 0000000..5b2a19b
--- /dev/null
+++ b/.github/workflows/update-md-date.yml
@@ -0,0 +1,48 @@
+name: Update Last Modified Date
+
+on:
+ pull_request:
+ branches:
+ - main
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ update-date:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ ref: ${{ github.event.pull_request.head.ref }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+
+ - name: Install dependencies
+ run: pip install python-dateutil
+
+ - name: Configure Git
+ run: |
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git config --global user.name "github-actions[bot]"
+
+ - name: Update last modified date in Markdown files
+ run: python .github/workflows/update_date.py
+
+ - name: Pull (merge) remote changes, commit, and push if needed
+      env:
+        TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        BRANCH: ${{ github.event.pull_request.head.ref }}
+      run: |
+        git pull origin "$BRANCH" || echo "No merge needed"
+        git add -A
+        git commit -m "Update last modified date in Markdown files" || echo "No changes to commit"
+        git remote set-url origin "https://x-access-token:${TOKEN}@github.com/${{ github.repository }}"
+        git push origin HEAD:"$BRANCH"
diff --git a/.github/workflows/update_date.py b/.github/workflows/update_date.py
new file mode 100644
index 0000000..ab86df5
--- /dev/null
+++ b/.github/workflows/update_date.py
@@ -0,0 +1,49 @@
+import os
+import subprocess
+from datetime import datetime, timezone
+
+# Get the list of modified files
+result = subprocess.run(['git', 'diff', '--name-only', '--diff-filter=d', 'HEAD~1'], stdout=subprocess.PIPE)
+modified_files = result.stdout.decode('utf-8').splitlines()
+
+# Debugging: Print the list of modified files
+print("Modified files:", modified_files)
+
+# Filter for Markdown files
+modified_md_files = [f for f in modified_files if f.endswith('.md')]
+
+# Debugging: Print the list of modified Markdown files
+print("Modified Markdown files:", modified_md_files)
+
+# Current date
+current_date = datetime.now(timezone.utc).strftime('%Y-%m-%d')
+
+# Function to update the last modified date in a file
+def update_date_in_file(file_path):
+    with open(file_path, 'r', encoding='utf-8') as file:
+ lines = file.readlines()
+
+ updated = False
+    with open(file_path, 'w', encoding='utf-8') as file:
+ for line in lines:
+ if line.startswith('Last updated:'):
+ file.write(f'Last updated: {current_date}\n')
+ updated = True
+ else:
+ file.write(line)
+ if not updated:
+ file.write(f'\nLast updated: {current_date}\n')
+
+# Check if there are any modified Markdown files
+if not modified_md_files:
+ print("No modified Markdown files found.")
+    raise SystemExit(0)
+
+# Update the date in each modified Markdown file
+for file_path in modified_md_files:
+ print(f"Updating file: {file_path}") # Debugging: Print the file being updated
+ update_date_in_file(file_path)
+
+# Add and commit changes
+subprocess.run(['git', 'add', '-A'])
+subprocess.run(['git', 'commit', '-m', 'Update last modified date in Markdown files'])
diff --git a/.github/workflows/use-visitor-counter.yml b/.github/workflows/use-visitor-counter.yml
new file mode 100644
index 0000000..b865948
--- /dev/null
+++ b/.github/workflows/use-visitor-counter.yml
@@ -0,0 +1,86 @@
+name: Use Visitor Counter Logic
+
+on:
+ pull_request:
+ branches:
+ - main
+ schedule:
+ - cron: '0 0 * * *' # Runs daily at midnight
+ workflow_dispatch: # Allows manual triggering
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ update-visitor-count:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout current repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Shallow clone visitor counter logic
+ run: git clone --depth=1 https://github.com/brown9804/github-visitor-counter.git
+
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
+
+ - name: Install dependencies for github-visitor-counter
+ run: |
+ cd github-visitor-counter
+ npm ci
+
+ - name: Run visitor counter logic (updates markdown badges and metrics.json)
+ run: node github-visitor-counter/update_repo_views_counter.js
+ env:
+ TRAFFIC_TOKEN: ${{ secrets.TRAFFIC_TOKEN }}
+ REPO: ${{ github.repository }}
+
+ - name: Move generated metrics.json to root
+ run: mv github-visitor-counter/metrics.json .
+
+ - name: List files for debugging
+ run: |
+ ls -l
+ ls -l github-visitor-counter
+
+ - name: Clean up visitor counter logic
+ run: rm -rf github-visitor-counter
+
+ - name: Configure Git author
+ run: |
+ git config --global user.name "github-actions[bot]"
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+
+ # Commit and push logic for PR events (merge, not rebase)
+ - name: Commit and push changes (PR)
+ if: github.event_name == 'pull_request'
+      env:
+        TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        BRANCH: ${{ github.head_ref }}
+      run: |
+        git checkout "$BRANCH"
+        git pull origin "$BRANCH" || echo "No merge needed"
+        git add -A
+        git commit -m "Update visitor count" || echo "No changes to commit"
+        git remote set-url origin "https://x-access-token:${TOKEN}@github.com/${{ github.repository }}"
+        git push origin HEAD:"$BRANCH"
+
+ # Commit and push logic for non-PR events (merge, not rebase)
+ - name: Commit and push changes (non-PR)
+ if: github.event_name != 'pull_request'
+      env:
+        TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        BRANCH: ${{ github.ref_name }}
+      run: |
+        git checkout "$BRANCH" || git checkout -b "$BRANCH" "origin/$BRANCH"
+        git pull origin "$BRANCH" || echo "No merge needed"
+        git add -A
+        git commit -m "Update visitor count" || echo "No changes to commit"
+        git remote set-url origin "https://x-access-token:${TOKEN}@github.com/${{ github.repository }}"
+        git push origin HEAD:"$BRANCH"
diff --git a/README.md b/README.md
index 99bd324..fb01a9a 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ Last updated: 2025-08-27
-

-
Refresh Date: 2025-08-29
+

+
Refresh Date: 2025-09-05
diff --git a/metrics.json b/metrics.json
new file mode 100644
index 0000000..8665541
--- /dev/null
+++ b/metrics.json
@@ -0,0 +1,37 @@
+[
+ {
+ "date": "2025-07-07",
+ "count": 330,
+ "uniques": 20
+ },
+ {
+ "date": "2025-07-08",
+ "count": 159,
+ "uniques": 6
+ },
+ {
+ "date": "2025-07-10",
+ "count": 482,
+ "uniques": 1
+ },
+ {
+ "date": "2025-07-11",
+ "count": 170,
+ "uniques": 4
+ },
+ {
+ "date": "2025-07-12",
+ "count": 7,
+ "uniques": 1
+ },
+ {
+ "date": "2025-07-14",
+ "count": 130,
+ "uniques": 2
+ },
+ {
+ "date": "2025-07-15",
+ "count": 2,
+ "uniques": 1
+ }
+]
\ No newline at end of file
diff --git a/scenario1-high-decay/README.md b/scenario1-high-decay/README.md
index c4cb33c..21ff413 100644
--- a/scenario1-high-decay/README.md
+++ b/scenario1-high-decay/README.md
@@ -12,71 +12,100 @@ Last updated: 2025-08-27
> This scenario is intended to demonstrate rapid temporary file accumulation and disk degradation in Azure Functions.
+
+List of References (Click to expand)
+
+- [Kudu service overview](https://learn.microsoft.com/en-us/azure/app-service/resources-kudu)
+- [log levels types](https://learn.microsoft.com/en-us/azure/azure-functions/configure-monitoring?tabs=v2#configure-log-levels)
+- [How to configure monitoring for Azure Functions](https://learn.microsoft.com/en-us/azure/azure-functions/configure-monitoring?tabs=v2)
+- [host.json reference for Azure Functions 2.x and later](https://learn.microsoft.com/en-us/azure/azure-functions/functions-host-json#override-hostjson-values)
+- [Sampling overrides %](https://learn.microsoft.com/en-us/azure/azure-monitor/app/java-standalone-config#sampling-overrides)
+- [Sampling in Azure Monitor Application Insights with OpenTelemetry](https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-sampling)
+
+
+
+> [!NOTE]
+> Expected Results:
+> - Rapid temp file accumulation in `C:\local\Temp`
+> - Disk decay within 1-2 days
+> - Restart clears only partial space due to locked files
## Infrastructure Setup
- **App Service Plan (Windows)** - P1v3 tier for high-load testing
- - See: [./terraform-infrastructure/variables.tf](./terraform-infrastructure/variables.tf) (lines 12-21)
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (lines 35-45)
-- **Deployment Method**: Standard deployment (extracted .zip)
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (line 115)
+
+ ```terraform
+ # Service Plan
+ sku_name = "P1v3"
+ ```
+
+
+
+- **Deployment Method (Function App Environment Variables)**: Standard deployment (extracted .zip)
+
```terraform
# Force standard deployment instead of mounted package
"WEBSITE_RUN_FROM_PACKAGE" = "0"
```
-- **Application Insights**: Full logging (no sampling)
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (lines 47-56)
+
+
+
+- **Diagnostics Settings (Function App Environment Variables)**: Detailed diagnostics enabled
+
```terraform
- # No sampling configured - full logging
- sampling_percentage = 100
+ # Enable full diagnostics for troubleshooting
+ "WEBSITE_ENABLE_DETAILED_DIAGNOSTICS" = "true"
```
-- **Verbose Diagnostics**: Enabled
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (line 118)
+
+
+
+- **Logging Configuration (Function App Environment Variables)**: Verbose logging enabled. Click here to understand more about [log levels types](https://learn.microsoft.com/en-us/azure/azure-functions/configure-monitoring?tabs=v2#configure-log-levels)
+
```terraform
- # Enable full diagnostics
- "WEBSITE_ENABLE_DETAILED_DIAGNOSTICS" = "true"
+ # Set verbose logging level for better diagnostics but higher disk usage
+ "AzureFunctionsJobHost__logging__LogLevel__Default" = "Information"
+ ```
+
+
+
+- **SCM Separation (Function App Environment Variables)**: Enabled to ensure Kudu and function app run as separate processes
+
+ ```terraform
+ # Enable SCM separation for diagnostics
+ "WEBSITE_DISABLE_SCM_SEPARATION" = "false"
```
-- **Storage Logging**: Enabled
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (line 124)
+
+
+
+- **Temp Access (Function App Environment Variables)**: Explicitly enabled for diagnostics and reporting
+
```terraform
- # Log to storage account (increases I/O operations)
- "AzureWebJobsDashboard" = azurerm_storage_account.storage.primary_connection_string
+ # Enable temp file access for diagnostics
+ "WEBSITE_ENABLE_TEMP_ACCESS" = "true"
```
-- **WEBSITE_RUN_FROM_PACKAGE**: Disabled (set to "0")
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (line 115)
-- **Key Vault Integration**: Secrets stored in Azure Key Vault
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (lines 147-193)
-- **Managed Identity**: System-assigned identity for Key Vault access
- - See: [./terraform-infrastructure/main.tf](./terraform-infrastructure/main.tf) (lines 96-98)
-## Expected Results
+
-- Rapid temp file accumulation in `C:\local\Temp`
-- Disk decay within 1-2 days
-- Restart clears only partial space due to locked files
+> Overall:
-## Deployment Instructions
+
-> For detailed deployment instructions including VS Code deployment and Azure DevOps pipeline samples, see the [DEPLOYMENT.md](./DEPLOYMENT.md) guide.
+- **Application Insights**: Full logging (no sampling). Click here to understand more about [Sampling overrides %](https://learn.microsoft.com/en-us/azure/azure-monitor/app/java-standalone-config#sampling-overrides)
-1. Go to the terraform-infrastructure directory:
- ```
- cd scenario-1-high-decay/terraform-infrastructure
- ```
+ ```terraform
+ # No sampling configured - full logging
+ sampling_percentage = 100
+ ```
-2. Update the `terraform.tfvars` file with your Azure subscription ID and preferred configuration values.
+
-3. Initialize Terraform:
- ```
- terraform init
- ```
+- **Key Vault Integration**: Secrets stored in Azure Key Vault
+- **Managed Identity**: System-assigned identity for Key Vault access
-4. Apply the Terraform configuration:
- ```
- terraform apply
- ```
+## Deployment Instructions
-5. After infrastructure deployment, follow the deployment approaches in [DEPLOYMENT.md](./DEPLOYMENT.md) to publish the function app.
+1. Please follow the [Terraform Deployment guide](./terraform-infrastructure/README.md) to deploy the necessary Azure resources for the workshop.
+2. After infrastructure deployment, follow the deployment approaches in [Deployment Guide](./DEPLOYMENT.md) to publish the function app.
## Testing
@@ -86,13 +115,16 @@ Last updated: 2025-08-27
> Monitor the function app using:
-- Azure Portal > Function App > Platform features > Advanced tools (Kudu)
+- Azure Portal > Function App > Development Tools > Advanced tools ([Kudu](https://learn.microsoft.com/en-us/azure/app-service/resources-kudu))
+
+ https://github.com/user-attachments/assets/0e529115-13ae-4a2f-83ad-35c33be8bb67
+
- Application Insights
- Azure Monitor metrics
-

-
Refresh Date: 2025-08-29
+

+
Refresh Date: 2025-09-05
diff --git a/scenario1-high-decay/terraform-infrastructure/README.md b/scenario1-high-decay/terraform-infrastructure/README.md
new file mode 100644
index 0000000..60286ed
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/README.md
@@ -0,0 +1,115 @@
+# Azure Infrastructure Terraform Template
+
+Costa Rica
+
+[](https://github.com/)
+[brown9804](https://github.com/brown9804)
+
+Last updated: 2025-06-04
+
+----------
+
+
+

+
+
+
+

+
+
+## Overview
+
+Templates structure:
+
+```
+.
+├── README.md
+├────── main.tf
+├────── variables.tf
+├────── provider.tf
+├────── terraform.tfvars
+├────── outputs.tf
+```
+
+- main.tf `(Main Terraform configuration file)`: This file contains the core infrastructure code. It defines the resources you want to create, such as virtual machines, networks, and storage. It's the primary file where you describe your infrastructure in a declarative manner.
+- variables.tf `(Variable definitions)`: This file is used to define variables that can be used throughout your Terraform configuration. By using variables, you can make your configuration more flexible and reusable. For example, you can define variables for resource names, sizes, and other parameters that might change between environments.
+- provider.tf `(Provider configurations)`: Providers are plugins that Terraform uses to interact with cloud providers, SaaS providers, and other APIs. This file specifies which providers (e.g., AWS, Azure, Google Cloud) you are using and any necessary configuration for them, such as authentication details.
+- terraform.tfvars `(Variable values)`: This file contains the actual values for the variables defined in `variables.tf`. By separating variable definitions and values, you can easily switch between different sets of values for different environments (e.g., development, staging, production) without changing the main configuration files.
+- outputs.tf `(Output values)`: This file defines the output values that Terraform should return after applying the configuration. Outputs are useful for displaying information about the resources created, such as IP addresses, resource IDs, and other important details. They can also be used as inputs for other Terraform configurations or scripts.
+
+## How to execute it
+
+```mermaid
+graph TD;
+ A[az login] --> B(terraform init)
+ B --> C{Terraform provisioning stage}
+ C -->|Review| D[terraform plan]
+ C -->|Order Now| E[terraform apply]
+ C -->|Delete Resource if needed| F[terraform destroy]
+```
+
+> [!IMPORTANT]
+> Please modify `terraform.tfvars` with your information, then run the following flow. If you need more visual guidance, please check the video that illustrates the provisioning steps.
+
+1. **Login to Azure**: This command logs you into your Azure account. It opens a browser window where you can enter your Azure credentials. Once logged in, you can manage your Azure resources from the command line.
+
+ > Go to the path where Terraform files are located:
+
+ ```sh
+ cd scenario1-high-decay/terraform-infrastructure
+ ```
+
+ ```sh
+ az login
+ ```
+
+
+
+
+
+2. **Initialize Terraform**: Initializes the working directory containing the Terraform configuration files. It downloads the necessary provider plugins and sets up the backend for storing the state.
+
+ ``` sh
+ terraform init
+ ```
+
+
+
+3. **Terraform Provisioning Stage**:
+
+ - **Review**: Creates an execution plan, showing what actions Terraform will take to achieve the desired state defined in your configuration files. It uses the variable values specified in `terraform.tfvars`.
+
+ ```sh
+ terraform plan -var-file terraform.tfvars
+ ```
+
+ > At the end, you will see a message in green if everything was executed successfully:
+
+
+
+ - **Order Now**: Applies the changes required to reach the desired state of the configuration. It prompts for confirmation before making any changes. It also uses the variable values specified in `terraform.tfvars`.
+
+ ```sh
+ terraform apply -var-file terraform.tfvars
+ ```
+
+ > At the end, you will see a message in green if everything was executed successfully:
+
+
+
+ - **Remove**: Destroys the infrastructure managed by Terraform. It prompts for confirmation before deleting any resources. It also uses the variable values specified in `terraform.tfvars`.
+
+ ```sh
+ terraform destroy -var-file terraform.tfvars
+ ```
+
+ > At the end, you will see a message in green if everything was executed successfully:
+
+
+
+
+
+

+
Refresh Date: 2025-09-05
+
+
diff --git a/scenario1-high-decay/terraform-infrastructure/main.tf b/scenario1-high-decay/terraform-infrastructure/main.tf
new file mode 100644
index 0000000..4620b1e
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/main.tf
@@ -0,0 +1,343 @@
+# main.tf
+# Scenario 1: High-Load Writable Deployment with Aggressive Logging
+# Test rapid temp file accumulation and disk decay
+
+# Get current client configuration
+data "azurerm_client_config" "current" {}
+
+# Resource Group
+resource "azurerm_resource_group" "rg" {
+ name = var.resource_group_name
+ location = var.location
+
+ # Output the resource group name
+ provisioner "local-exec" {
+ command = "echo Resource Group: ${self.name}"
+ }
+}
+
+# Storage Account for Function App Runtime using Azure CLI
+locals {
+ runtime_storage_name = "struntime${var.resource_suffix}"
+}
+
+resource "null_resource" "runtime_storage_account" {
+ # Create storage account using Azure CLI
+ provisioner "local-exec" {
+    command = "az storage account create --name ${local.runtime_storage_name} --resource-group ${azurerm_resource_group.rg.name} --location \"${var.location}\" --sku Standard_LRS --https-only true --min-tls-version TLS1_2"
+ }
+
+ depends_on = [azurerm_resource_group.rg]
+}
+
+# Use data source to get info about the created storage account
+data "azurerm_storage_account" "runtime" {
+ name = local.runtime_storage_name
+ resource_group_name = azurerm_resource_group.rg.name
+
+ depends_on = [null_resource.runtime_storage_account]
+}
+
+# Storage Account for Data using Azure CLI
+locals {
+ data_storage_name = "${var.base_data_storage_name}${var.resource_suffix}"
+}
+
+resource "null_resource" "data_storage_account" {
+ # Create storage account using Azure CLI
+ provisioner "local-exec" {
+    command = "az storage account create --name ${local.data_storage_name} --resource-group ${azurerm_resource_group.rg.name} --location \"${var.location}\" --sku Standard_LRS --https-only true --min-tls-version TLS1_2"
+ }
+
+ depends_on = [azurerm_resource_group.rg]
+}
+
+# Use data source to get info about the created storage account
+data "azurerm_storage_account" "storage" {
+ name = local.data_storage_name
+ resource_group_name = azurerm_resource_group.rg.name
+
+ depends_on = [null_resource.data_storage_account]
+}
+
+# Blob Container for Output Files - create with Azure CLI
+resource "null_resource" "output_container" {
+ # Create blob container using Azure CLI
+ provisioner "local-exec" {
+ command = "az storage container create --name output --account-name ${local.data_storage_name} --auth-mode login"
+ }
+
+ depends_on = [null_resource.data_storage_account, data.azurerm_storage_account.storage]
+}
+
+# Service Plan
+resource "azurerm_service_plan" "asp" {
+ name = "${var.base_service_plan_name}${var.resource_suffix}"
+ location = azurerm_resource_group.rg.location
+ resource_group_name = azurerm_resource_group.rg.name
+ os_type = "Windows"
+ sku_name = "P1v3"
+
+ depends_on = [azurerm_resource_group.rg]
+
+ # Output the service plan name
+ provisioner "local-exec" {
+ command = "echo Service Plan: ${self.name}"
+ }
+}
+
+# Log Analytics Workspace
+resource "azurerm_log_analytics_workspace" "loganalytics" {
+ name = "${var.base_log_analytics_name}${var.resource_suffix}"
+ location = azurerm_resource_group.rg.location
+ resource_group_name = azurerm_resource_group.rg.name
+ sku = "PerGB2018"
+ retention_in_days = 30
+
+ depends_on = [azurerm_resource_group.rg]
+
+ # Output the log analytics workspace name
+ provisioner "local-exec" {
+ command = "echo Log Analytics Workspace: ${self.name}"
+ }
+}
+
+# Application Insights
+resource "azurerm_application_insights" "appinsights" {
+ name = "${var.base_app_insights_name}${var.resource_suffix}"
+ location = azurerm_resource_group.rg.location
+ resource_group_name = azurerm_resource_group.rg.name
+ application_type = "web"
+ sampling_percentage = 100 # No sampling - full logging for high decay scenario test
+ workspace_id = azurerm_log_analytics_workspace.loganalytics.id
+
+ depends_on = [azurerm_log_analytics_workspace.loganalytics]
+
+ provisioner "local-exec" {
+ command = "echo Application Insights: ${self.name}"
+ }
+}
+
+# Key Vault
+resource "azurerm_key_vault" "kv" {
+ name = "${var.base_key_vault_name}${var.resource_suffix}"
+ location = azurerm_resource_group.rg.location
+ resource_group_name = azurerm_resource_group.rg.name
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ sku_name = "standard"
+ soft_delete_retention_days = 7
+ purge_protection_enabled = false
+
+ access_policy {
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = data.azurerm_client_config.current.object_id
+
+ secret_permissions = [
+ "Get", "List", "Set", "Delete", "Purge"
+ ]
+ }
+
+ depends_on = [azurerm_resource_group.rg]
+
+ # Output the key vault name
+ provisioner "local-exec" {
+ command = "echo Key Vault: ${self.name}"
+ }
+}
+
+# SQL Server
+resource "azurerm_mssql_server" "sql_server" {
+ name = "${var.base_sql_server_name}${var.resource_suffix}"
+ resource_group_name = azurerm_resource_group.rg.name
+ location = azurerm_resource_group.rg.location
+ version = "12.0"
+ administrator_login = var.sql_server_admin_login
+ administrator_login_password = var.sql_server_admin_password
+
+ tags = {
+ Environment = var.environment
+ Scenario = "High-Decay"
+ }
+}
+
+# SQL Database
+resource "azurerm_mssql_database" "sql_db" {
+ name = var.sql_database_name
+ server_id = azurerm_mssql_server.sql_server.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ sku_name = "Basic"
+ max_size_gb = 2
+
+ tags = {
+ Environment = var.environment
+ Scenario = "High-Decay"
+ }
+}
+
+# Store SQL connection string in Key Vault
+resource "azurerm_key_vault_secret" "sql_connection_string" {
+ name = "sql-connection-string"
+ value = "Server=tcp:${azurerm_mssql_server.sql_server.fully_qualified_domain_name},1433;Initial Catalog=${azurerm_mssql_database.sql_db.name};Persist Security Info=False;User ID=${var.sql_server_admin_login};Password=${var.sql_server_admin_password};MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;"
+ key_vault_id = azurerm_key_vault.kv.id
+}
+
+# SQL Firewall rule to allow Azure services
+resource "azurerm_mssql_firewall_rule" "allow_azure_services" {
+ name = "AllowAzureServices"
+ server_id = azurerm_mssql_server.sql_server.id
+ start_ip_address = "0.0.0.0"
+ end_ip_address = "0.0.0.0"
+}
+
+# Azure Function App
+resource "azurerm_windows_function_app" "function_app" {
+ name = "${var.base_function_app_name}${var.resource_suffix}"
+ location = azurerm_resource_group.rg.location
+ resource_group_name = azurerm_resource_group.rg.name
+ service_plan_id = azurerm_service_plan.asp.id
+
+ # Use the runtime storage account for Function App requirements
+ storage_account_name = data.azurerm_storage_account.runtime.name
+ storage_uses_managed_identity = true
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ site_config {
+ application_stack {
+ dotnet_version = "v6.0" # .NET 6
+ }
+
+ ftps_state = "FtpsOnly"
+ }
+
+ # App settings for high decay scenario test
+ app_settings = {
+ "FUNCTIONS_WORKER_RUNTIME" = "dotnet"
+ "FUNCTIONS_EXTENSION_VERSION" = "~4"
+
+ # Force standard deployment instead of mounted package for high decay test
+ "WEBSITE_RUN_FROM_PACKAGE" = "0"
+
+ # Enable full diagnostics
+ "WEBSITE_ENABLE_DETAILED_DIAGNOSTICS" = "true"
+
+ # Set verbose logging
+ "AzureFunctionsJobHost__logging__LogLevel__Default" = "Information"
+
+ # Use managed identity for storage access
+ "AzureWebJobsStorage__accountName" = data.azurerm_storage_account.runtime.name
+
+ # SQL connection string - Reference to Key Vault
+ "SqlConnectionString" = "@Microsoft.KeyVault(SecretUri=${azurerm_key_vault_secret.sql_connection_string.id})"
+
+ # Application Insights settings
+ "APPINSIGHTS_INSTRUMENTATIONKEY" = azurerm_application_insights.appinsights.instrumentation_key
+ "APPLICATIONINSIGHTS_CONNECTION_STRING" = azurerm_application_insights.appinsights.connection_string
+
+ # Enable SCM separation for diagnostics
+ "WEBSITE_DISABLE_SCM_SEPARATION" = "false"
+
+ # Enable temp file access for diagnostics
+ "WEBSITE_ENABLE_TEMP_ACCESS" = "true"
+
+ # Key Vault reference
+ "AZURE_KEY_VAULT_ENDPOINT" = azurerm_key_vault.kv.vault_uri
+
+ # Data storage connection settings
+ "DataStorageConnection__accountName" = data.azurerm_storage_account.storage.name
+ }
+
+ # Ensure dependent resources are provisioned before the function app
+ depends_on = [
+ azurerm_resource_group.rg,
+ null_resource.runtime_storage_account,
+ data.azurerm_storage_account.runtime,
+ null_resource.data_storage_account,
+ data.azurerm_storage_account.storage,
+ azurerm_service_plan.asp,
+ azurerm_application_insights.appinsights,
+ azurerm_key_vault.kv
+ ]
+
+ # Force default deployment method (zip without run from package)
+ lifecycle {
+ ignore_changes = [
+ app_settings["WEBSITE_RUN_FROM_PACKAGE"]
+ ]
+ }
+}
+
+# Grant the Function App's managed identity access to Key Vault
+resource "azurerm_key_vault_access_policy" "function_app_policy" {
+ key_vault_id = azurerm_key_vault.kv.id
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ secret_permissions = [
+ "Get", "List"
+ ]
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ azurerm_key_vault.kv
+ ]
+}
+
+# Grant the Function App's managed identity access to Runtime Storage
+resource "azurerm_role_assignment" "function_runtime_storage_blob_data" {
+ scope = data.azurerm_storage_account.runtime.id
+ role_definition_name = "Storage Blob Data Contributor"
+ principal_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ data.azurerm_storage_account.runtime
+ ]
+}
+
+resource "azurerm_role_assignment" "function_runtime_storage_queue_data" {
+ scope = data.azurerm_storage_account.runtime.id
+ role_definition_name = "Storage Queue Data Contributor"
+ principal_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ data.azurerm_storage_account.runtime
+ ]
+}
+
+resource "azurerm_role_assignment" "function_runtime_storage_table_data" {
+ scope = data.azurerm_storage_account.runtime.id
+ role_definition_name = "Storage Table Data Contributor"
+ principal_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ data.azurerm_storage_account.runtime
+ ]
+}
+
+# Grant the Function App's managed identity access to Data Storage
+resource "azurerm_role_assignment" "function_data_storage_blob_data" {
+ scope = data.azurerm_storage_account.storage.id
+ role_definition_name = "Storage Blob Data Contributor"
+ principal_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ data.azurerm_storage_account.storage
+ ]
+}
+
+resource "azurerm_role_assignment" "function_data_storage_queue_data" {
+ scope = data.azurerm_storage_account.storage.id
+ role_definition_name = "Storage Queue Data Contributor"
+ principal_id = azurerm_windows_function_app.function_app.identity[0].principal_id
+
+ depends_on = [
+ azurerm_windows_function_app.function_app,
+ data.azurerm_storage_account.storage
+ ]
+}
diff --git a/scenario1-high-decay/terraform-infrastructure/outputs.tf b/scenario1-high-decay/terraform-infrastructure/outputs.tf
new file mode 100644
index 0000000..79ba14c
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/outputs.tf
@@ -0,0 +1,58 @@
+# outputs.tf
+# This file contains the outputs from the Terraform deployment
+
+output "resource_group_name" {
+ value = azurerm_resource_group.rg.name
+ description = "The name of the resource group"
+}
+
+output "function_app_name" {
+ value = azurerm_windows_function_app.function_app.name
+ description = "The name of the function app"
+}
+
+output "function_app_default_hostname" {
+ value = azurerm_windows_function_app.function_app.default_hostname
+ description = "The default hostname of the function app"
+}
+
+output "storage_account_name" {
+ value = data.azurerm_storage_account.storage.name
+ description = "The name of the storage account"
+}
+
+output "application_insights_name" {
+ value = azurerm_application_insights.appinsights.name
+ description = "The name of the Application Insights instance"
+}
+
+output "application_insights_instrumentation_key" {
+ value = azurerm_application_insights.appinsights.instrumentation_key
+ description = "The instrumentation key of the Application Insights instance"
+ sensitive = true
+}
+
+output "sql_server_name" {
+ value = azurerm_mssql_server.sql_server.name
+ description = "The name of the SQL server"
+}
+
+output "sql_server_fqdn" {
+ value = azurerm_mssql_server.sql_server.fully_qualified_domain_name
+ description = "The fully qualified domain name of the SQL server"
+}
+
+output "sql_database_name" {
+ value = azurerm_mssql_database.sql_db.name
+ description = "The name of the SQL database"
+}
+
+output "key_vault_name" {
+ value = azurerm_key_vault.kv.name
+ description = "The name of the Key Vault"
+}
+
+output "key_vault_uri" {
+ value = azurerm_key_vault.kv.vault_uri
+ description = "The URI of the Key Vault"
+}
diff --git a/scenario1-high-decay/terraform-infrastructure/provider.tf b/scenario1-high-decay/terraform-infrastructure/provider.tf
new file mode 100644
index 0000000..6dfce2e
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/provider.tf
@@ -0,0 +1,25 @@
+# provider.tf
+# This file configures the Azure provider to interact with Azure resources.
+# It specifies the required provider and its version, along with provider-specific configurations.
+
+terraform {
+ required_version = ">= 1.8, < 2.0"
+ # Specify the required provider and its version
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm" # Source of the AzureRM provider
+ version = "~> 4.16.0" # Version of the AzureRM provider
+ }
+ }
+}
+
+provider "azurerm" {
+ features { # Enable features for the AzureRM provider
+ key_vault {
+ recover_soft_deleted_key_vaults = false
+ purge_soft_delete_on_destroy = true
+ }
+ }
+
+ subscription_id = var.subscription_id
+}
diff --git a/scenario1-high-decay/terraform-infrastructure/terraform.tfvars b/scenario1-high-decay/terraform-infrastructure/terraform.tfvars
new file mode 100644
index 0000000..a9fd9e9
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/terraform.tfvars
@@ -0,0 +1,7 @@
+# Sample values — adjust before applying.
+subscription_id     = "your-subscription-id" # Target Azure subscription
+resource_group_name = "RG-FA-high-decayx2"   # Resource group to create
+location            = "East US 2"            # Azure region for all resources
+environment         = "dev"                  # Environment tag
+# SQL credentials are generated automatically and stored in Key Vault.
+resource_suffix     = "x2x" # Unique suffix appended to resource names (avoids demo collisions)
diff --git a/scenario1-high-decay/terraform-infrastructure/variables.tf b/scenario1-high-decay/terraform-infrastructure/variables.tf
new file mode 100644
index 0000000..9644ccf
--- /dev/null
+++ b/scenario1-high-decay/terraform-infrastructure/variables.tf
@@ -0,0 +1,129 @@
+variable "subscription_id" {
+  description = "Azure subscription ID"
+  type        = string
+}
+
+variable "resource_group_name" {
+  description = "Name of the resource group"
+  type        = string
+  default     = "rg-func-high-decay"
+}
+
+variable "location" {
+  description = "Azure region for resources"
+  type        = string
+  default     = "West US"
+}
+
+variable "environment" {
+  description = "Environment name"
+  type        = string
+  default     = "dev"
+}
+
+variable "storage_account_tier" {
+  description = "Storage account tier"
+  type        = string
+  default     = "Standard"
+}
+
+variable "storage_account_replication_type" {
+  description = "Storage account replication type"
+  type        = string
+  default     = "LRS"
+}
+
+variable "app_service_plan_tier" {
+  description = "App Service Plan tier (PremiumV3 tier for high performance)"
+  type        = string
+  default     = "P1v3" # NOTE(review): "P1v3" reads as a SKU size; the tier is usually "PremiumV3" — confirm against usage
+}
+
+variable "app_service_plan_size" {
+  description = "App Service Plan size (PremiumV3 tier for high performance)"
+  type        = string
+  default     = "P1v3"
+}
+
+variable "sql_server_admin_login" {
+  description = "SQL Server admin login"
+  type        = string
+  default     = "sqladmin"
+}
+
+variable "sql_server_admin_password" {
+  description = "SQL Server admin password"
+  type        = string
+  sensitive   = true
+}
+
+variable "sql_database_name" {
+  description = "SQL Database name"
+  type        = string
+  default     = "funcdb"
+}
+
+# Optional: name of an existing storage account to reuse instead of creating one.
+variable "override_storage_account_name" {
+  description = "If set, use this existing storage account name instead of creating a new one"
+  type        = string
+  default     = ""
+}
+
+# Optional: access key for the existing storage account (kept sensitive).
+variable "override_storage_account_key" {
+  description = "If set, use this storage account access key instead of the created account's key"
+  type        = string
+  default     = ""
+  sensitive   = true
+}
+
+# Optional: resource ID of an in-region App Service Plan to reuse instead of creating one.
+variable "existing_service_plan_id" {
+  description = "Resource ID of an existing App Service Plan to use instead of creating a new one"
+  type        = string
+  default     = ""
+}
+
+# Suffix appended to every resource name so demo deployments do not collide.
+variable "resource_suffix" {
+  type        = string
+  description = "Suffix to append to all resource names for uniqueness."
+}
+
+# Base names for resources (optional, for further flexibility)
+variable "base_service_plan_name" {
+  type        = string
+  default     = "asp-high-decay"
+  description = "Base name for the service plan."
+}
+variable "base_app_insights_name" {
+  type        = string
+  default     = "appi-high-decay"
+  description = "Base name for Application Insights."
+}
+variable "base_log_analytics_name" {
+  type        = string
+  default     = "log-high-decay"
+  description = "Base name for Log Analytics Workspace."
+}
+variable "base_key_vault_name" {
+  type        = string
+  default     = "kv-highdecay"
+  description = "Base name for Key Vault."
+}
+variable "base_sql_server_name" {
+  type        = string
+  default     = "sql-high-decay"
+  description = "Base name for SQL Server."
+}
+variable "base_function_app_name" {
+  type        = string
+  default     = "func-high-decay"
+  description = "Base name for Function App."
+}
+variable "base_data_storage_name" {
+  type        = string
+  default     = "sthighdecay"
+  description = "Base name for Data Storage Account."
+}