Merged
15 changes: 1 addition & 14 deletions terraform/account-wide-infrastructure/modules/athena/athena.tf
@@ -1,16 +1,3 @@
resource "aws_athena_database" "reporting-db" {
name = var.database

bucket = var.target_bucket_name

encryption_configuration {
encryption_option = "SSE_KMS"
kms_key = aws_kms_key.athena.arn
}

force_destroy = true
}

resource "aws_athena_workgroup" "athena" {
name = "${var.name_prefix}-athena-wg"

@@ -19,7 +6,7 @@ resource "aws_athena_workgroup" "athena" {
   publish_cloudwatch_metrics_enabled = true
 
   result_configuration {
-    output_location = "s3://{aws_s3_bucket.athena.bucket}/output/"
+    output_location = "s3://${aws_s3_bucket.athena.id}/output/"
 
     encryption_configuration {
       encryption_option = "SSE_KMS"
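For reference, queries submitted through this workgroup inherit its `result_configuration`, so callers do not have to pass an output location of their own. A minimal boto3 sketch, with `example-athena-wg` and `example-reporting` as hypothetical stand-ins for the `${var.name_prefix}`-derived names:

```python
import time

import boto3

athena = boto3.client("athena")

# The workgroup's result_configuration supplies the S3 output location,
# so no OutputLocation is passed with the query itself.
start = athena.start_query_execution(
    QueryString="SELECT COUNT(*) FROM logs",
    QueryExecutionContext={"Database": "example-reporting"},
    WorkGroup="example-athena-wg",
)

# Poll until the query reaches a terminal state, then print where Athena
# wrote the results (the workgroup's s3://<bucket>/output/ prefix).
query_id = start["QueryExecutionId"]
while True:
    execution = athena.get_query_execution(QueryExecutionId=query_id)["QueryExecution"]
    if execution["Status"]["State"] in ("SUCCEEDED", "FAILED", "CANCELLED"):
        break
    time.sleep(1)

print(execution["Status"]["State"], execution["ResultConfiguration"]["OutputLocation"])
```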
@@ -5,7 +5,3 @@ output "workgroup" {
 output "bucket" {
   value = aws_s3_bucket.athena
 }
-
-output "database" {
-  value = aws_athena_database.reporting-db
-}
24 changes: 12 additions & 12 deletions terraform/account-wide-infrastructure/modules/glue/glue.tf
@@ -1,16 +1,16 @@
 # Create Glue Data Catalog Database
-resource "aws_glue_catalog_database" "raw_log_database" {
-  name = "${var.name_prefix}-raw_log"
-  location_uri = "${aws_s3_bucket.source-data-bucket.id}/"
+resource "aws_glue_catalog_database" "log_database" {
+  name = "${var.name_prefix}-reporting"
+  location_uri = "${aws_s3_bucket.target-data-bucket.id}/logs/"
 }
 
 # Create Glue Crawler
-resource "aws_glue_crawler" "raw_log_crawler" {
-  name = "${var.name_prefix}-raw-log-crawler"
-  database_name = aws_glue_catalog_database.raw_log_database.name
+resource "aws_glue_crawler" "log_crawler" {
+  name = "${var.name_prefix}-log-crawler"
+  database_name = aws_glue_catalog_database.log_database.name
   role = aws_iam_role.glue_service_role.name
   s3_target {
-    path = "${aws_s3_bucket.source-data-bucket.id}/"
+    path = "${aws_s3_bucket.target-data-bucket.id}/logs/"
   }
   schema_change_policy {
     delete_behavior = "LOG"
@@ -22,11 +22,11 @@ resource "aws_glue_crawler" "raw_log_crawler" {
     }
   })
 }
-resource "aws_glue_trigger" "raw_log_trigger" {
+resource "aws_glue_trigger" "log_trigger" {
   name = "${var.name_prefix}-org-report-trigger"
   type = "ON_DEMAND"
   actions {
-    crawler_name = aws_glue_crawler.raw_log_crawler.name
+    crawler_name = aws_glue_crawler.log_crawler.name
   }
 }
 
@@ -49,9 +49,9 @@ resource "aws_glue_job" "glue_job" {
     "--enable-auto-scaling" = "true"
     "--enable-continous-cloudwatch-log" = "true"
     "--datalake-formats" = "delta"
-    "--source-path" = "s3://${aws_s3_bucket.source-data-bucket.id}/" # Specify the source S3 path
-    "--destination-path" = "s3://${aws_s3_bucket.target-data-bucket.id}/" # Specify the destination S3 path
-    "--job-name" = "poc-glue-job"
+    "--source_path" = "s3://${aws_s3_bucket.source-data-bucket.id}/" # Specify the source S3 path
+    "--target_path" = "s3://${aws_s3_bucket.target-data-bucket.id}/logs" # Specify the destination S3 path
+    "--job_name" = "poc-glue-job"
     "--enable-continuous-log-filter" = "true"
     "--enable-metrics" = "true"
     "--extra-py-files" = "s3://${aws_s3_bucket.code-bucket.id}/src.zip"
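Since the trigger stays `ON_DEMAND`, nothing here runs on a schedule; the crawler has to be kicked after new Parquet lands under the `logs/` prefix. A rough boto3 sketch of doing that by hand, with `example-org-report-trigger` and `example-log-crawler` as placeholder names for the rendered `${var.name_prefix}` values:

```python
import time

import boto3

glue = boto3.client("glue")

# Fire the ON_DEMAND trigger, which starts the crawler defined above.
glue.start_trigger(Name="example-org-report-trigger")

# Wait for the crawler to return to READY; the tables it discovers are
# registered in the "<name_prefix>-reporting" Glue catalog database.
while glue.get_crawler(Name="example-log-crawler")["Crawler"]["State"] != "READY":
    time.sleep(15)
```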
76 changes: 74 additions & 2 deletions terraform/account-wide-infrastructure/modules/glue/iam.tf
@@ -15,7 +15,79 @@ resource "aws_iam_role" "glue_service_role" {
   })
 }
 
data "aws_iam_policy_document" "glue_service" {
statement {
actions = [
"s3:AbortMultipartUpload",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:PutObject",
"s3:DeleteObject",
]

resources = compact([
aws_s3_bucket.source-data-bucket.arn,
"${aws_s3_bucket.source-data-bucket.arn}/*",
aws_s3_bucket.target-data-bucket.arn,
"${aws_s3_bucket.target-data-bucket.arn}/*",
aws_s3_bucket.code-bucket.arn,
"${aws_s3_bucket.code-bucket.arn}/*",
])
effect = "Allow"
}

statement {
actions = [
"kms:DescribeKey",
"kms:GenerateDataKey*",
"kms:Encrypt",
"kms:ReEncrypt*",
"kms:Decrypt",
]

resources = [
aws_kms_key.glue.arn,
]

effect = "Allow"
}

statement {
actions = [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
]

resources = [
"arn:aws:logs:*:*:*:/aws-glue/*",
# "arn:aws:logs:*:*:*:/customlogs/*"
]

effect = "Allow"
}

statement {
actions = [
"glue:*",
]

resources = [
"*"
]

effect = "Allow"
}
}

resource "aws_iam_policy" "glue_service" {
name = "${var.name_prefix}-glue"
policy = data.aws_iam_policy_document.glue_service.json
}

resource "aws_iam_role_policy_attachment" "glue_service" {
role = aws_iam_role.glue_service_role.id
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole"
role = aws_iam_role.glue_service_role.name
policy_arn = aws_iam_policy.glue_service.arn
}
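A quick way to exercise the new inline policy from inside a job run: writing an SSE-KMS object needs the S3 statement (`s3:PutObject`) and the KMS statement (`kms:GenerateDataKey*`) together. A hedged sketch, with `example-target-data-bucket` and `alias/example-glue` as placeholder names:

```python
import boto3

s3 = boto3.client("s3")

# Uploading with SSE-KMS touches both halves of the policy document:
# s3:PutObject on the bucket objects and kms:GenerateDataKey* on the key.
s3.put_object(
    Bucket="example-target-data-bucket",
    Key="logs/_permission_probe",
    Body=b"ok",
    ServerSideEncryption="aws:kms",
    SSEKMSKeyId="alias/example-glue",
)

# s3:DeleteObject is also granted, so the probe object can be removed.
s3.delete_object(Bucket="example-target-data-bucket", Key="logs/_permission_probe")
```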
5 changes: 3 additions & 2 deletions terraform/account-wide-infrastructure/modules/glue/s3.tf
@@ -174,6 +174,7 @@ data "archive_file" "python" {
 
 resource "aws_s3_object" "zip" {
   bucket = aws_s3_bucket.code-bucket.bucket
-  key = "main.py"
-  source = "${path.module}/files/src.zip"
+  key = "src.zip"
+  source = data.archive_file.python.output_path
+  etag = filemd5(data.archive_file.python.output_path)
 }
17 changes: 8 additions & 9 deletions terraform/account-wide-infrastructure/modules/glue/src/main.py
@@ -1,27 +1,26 @@
 import sys
 
 from awsglue.utils import getResolvedOptions
+from pipeline import LogPipeline
 from pyspark.context import SparkContext
-from src.pipeline import LogPipeline
-from src.transformations import placeholder
+from transformations import dtype_conversion, flatten_df, logSchema
 
 # Get arguments from AWS Glue job
-args = getResolvedOptions(
-    sys.argv, ["JOB_NAME", "SOURCE_PATH", "TARGET_PATH", "PARTITION_COLS"]
-)
+args = getResolvedOptions(sys.argv, ["job_name", "source_path", "target_path"])
 
 # Start Glue context
 sc = SparkContext()
 
-partition_cols = args["PARTITION_COLS"].split(",") if "PARTITION_COLS" in args else []
+partition_cols = args["partition_cols"].split(",") if "partition_cols" in args else []
 
 # Initialize ETL process
 etl_job = LogPipeline(
     spark_context=sc,
-    source_path=args["SOURCE_PATH"],
-    target_path=args["TARGET_PATH"],
+    source_path=args["source_path"],
+    target_path=args["target_path"],
+    schema=logSchema,
     partition_cols=partition_cols,
-    transformations=[placeholder],
+    transformations=[flatten_df, dtype_conversion],
 )
 
 # Run the job
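The renamed keys line up with the `default_arguments` in glue.tf: `getResolvedOptions` resolves `--job_name`, `--source_path` and `--target_path` from `sys.argv`, whether they come from the job defaults or from run-time overrides. A sketch of starting the job with explicit overrides via boto3 (job and bucket names are illustrative placeholders):

```python
import boto3

glue = boto3.client("glue")

# Arguments supplied here override the default_arguments from glue.tf; the
# keys must match what main.py requests from getResolvedOptions.
run = glue.start_job_run(
    JobName="example-glue-job",
    Arguments={
        "--job_name": "poc-glue-job",
        "--source_path": "s3://example-source-data-bucket/",
        "--target_path": "s3://example-target-data-bucket/logs",
    },
)

state = glue.get_job_run(JobName="example-glue-job", RunId=run["JobRunId"])
print(state["JobRun"]["JobRunState"])
```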
@@ -1,4 +1,4 @@
-from src.instances import GlueContextSingleton, LoggerSingleton
+from instances import GlueContextSingleton, LoggerSingleton
 
 
 class LogPipeline:
@@ -7,7 +7,8 @@ def __init__(
         spark_context,
         source_path,
         target_path,
-        partition_cols=None,
+        schema,
+        partition_cols=[],
         transformations=[],
     ):
         """Initialize Glue context, Spark session, logger, and paths"""
@@ -16,6 +17,7 @@ def __init__(
         self.logger = LoggerSingleton().logger
         self.source_path = source_path
         self.target_path = target_path
+        self.schema = schema
         self.partition_cols = partition_cols
         self.transformations = transformations
 
@@ -36,7 +38,11 @@ def run(self):
     def extract(self):
         """Extract JSON data from S3"""
         self.logger.info(f"Extracting data from {self.source_path} as JSON")
-        return self.spark.read.json(self.source_path)
+        return (
+            self.spark.read.option("recursiveFileLookup", "true")
+            .schema(self.schema)
+            .json(self.source_path)
+        )
 
     def transform(self, dataframe):
         """Apply a list of transformations on the dataframe"""
@@ -48,6 +54,6 @@ def transform(self, dataframe):
     def load(self, dataframe):
         """Load transformed data into Parquet format"""
         self.logger.info(f"Loading data into {self.target_path} as Parquet")
-        dataframe.write.mode("overwrite").partitionBy(*self.partition_cols).parquet(
+        dataframe.write.mode("append").partitionBy(*self.partition_cols).parquet(
             self.target_path
         )
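The extract/load changes follow a standard Spark pattern: read JSON recursively with an explicit schema (so type inference never has to scan the whole prefix), then append partitioned Parquet instead of overwriting earlier runs. A self-contained PySpark sketch of the same pattern, with paths, fields and the partition column as placeholder assumptions:

```python
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, StructField, StructType

spark = SparkSession.builder.appName("log-etl-sketch").getOrCreate()

# An explicit schema replaces inference, which also keeps empty or partial
# prefixes from failing the read.
schema = StructType(
    [
        StructField("host", StringType(), True),
        StructField("source", StringType(), True),
        StructField("message", StringType(), True),
    ]
)

# recursiveFileLookup walks every nested prefix under the source path.
df = (
    spark.read.option("recursiveFileLookup", "true")
    .schema(schema)
    .json("s3://example-source-data-bucket/")
)

# Appending keeps Parquet written by previous runs in place.
df.write.mode("append").partitionBy("source").parquet(
    "s3://example-target-data-bucket/logs"
)
```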
@@ -1 +1,76 @@
-def placeholder(): ...
+from pyspark.sql.functions import to_timestamp
+from pyspark.sql.types import (
+    BooleanType,
+    StringType,
+    StructField,
+    StructType,
+    TimestampType,
+)
+
+logSchema = StructType(
+    [
+        StructField("time", TimestampType(), True),
+        StructField("index", StringType(), True),
+        StructField("host", StringType(), True),
+        StructField("source", StringType(), True),
+        StructField(
+            "event",
+            StructType(
+                [
+                    StructField("level", StringType(), True),
+                    StructField("location", StringType(), True),
+                    StructField("message", StringType(), True),
+                    StructField("timestamp", StringType(), True),
+                    StructField("service", StringType(), True),
+                    StructField("cold_start", BooleanType(), True),
+                    StructField("function_name", StringType(), True),
+                    StructField("function_memory_size", StringType(), True),
+                    StructField("function_arn", StringType(), True),
+                    StructField("function_request_id", StringType(), True),
+                    StructField("correlation_id", StringType(), True),
+                    StructField("method", StringType(), True),
+                    StructField("path", StringType(), True),
+                    StructField(
+                        "headers",
+                        StructType(
+                            [
+                                StructField("accept", StringType(), True),
+                                StructField("accept-encoding", StringType(), True),
+                                StructField("Authorization", StringType(), True),
+                                StructField("Host", StringType(), True),
+                                StructField(
+                                    "NHSD-Connection-Metadata", StringType(), True
+                                ),
+                                StructField("NHSD-Correlation-Id", StringType(), True),
+                                StructField("User-Agent", StringType(), True),
+                                StructField("X-Forwarded-For", StringType(), True),
+                                StructField("X-Request-Id", StringType(), True),
+                            ]
+                        ),
+                        True,
+                    ),
+                    StructField("log_reference", StringType(), True),
+                    StructField("xray_trace_id", StringType(), True),
+                ]
+            ),
+            True,
+        ),
+    ]
+)
+
+
+def flatten_df(df):
+    cols = []
+    for c in df.dtypes:
+        if "struct" in c[1]:
+            nested_col = c[0]
+        else:
+            cols.append(c[0])
+    return df.select(*cols, f"{nested_col}.*")
+
+
+def dtype_conversion(df):
+    df = df.withColumn(
+        "timestamp", to_timestamp(df["timestamp"], "yyyy-MM-dd HH:mm:ss,SSSXXX")
+    )
+    return df
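The flattening here leans on Spark's `struct.*` projection: selecting `event.*` lifts each field of the `event` struct to a top-level column, after which the string `timestamp` can be converted with `to_timestamp`. A small local illustration of the same technique on a toy frame (the data and column names are made up, not taken from the repo):

```python
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import to_timestamp

spark = SparkSession.builder.appName("flatten-sketch").getOrCreate()

df = spark.createDataFrame(
    [
        Row(
            host="lambda-1",
            event=Row(
                level="INFO",
                message="hello",
                timestamp="2024-01-01 12:00:00,000+00:00",
            ),
        )
    ]
)

# "event.*" promotes level, message and timestamp out of the struct.
flat = df.select("host", "event.*")

# The raw timestamp string becomes a TimestampType column.
flat = flat.withColumn(
    "timestamp", to_timestamp(flat["timestamp"], "yyyy-MM-dd HH:mm:ss,SSSXXX")
)
flat.show(truncate=False)
```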
7 changes: 6 additions & 1 deletion terraform/infrastructure/data.tf
@@ -43,6 +43,11 @@ data "external" "current-info" {
 }
 
 data "aws_s3_bucket" "source-data-bucket" {
-  count = local.is_dev_env ? 1 : 0
+  count = local.is_dev_env && !local.is_sandbox_env ? 1 : 0
   bucket = "${local.shared_prefix}-source-data-bucket"
 }
+
+data "aws_kms_key" "glue" {
+  count = local.is_dev_env && !local.is_sandbox_env ? 1 : 0
+  key_id = "alias/${local.shared_prefix}-glue"
+}
3 changes: 2 additions & 1 deletion terraform/infrastructure/firehose.tf
@@ -9,5 +9,6 @@ module "firehose__processor" {
   splunk_index = local.splunk_index
   destination = "splunk"
   reporting_bucket_arn = local.reporting_bucket_arn
-  reporting_infra_toggle = local.is_dev_env
+  reporting_kms_arn = local.reporting_kms_arn
+  reporting_infra_toggle = local.is_dev_env && !local.is_sandbox_env
 }