diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b1fb6e0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,51 @@ +# Python +__pycache__/ +*.py[cod] +*.pyo +*.pyd +*.so +*.egg-info/ +.eggs/ + +# Virtualenv +.venv/ +venv/ +ENV/ + +# Pytest +.pytest_cache/ + +# Coverage +.coverage +.coverage.* +htmlcov/ + +# Logs +*.log + +# OS +.DS_Store + +# Editor +.vscode/ +.idea/ + +# Terraform +.terraform/ +*.tfstate +*.tfstate.* +.terraform.lock.hcl + +# Terraform crash logs +crash.log +crash.*.log + +# Docker +*.pid + +# Local env files +.env +.env.* + +# Scripts tmp +/tmp/ diff --git a/API.md b/API.md new file mode 100644 index 0000000..f60a2d8 --- /dev/null +++ b/API.md @@ -0,0 +1,96 @@ +# API and CLI Guide + +## Run with Docker Compose + +```bash +cp .env.example .env +# edit .env and set POSTGRES_PASSWORD +docker compose up --build +``` + +API will be available at `http://localhost:8000`. +The `tests` service runs `pytest` automatically during startup and then exits. +If you want the stack to stop when tests finish, run: +```bash +docker compose up --build --abort-on-container-exit --exit-code-from tests +``` + +## Run locally + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +export DATABASE_URL=postgresql://postgres:postgres@localhost:5432/inventory +uvicorn app.main:app --reload +``` + +## API Spec + +### Create server +`POST /servers` + +Request: +```json +{"hostname":"srv-1","ip_address":"10.0.0.1","state":"active"} +``` + +Responses: +- `201` server object +- `400` hostname must be unique or invalid payload + +### List servers +`GET /servers` + +Responses: +- `200` list of server objects + +### Get server +`GET /servers/{id}` + +Responses: +- `200` server object +- `404` not found + +### Update server +`PUT /servers/{id}` + +Request: +```json +{"hostname":"srv-1","ip_address":"10.0.0.2","state":"offline"} +``` + +Responses: +- `200` server object +- `400` hostname must be unique or invalid payload +- `404` 
not found + +### Delete server +`DELETE /servers/{id}` + +Responses: +- `204` deleted +- `404` not found + +## CLI Spec + +The CLI talks to the API. Set `API_URL` if needed (default `http://localhost:8000`). + +```bash +python -m cli list +python -m cli get 1 +python -m cli create srv-1 10.0.0.1 active +python -m cli update 1 srv-1b 10.0.0.2 offline +python -m cli delete 1 +``` + +## Tests + +Make sure PostgreSQL is running locally, then: + +```bash +export DATABASE_URL=postgresql://postgres:postgres@localhost:5432/inventory +pytest +``` + +Tests skip automatically if PostgreSQL is unavailable. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..3b3af8b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +COPY app ./app +COPY tests ./tests + +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/README.md b/README.md index 3145d38..700c125 100644 --- a/README.md +++ b/README.md @@ -29,3 +29,155 @@ Validate that: State is one of: active, offline, retired +# Project Usage + +## Run with Docker Compose (default) + +```bash +cp .env.example .env +# edit .env and set POSTGRES_PASSWORD +docker compose up --build +``` + +This starts `api` and `db`. The `tests` service runs once with verbose output and exits; it does not stop the stack. If you want the stack to stop right after tests, run: + +```bash +docker compose up --build --abort-on-container-exit --exit-code-from tests +``` + +API is available at `http://localhost:8000`. + +## CLI + +```bash +python -m cli list +python -m cli get 1 +python -m cli create srv-1 10.0.0.1 active +python -m cli update 1 srv-1b 10.0.0.2 offline +python -m cli delete 1 +``` + +# Tests + +Tests run automatically during `docker compose up --build`. 
You can also run them locally:
+
+```bash
+export DATABASE_URL=postgresql://postgres:postgres@localhost:5432/inventory
+pytest
+```
+
+Note: during tests you may see a PostgreSQL "duplicate key value violates unique constraint" log entry.
+This is expected and comes from the unique-hostname validation test.
+
+# Security, Lint, and Dependency Checks
+
+All tools below are free/open-source. Run them locally:
+
+```bash
+./scripts/security_checks.sh
+```
+
+What it checks:
+- Python linting with `ruff`
+- Static security analysis with `bandit`
+- Dependency vulnerability scan with `pip-audit`
+- Dockerfile linting with `hadolint` (via container)
+- Repo vulnerability scan with `trivy` (via container)
+
+To see outdated dependencies:
+
+```bash
+./scripts/check_updates.sh
+```
+
+# Optional AWS Deploy Switch
+
+Default deployment is local Docker Compose. For AWS, use the deploy switch:
+
+```bash
+DEPLOY_TARGET=aws \
+AWS_REGION=us-east-1 \
+AWS_ACCOUNT_ID=123456789012 \
+ECR_REPO=inventory-api \
+ECS_CLUSTER=your-cluster \
+ECS_SERVICE=your-service \
+DATABASE_URL=postgresql://user:pass@your-rds:5432/inventory \
+EXECUTION_ROLE_ARN=arn:aws:iam::123456789012:role/ecsTaskExecutionRole \
+TASK_ROLE_ARN=arn:aws:iam::123456789012:role/ecsTaskRole \
+./scripts/deploy.sh
+```
+
+Notes:
+- Requires AWS CLI, Docker, and an existing ECS cluster/service.
+- Task definition template lives in `deploy/aws/task-def.json`.
+- The script builds and pushes the image to ECR, then updates the ECS service.
+- This path expects an explicit `DATABASE_URL` (no Secrets Manager integration). Use the Terraform path if you want Secrets Manager + full infra provisioning.
+
+# AWS Terraform Deployment (provision everything)
+
+This path provisions the VPC, subnets, ALB, ECS Fargate, ECR repo, and RDS PostgreSQL via Terraform.
+You only provide AWS credentials and a DB password.
+
+Minimal (uses defaults):
+
+```bash
+./scripts/deploy_terraform_aws.sh
+```
+
+Optional overrides:
+
+```bash
+export AWS_REGION=us-east-1
+export PROJECT_NAME=inventory
+export DB_USERNAME=inventory
+# Optional: pin Postgres engine version (otherwise latest available in region)
+export DB_ENGINE_VERSION=18.1
+# Optional HTTPS (requires domain + ACM certificate)
+export ACM_CERT_ARN=arn:aws:acm:us-east-1:123456789012:certificate/your-cert-id
+# Optional: attach a domain and create Route53 record automatically
+export API_DOMAIN_NAME=api.example.com
+export ROUTE53_ZONE_ID=Z1234567890
+./scripts/deploy_terraform_aws.sh
+```
+
+After deploy, run a smoke test:
+
+```bash
+# use the `alb_dns_name` Terraform output (or your domain, if configured)
+export API_URL=http://<alb_dns_name>
+./scripts/aws_smoke_test.sh
+```
+
+Outputs are available in `deploy/aws/terraform/outputs.tf`.
+
+Notes:
+- This Terraform stack uses private subnets for ECS/RDS, NAT for egress, and a public ALB.
+- If you do not provide `ACM_CERT_ARN`, the ALB runs HTTP only. When you have a domain, add ACM to enable HTTPS.
+- Destroy with: `cd deploy/aws/terraform && terraform destroy`.
+- The RDS password is generated randomly and stored in AWS Secrets Manager.
+- ECS pulls `DATABASE_URL` directly from Secrets Manager at runtime.
+- Terraform state will contain the generated secret value; store state securely (e.g., S3 + KMS).
+- SSL/TLS requires a valid ACM certificate for your domain. For the automatic smoke test over HTTPS, set `API_DOMAIN_NAME` + `ROUTE53_ZONE_ID` so the script can hit a matching cert.
+- RDS deletion protection and performance insights are enabled for security; disable them before `terraform destroy` if needed (cost impact).
+ +# Terraform Security Checks + +Run Terraform linting + security scans: + +```bash +./scripts/terraform_security_checks.sh +``` + +This runs: +- `terraform fmt -check` and `terraform validate` +- `tfsec` and `checkov` via Docker for security posture checks + +Notes: +- Some tfsec checks are intentionally suppressed for the public ALB and optional HTTP-only mode when ACM is not provided. + +# Terraform Provider Version Check + +To check for provider updates and refresh the lockfile: + +```bash +./scripts/terraform_update_check.sh +``` diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/db.py b/app/db.py new file mode 100644 index 0000000..7a33d7c --- /dev/null +++ b/app/db.py @@ -0,0 +1,94 @@ +import os +from contextlib import contextmanager + +import psycopg2 +from psycopg2.extras import RealDictCursor + + +def _database_url() -> str: + return os.getenv( + "DATABASE_URL", "postgresql://postgres:postgres@localhost:5432/inventory" + ) + + +@contextmanager +def get_conn(): + conn = psycopg2.connect(_database_url()) + try: + yield conn + finally: + conn.close() + + +def init_db(): + ddl = """ + CREATE TABLE IF NOT EXISTS servers ( + id SERIAL PRIMARY KEY, + hostname TEXT NOT NULL UNIQUE, + ip_address TEXT NOT NULL, + state TEXT NOT NULL + ) + """ + with get_conn() as conn: + with conn.cursor() as cur: + cur.execute(ddl) + conn.commit() + + +def fetch_all(): + with get_conn() as conn: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute("SELECT id, hostname, ip_address, state FROM servers ORDER BY id") + return cur.fetchall() + + +def fetch_one(server_id: int): + with get_conn() as conn: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute( + "SELECT id, hostname, ip_address, state FROM servers WHERE id = %s", + (server_id,), + ) + return cur.fetchone() + + +def insert_one(hostname: str, ip_address: str, state: str): + with get_conn() as conn: + with 
conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute( + """ + INSERT INTO servers (hostname, ip_address, state) + VALUES (%s, %s, %s) + RETURNING id, hostname, ip_address, state + """, + (hostname, ip_address, state), + ) + row = cur.fetchone() + conn.commit() + return row + + +def update_one(server_id: int, hostname: str, ip_address: str, state: str): + with get_conn() as conn: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute( + """ + UPDATE servers + SET hostname = %s, ip_address = %s, state = %s + WHERE id = %s + RETURNING id, hostname, ip_address, state + """, + (hostname, ip_address, state, server_id), + ) + row = cur.fetchone() + conn.commit() + return row + + +def delete_one(server_id: int): + with get_conn() as conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM servers WHERE id = %s", (server_id,)) + deleted = cur.rowcount + conn.commit() + return deleted diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..f3b1243 --- /dev/null +++ b/app/main.py @@ -0,0 +1,95 @@ +from contextlib import asynccontextmanager +from ipaddress import ip_address + +from fastapi import FastAPI, HTTPException +from fastapi.responses import JSONResponse +from pydantic import BaseModel, field_validator +from psycopg2 import errors + +from app import db + + +class ServerBase(BaseModel): + hostname: str + ip_address: str + state: str + + @field_validator("ip_address") + @classmethod + def valid_ip(cls, value: str) -> str: + ip_address(value) + return value + + @field_validator("state") + @classmethod + def valid_state(cls, value: str) -> str: + allowed = {"active", "offline", "retired"} + if value not in allowed: + raise ValueError("state must be one of: active, offline, retired") + return value + + +class ServerCreate(ServerBase): + pass + + +class ServerUpdate(ServerBase): + pass + + +class ServerOut(ServerBase): + id: int + + +@asynccontextmanager +async def lifespan(_app: FastAPI): + db.init_db() + yield + + +app = 
FastAPI(title="Inventory API", lifespan=lifespan) + + +@app.post("/servers", response_model=ServerOut, status_code=201) +def create_server(payload: ServerCreate): + try: + row = db.insert_one(payload.hostname, payload.ip_address, payload.state) + except errors.UniqueViolation: + return JSONResponse( + status_code=400, content={"detail": "hostname must be unique"} + ) + return row + + +@app.get("/servers", response_model=list[ServerOut]) +def list_servers(): + return db.fetch_all() + + +@app.get("/servers/{server_id}", response_model=ServerOut) +def get_server(server_id: int): + row = db.fetch_one(server_id) + if not row: + raise HTTPException(status_code=404, detail="server not found") + return row + + +@app.put("/servers/{server_id}", response_model=ServerOut) +def update_server(server_id: int, payload: ServerUpdate): + try: + row = db.update_one(server_id, payload.hostname, payload.ip_address, payload.state) + except errors.UniqueViolation: + return JSONResponse( + status_code=400, content={"detail": "hostname must be unique"} + ) + if not row: + raise HTTPException(status_code=404, detail="server not found") + return row + + +@app.delete("/servers/{server_id}", status_code=204) +def delete_server(server_id: int): + deleted = db.delete_one(server_id) + if not deleted: + raise HTTPException(status_code=404, detail="server not found") + return None diff --git a/cli/__init__.py b/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cli/__main__.py b/cli/__main__.py new file mode 100644 index 0000000..54eb2a3 --- /dev/null +++ b/cli/__main__.py @@ -0,0 +1,95 @@ +import argparse +import json +import os +import sys + +import requests + + +API_URL = os.getenv("API_URL", "http://localhost:8000") + + +def request(method: str, path: str, payload=None): + url = f"{API_URL}{path}" + resp = requests.request(method, url, json=payload, timeout=10) + if resp.status_code >= 400: + print(resp.text) + sys.exit(1) + if resp.status_code == 204: + return None + 
return resp.json() + + +def cmd_list(_args): + data = request("GET", "/servers") + print(json.dumps(data, indent=2)) + + +def cmd_get(args): + data = request("GET", f"/servers/{args.id}") + print(json.dumps(data, indent=2)) + + +def cmd_create(args): + payload = { + "hostname": args.hostname, + "ip_address": args.ip, + "state": args.state, + } + data = request("POST", "/servers", payload) + print(json.dumps(data, indent=2)) + + +def cmd_update(args): + payload = { + "hostname": args.hostname, + "ip_address": args.ip, + "state": args.state, + } + data = request("PUT", f"/servers/{args.id}", payload) + print(json.dumps(data, indent=2)) + + +def cmd_delete(args): + request("DELETE", f"/servers/{args.id}") + print("deleted") + + +def build_parser(): + parser = argparse.ArgumentParser(description="Inventory CLI") + sub = parser.add_subparsers(dest="command", required=True) + + sub.add_parser("list", help="List servers").set_defaults(func=cmd_list) + + get_p = sub.add_parser("get", help="Get server") + get_p.add_argument("id", type=int) + get_p.set_defaults(func=cmd_get) + + create_p = sub.add_parser("create", help="Create server") + create_p.add_argument("hostname") + create_p.add_argument("ip") + create_p.add_argument("state", choices=["active", "offline", "retired"]) + create_p.set_defaults(func=cmd_create) + + update_p = sub.add_parser("update", help="Update server") + update_p.add_argument("id", type=int) + update_p.add_argument("hostname") + update_p.add_argument("ip") + update_p.add_argument("state", choices=["active", "offline", "retired"]) + update_p.set_defaults(func=cmd_update) + + delete_p = sub.add_parser("delete", help="Delete server") + delete_p.add_argument("id", type=int) + delete_p.set_defaults(func=cmd_delete) + + return parser + + +def main(): + parser = build_parser() + args = parser.parse_args() + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/deploy/aws/task-def.json b/deploy/aws/task-def.json new file mode 100644 index 
0000000..d95601c --- /dev/null +++ b/deploy/aws/task-def.json @@ -0,0 +1,27 @@ +{ + "family": "inventory-api", + "networkMode": "awsvpc", + "requiresCompatibilities": ["FARGATE"], + "cpu": "256", + "memory": "512", + "executionRoleArn": "__EXECUTION_ROLE_ARN__", + "taskRoleArn": "__TASK_ROLE_ARN__", + "containerDefinitions": [ + { + "name": "api", + "image": "__IMAGE__", + "portMappings": [ + { + "containerPort": 8000, + "protocol": "tcp" + } + ], + "environment": [ + { + "name": "DATABASE_URL", + "value": "__DATABASE_URL__" + } + ] + } + ] +} diff --git a/deploy/aws/terraform/main.tf b/deploy/aws/terraform/main.tf new file mode 100644 index 0000000..3f78bda --- /dev/null +++ b/deploy/aws/terraform/main.tf @@ -0,0 +1,626 @@ +locals { + name_prefix = var.project_name + use_tls = var.acm_certificate_arn != "" +} + +data "aws_caller_identity" "current" {} + +data "aws_rds_engine_version" "postgres" { + engine = "postgres" +} + +resource "aws_vpc" "main" { + cidr_block = "10.0.0.0/16" + enable_dns_support = true + enable_dns_hostnames = true + + tags = { + Name = "${local.name_prefix}-vpc" + } +} + +resource "aws_cloudwatch_log_group" "vpc_flow" { + name = "/vpc/${local.name_prefix}/flow-logs" + retention_in_days = 30 + kms_key_id = aws_kms_key.main.arn +} + +resource "aws_iam_role" "vpc_flow" { + name = "${local.name_prefix}-vpc-flow-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "vpc-flow-logs.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy" "vpc_flow" { + name = "${local.name_prefix}-vpc-flow-policy" + role = aws_iam_role.vpc_flow.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams" + ] + Effect = "Allow" + Resource = "${aws_cloudwatch_log_group.vpc_flow.arn}:*" + } + ] + }) +} + +resource 
"aws_flow_log" "vpc" { + log_destination = aws_cloudwatch_log_group.vpc_flow.arn + log_destination_type = "cloud-watch-logs" + traffic_type = "ALL" + vpc_id = aws_vpc.main.id + iam_role_arn = aws_iam_role.vpc_flow.arn +} + +resource "aws_internet_gateway" "main" { + vpc_id = aws_vpc.main.id + + tags = { + Name = "${local.name_prefix}-igw" + } +} + +#tfsec:ignore:aws-ec2-no-public-ip-subnet +resource "aws_subnet" "public_a" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.1.0/24" + availability_zone = "${var.aws_region}a" + map_public_ip_on_launch = true + + tags = { + Name = "${local.name_prefix}-public-a" + } +} + +#tfsec:ignore:aws-ec2-no-public-ip-subnet +resource "aws_subnet" "public_b" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.2.0/24" + availability_zone = "${var.aws_region}b" + map_public_ip_on_launch = true + + tags = { + Name = "${local.name_prefix}-public-b" + } +} + +resource "aws_subnet" "private_a" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.11.0/24" + availability_zone = "${var.aws_region}a" + map_public_ip_on_launch = false + + tags = { + Name = "${local.name_prefix}-private-a" + } +} + +resource "aws_subnet" "private_b" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.12.0/24" + availability_zone = "${var.aws_region}b" + map_public_ip_on_launch = false + + tags = { + Name = "${local.name_prefix}-private-b" + } +} + +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.main.id + } + + tags = { + Name = "${local.name_prefix}-public-rt" + } +} + +resource "aws_route_table_association" "public_a" { + subnet_id = aws_subnet.public_a.id + route_table_id = aws_route_table.public.id +} + +resource "aws_route_table_association" "public_b" { + subnet_id = aws_subnet.public_b.id + route_table_id = aws_route_table.public.id +} + +resource "aws_eip" "nat" { + domain = "vpc" + + tags = { + Name = "${local.name_prefix}-nat-eip" + } +} + +resource "aws_nat_gateway" 
"main" { + allocation_id = aws_eip.nat.id + subnet_id = aws_subnet.public_a.id + + tags = { + Name = "${local.name_prefix}-nat" + } +} + +resource "aws_route_table" "private" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.main.id + } + + tags = { + Name = "${local.name_prefix}-private-rt" + } +} + +resource "aws_route_table_association" "private_a" { + subnet_id = aws_subnet.private_a.id + route_table_id = aws_route_table.private.id +} + +resource "aws_route_table_association" "private_b" { + subnet_id = aws_subnet.private_b.id + route_table_id = aws_route_table.private.id +} + +#tfsec:ignore:aws-ec2-no-public-ingress-sgr +resource "aws_security_group" "alb" { + name = "${local.name_prefix}-alb-sg" + description = "ALB access" + vpc_id = aws_vpc.main.id + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow HTTP from the internet" + } + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow HTTPS from the internet" + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = [aws_vpc.main.cidr_block] + description = "Allow egress to VPC targets" + } +} + +#tfsec:ignore:aws-ec2-no-public-egress-sgr +resource "aws_security_group" "ecs" { + name = "${local.name_prefix}-ecs-sg" + description = "ECS tasks access" + vpc_id = aws_vpc.main.id + + ingress { + from_port = var.container_port + to_port = var.container_port + protocol = "tcp" + security_groups = [aws_security_group.alb.id] + description = "Allow ALB to reach ECS tasks" + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow egress for AWS APIs and outbound dependencies" + } +} + +resource "aws_security_group" "db" { + name = "${local.name_prefix}-db-sg" + description = "Postgres access" + vpc_id = aws_vpc.main.id + + ingress { + from_port = 5432 + 
to_port = 5432 + protocol = "tcp" + security_groups = [aws_security_group.ecs.id] + description = "Allow Postgres from ECS tasks" + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = [aws_vpc.main.cidr_block] + description = "Allow egress within VPC" + } +} + +resource "aws_kms_key" "main" { + description = "${local.name_prefix} KMS key" + deletion_window_in_days = 7 + enable_key_rotation = true + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "EnableRootPermissions" + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "kms:*" + Resource = "*" + }, + { + Sid = "AllowCloudWatchLogs" + Effect = "Allow" + Principal = { + Service = "logs.${var.aws_region}.amazonaws.com" + } + Action = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ] + Resource = "*" + Condition = { + ArnLike = { + "kms:EncryptionContext:aws:logs:arn" = "arn:aws:logs:${var.aws_region}:${data.aws_caller_identity.current.account_id}:log-group:*" + } + } + } + , + { + Sid = "AllowSecretsManager" + Effect = "Allow" + Principal = { + Service = "secretsmanager.amazonaws.com" + } + Action = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ] + Resource = "*" + Condition = { + ArnLike = { + "kms:EncryptionContext:aws:secretsmanager:arn" = "arn:aws:secretsmanager:${var.aws_region}:${data.aws_caller_identity.current.account_id}:secret:*" + } + } + }, + { + Sid = "AllowEcsExecutionRole" + Effect = "Allow" + Principal = { + AWS = aws_iam_role.execution.arn + } + Action = [ + "kms:Decrypt", + "kms:DescribeKey" + ] + Resource = "*" + } + ] + }) +} + +resource "aws_kms_alias" "main" { + name = "alias/${local.name_prefix}-key" + target_key_id = aws_kms_key.main.key_id +} + +resource "random_password" "db" { + length = 24 + special = false +} + +resource "random_string" 
"secret_suffix" { + length = 6 + upper = false + special = false +} + +resource "aws_secretsmanager_secret" "db_url" { + name = "${local.name_prefix}-database-url-${random_string.secret_suffix.result}" + kms_key_id = aws_kms_key.main.arn +} + +resource "aws_db_subnet_group" "main" { + name = "${local.name_prefix}-db-subnets" + subnet_ids = [aws_subnet.private_a.id, aws_subnet.private_b.id] +} + +resource "aws_db_instance" "main" { + identifier = "${local.name_prefix}-db" + engine = "postgres" + engine_version = var.db_engine_version != "" ? var.db_engine_version : data.aws_rds_engine_version.postgres.version + instance_class = "db.t3.micro" + allocated_storage = 20 + username = var.db_username + password = random_password.db.result + db_name = var.db_name + db_subnet_group_name = aws_db_subnet_group.main.name + vpc_security_group_ids = [aws_security_group.db.id] + publicly_accessible = false + storage_encrypted = true + kms_key_id = aws_kms_key.main.arn + backup_retention_period = 7 + deletion_protection = true + iam_database_authentication_enabled = true + performance_insights_enabled = true + performance_insights_kms_key_id = aws_kms_key.main.arn + skip_final_snapshot = true +} + +resource "aws_secretsmanager_secret_version" "db_url" { + secret_id = aws_secretsmanager_secret.db_url.id + secret_string = "postgresql://${var.db_username}:${random_password.db.result}@${aws_db_instance.main.address}:5432/${var.db_name}" +} + +resource "aws_ecr_repository" "main" { + name = "${local.name_prefix}-api" + image_tag_mutability = "IMMUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + kms_key = aws_kms_key.main.arn + } +} + +resource "aws_ecs_cluster" "main" { + name = "${local.name_prefix}-cluster" + + setting { + name = "containerInsights" + value = "enabled" + } +} + +resource "aws_iam_role" "execution" { + name = "${local.name_prefix}-execution-role" + + assume_role_policy = jsonencode({ + 
Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ecs-tasks.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "execution" { + role = aws_iam_role.execution.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +resource "aws_iam_policy" "secrets_access" { + name = "${local.name_prefix}-secrets-access" + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["secretsmanager:GetSecretValue"] + Effect = "Allow" + Resource = aws_secretsmanager_secret.db_url.arn + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "secrets_access" { + role = aws_iam_role.execution.name + policy_arn = aws_iam_policy.secrets_access.arn +} + +resource "aws_iam_role" "task" { + name = "${local.name_prefix}-task-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ecs-tasks.amazonaws.com" + } + } + ] + }) +} + +resource "aws_cloudwatch_log_group" "main" { + name = "/ecs/${local.name_prefix}" + retention_in_days = 7 + kms_key_id = aws_kms_key.main.arn +} + +resource "aws_ecs_task_definition" "main" { + family = "${local.name_prefix}-task" + requires_compatibilities = ["FARGATE"] + network_mode = "awsvpc" + cpu = tostring(var.cpu) + memory = tostring(var.memory) + execution_role_arn = aws_iam_role.execution.arn + task_role_arn = aws_iam_role.task.arn + + container_definitions = jsonencode([ + { + name = "api" + image = "${aws_ecr_repository.main.repository_url}:${var.image_tag}" + portMappings = [ + { + containerPort = var.container_port + protocol = "tcp" + } + ] + secrets = [ + { + name = "DATABASE_URL" + valueFrom = aws_secretsmanager_secret.db_url.arn + } + ] + logConfiguration = { + logDriver = "awslogs" + options = { + awslogs-group = aws_cloudwatch_log_group.main.name + awslogs-region = var.aws_region + 
awslogs-stream-prefix = "ecs" + } + } + } + ]) +} + +#tfsec:ignore:aws-elb-alb-not-public +resource "aws_lb" "main" { + name = "${local.name_prefix}-alb" + load_balancer_type = "application" + subnets = [aws_subnet.public_a.id, aws_subnet.public_b.id] + security_groups = [aws_security_group.alb.id] + drop_invalid_header_fields = true +} + +resource "aws_lb_target_group" "main" { + name = "${local.name_prefix}-tg" + port = var.container_port + protocol = "HTTP" + vpc_id = aws_vpc.main.id + target_type = "ip" + + health_check { + path = "/servers" + interval = 10 + timeout = 5 + healthy_threshold = 2 + unhealthy_threshold = 2 + matcher = "200" + } + + deregistration_delay = 10 +} + +#tfsec:ignore:aws-elb-http-not-used +resource "aws_lb_listener" "http_forward" { + count = local.use_tls ? 0 : 1 + load_balancer_arn = aws_lb.main.arn + port = 80 + protocol = "HTTP" + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.main.arn + } +} + +resource "aws_lb_listener" "http_redirect" { + count = local.use_tls ? 1 : 0 + load_balancer_arn = aws_lb.main.arn + port = 80 + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +resource "aws_lb_listener" "https" { + count = local.use_tls ? 
1 : 0 + load_balancer_arn = aws_lb.main.arn + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06" + certificate_arn = var.acm_certificate_arn + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.main.arn + } +} + +resource "aws_ecs_service" "main" { + name = "${local.name_prefix}-service" + cluster = aws_ecs_cluster.main.id + task_definition = aws_ecs_task_definition.main.arn + desired_count = var.desired_count + launch_type = "FARGATE" + health_check_grace_period_seconds = 30 + + deployment_circuit_breaker { + enable = true + rollback = true + } + + deployment_minimum_healthy_percent = 50 + deployment_maximum_percent = 200 + + network_configuration { + subnets = [aws_subnet.private_a.id, aws_subnet.private_b.id] + assign_public_ip = false + security_groups = [aws_security_group.ecs.id] + } + + load_balancer { + target_group_arn = aws_lb_target_group.main.arn + container_name = "api" + container_port = var.container_port + } +} + +resource "aws_route53_record" "api" { + count = var.route53_zone_id != "" && var.api_domain_name != "" ? 1 : 0 + zone_id = var.route53_zone_id + name = var.api_domain_name + type = "CNAME" + ttl = 300 + records = [aws_lb.main.dns_name] +} diff --git a/deploy/aws/terraform/outputs.tf b/deploy/aws/terraform/outputs.tf new file mode 100644 index 0000000..90dee62 --- /dev/null +++ b/deploy/aws/terraform/outputs.tf @@ -0,0 +1,19 @@ +output "alb_dns_name" { + value = aws_lb.main.dns_name +} + +output "api_url" { + value = var.api_domain_name != "" ? "${local.use_tls ? "https" : "http"}://${var.api_domain_name}" : "${local.use_tls ? 
"https" : "http"}://${aws_lb.main.dns_name}" +} + +output "ecr_repository_url" { + value = aws_ecr_repository.main.repository_url +} + +output "db_endpoint" { + value = aws_db_instance.main.address +} + +output "database_url_secret_arn" { + value = aws_secretsmanager_secret.db_url.arn +} diff --git a/deploy/aws/terraform/variables.tf b/deploy/aws/terraform/variables.tf new file mode 100644 index 0000000..cc63bbf --- /dev/null +++ b/deploy/aws/terraform/variables.tf @@ -0,0 +1,63 @@ +variable "aws_region" { + type = string + default = "us-east-1" +} + +variable "project_name" { + type = string + default = "inventory" +} + +variable "db_username" { + type = string +} + +variable "db_name" { + type = string + default = "inventory" +} + +variable "db_engine_version" { + type = string + default = "" +} + +variable "acm_certificate_arn" { + type = string + default = "" +} + +variable "api_domain_name" { + type = string + default = "" +} + +variable "route53_zone_id" { + type = string + default = "" +} + +variable "container_port" { + type = number + default = 8000 +} + +variable "desired_count" { + type = number + default = 1 +} + +variable "cpu" { + type = number + default = 256 +} + +variable "memory" { + type = number + default = 512 +} + +variable "image_tag" { + type = string + default = "latest" +} diff --git a/deploy/aws/terraform/versions.tf b/deploy/aws/terraform/versions.tf new file mode 100644 index 0000000..910a416 --- /dev/null +++ b/deploy/aws/terraform/versions.tf @@ -0,0 +1,17 @@ +terraform { + required_version = ">= 1.14.3" + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 6.28.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.8.0" + } + } +} + +provider "aws" { + region = var.aws_region +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..ecdbea0 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,34 @@ +services: + db: + image: postgres:16 + environment: + 
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: inventory + ports: + - "5433:5432" + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 5s + retries: 5 + + api: + build: . + environment: + DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@db:5432/inventory + ports: + - "8000:8000" + depends_on: + db: + condition: service_healthy + + tests: + build: . + environment: + DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@db:5432/inventory + PYTHONPATH: /app + depends_on: + db: + condition: service_healthy + working_dir: /app + command: ["pytest", "-v"] diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..ade00ab --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +bandit==1.9.2 +pip-audit==2.10.0 +ruff==0.14.13 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..6372371 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +fastapi==0.128.0 +uvicorn[standard]==0.40.0 +psycopg2-binary==2.9.11 +requests==2.32.5 +python-multipart==0.0.21 +pytest==9.0.2 +httpx==0.28.1 diff --git a/scripts/aws_smoke_test.sh b/scripts/aws_smoke_test.sh new file mode 100755 index 0000000..5e5c538 --- /dev/null +++ b/scripts/aws_smoke_test.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -euo pipefail + +: "${API_URL:?Missing API_URL}" + +echo "Waiting for API readiness at $API_URL ..." +ready=0 +for _ in {1..30}; do + if curl -fsS "$API_URL/servers" >/dev/null; then + ready=1 + break + fi + sleep 10 +done + +if [[ "$ready" -ne 1 ]]; then + echo "API did not become ready in time. Check ALB target health and ECS task logs." 
+ exit 1 +fi + +echo "OK: API responded to GET /servers" +curl -fsS "$API_URL/servers" >/dev/null + +suffix=$(date +%s) +payload="{\"hostname\":\"aws-smoke-$suffix\",\"ip_address\":\"10.0.9.9\",\"state\":\"active\"}" +resp_file=$(mktemp) +resp_err=$(mktemp) +trap 'rm -f "$resp_file" "$resp_err"' EXIT +status=$(curl -sS -o "$resp_file" -w "%{http_code}" -X POST "$API_URL/servers" \ + -H 'Content-Type: application/json' -d "$payload") + +if [[ "$status" != "201" ]]; then + echo "Create server failed with status $status" + cat "$resp_file" + exit 1 +fi +echo "OK: POST /servers returned 201" + +PYTHON_BIN=${PYTHON_BIN:-python3} +if ! command -v "$PYTHON_BIN" >/dev/null 2>&1; then + PYTHON_BIN=python +fi + +id=$("$PYTHON_BIN" - <<'PY' "$resp_file" 2>"$resp_err" || true +import json +import sys +from pathlib import Path + +path = Path(sys.argv[1]) +print(json.loads(path.read_text())["id"]) +PY +) + +if [[ -z "$id" ]]; then + echo "Create server response could not be parsed as JSON:" + cat "$resp_file" + if [[ -s "$resp_err" ]]; then + echo "Parser error:" + cat "$resp_err" + fi + exit 1 +fi +echo "OK: created id=$id" + +curl -fsS "$API_URL/servers/$id" >/dev/null +echo "OK: GET /servers/$id" +curl -fsS -X DELETE "$API_URL/servers/$id" >/dev/null +echo "OK: DELETE /servers/$id" + +echo "Smoke test passed" diff --git a/scripts/check_updates.sh b/scripts/check_updates.sh new file mode 100755 index 0000000..4d20e14 --- /dev/null +++ b/scripts/check_updates.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -euo pipefail + +python -m pip install -r requirements.txt -r requirements-dev.txt +python -m pip list --outdated diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100755 index 0000000..d826c8f --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +set -euo pipefail + +TARGET=${DEPLOY_TARGET:-local} + +if [[ "$TARGET" == "local" ]]; then + docker compose up -d --build + echo "Local deployment is up at http://localhost:8000" + exit 0 +fi + +if 
[[ "$TARGET" != "aws" ]]; then
  echo "Unknown DEPLOY_TARGET: $TARGET (use 'local' or 'aws')" >&2
  exit 1
fi

# All of these are required for an AWS deployment; fail fast with a clear message.
: "${AWS_REGION:?Missing AWS_REGION}"
: "${AWS_ACCOUNT_ID:?Missing AWS_ACCOUNT_ID}"
: "${ECR_REPO:?Missing ECR_REPO}"
: "${ECS_CLUSTER:?Missing ECS_CLUSTER}"
: "${ECS_SERVICE:?Missing ECS_SERVICE}"
: "${DATABASE_URL:?Missing DATABASE_URL}"
: "${EXECUTION_ROLE_ARN:?Missing EXECUTION_ROLE_ARN}"
: "${TASK_ROLE_ARN:?Missing TASK_ROLE_ARN}"

# Tag the image with the short git SHA when available, else a timestamp.
IMAGE_TAG=${IMAGE_TAG:-$(git rev-parse --short HEAD 2>/dev/null || date +%s)}
IMAGE_URI="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECR_REPO:$IMAGE_TAG"
export IMAGE_URI

# Create the ECR repository on first deploy (describe fails when it is absent).
aws ecr describe-repositories --repository-names "$ECR_REPO" >/dev/null 2>&1 \
  || aws ecr create-repository --repository-name "$ECR_REPO" >/dev/null

aws ecr get-login-password --region "$AWS_REGION" \
  | docker login --username AWS --password-stdin \
    "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"

docker build -t "$IMAGE_URI" .
docker push "$IMAGE_URI"

# Render the task definition template by substituting the deploy-time values.
# BUGFIX: the output redirection must be attached to the `python -` command.
# The previous form ended the heredoc with `PY > /tmp/task-def.json`, which is
# not a valid terminator (the delimiter must appear alone on its line), so bash
# kept feeding the rest of this script to python as heredoc input and the
# task-def file was never written.
python - <<'PY' > /tmp/task-def.json
import os
from pathlib import Path

path = Path("deploy/aws/task-def.json")
raw = path.read_text()
raw = raw.replace("__IMAGE__", os.environ["IMAGE_URI"])
raw = raw.replace("__DATABASE_URL__", os.environ["DATABASE_URL"])
raw = raw.replace("__EXECUTION_ROLE_ARN__", os.environ["EXECUTION_ROLE_ARN"])
raw = raw.replace("__TASK_ROLE_ARN__", os.environ["TASK_ROLE_ARN"])
print(raw)
PY

aws ecs register-task-definition --cli-input-json file:///tmp/task-def.json >/tmp/task-def-out.json

# Extract the freshly-registered revision number from the CLI response.
REVISION=$(python - <<'PY'
import json
with open('/tmp/task-def-out.json') as f:
    print(json.load(f)['taskDefinition']['revision'])
PY
)

aws ecs update-service \
  --cluster "$ECS_CLUSTER" \
  --service "$ECS_SERVICE" \
  --task-definition "inventory-api:$REVISION" \
  --force-new-deployment

echo "AWS deployment updated: $ECS_CLUSTER/$ECS_SERVICE"
diff --git a/scripts/deploy_terraform_aws.sh
b/scripts/deploy_terraform_aws.sh new file mode 100755 index 0000000..77d1f7a --- /dev/null +++ b/scripts/deploy_terraform_aws.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +set -euo pipefail + +: "${AWS_REGION:=us-east-1}" +: "${PROJECT_NAME:=inventory}" +: "${DB_USERNAME:=inventory}" +: "${ACM_CERT_ARN:=}" +: "${API_DOMAIN_NAME:=}" +: "${ROUTE53_ZONE_ID:=}" +: "${DB_ENGINE_VERSION:=}" +: "${DESIRED_COUNT:=1}" + +ROOT_DIR=$(cd "$(dirname "$0")/.." && pwd) +TF_DIR="$ROOT_DIR/deploy/aws/terraform" + +base_tag=$(git -C "$ROOT_DIR" rev-parse --short HEAD 2>/dev/null || true) +if [[ -z "$base_tag" ]]; then + base_tag=$(date +%s) +fi +IMAGE_TAG=${IMAGE_TAG:-${base_tag}-$(date +%s)} + +if [[ -z "$ACM_CERT_ARN" ]]; then + echo "Warning: ACM_CERT_ARN not set; ALB will run HTTP only." +fi + +pushd "$TF_DIR" >/dev/null +terraform init +ECR_URL=$(terraform output -raw ecr_repository_url 2>/dev/null || true) +popd >/dev/null + +CLUSTER_NAME="${PROJECT_NAME}-cluster" +SERVICE_NAME="${PROJECT_NAME}-service" +ECR_REPO="${PROJECT_NAME}-api" + +repo_exists=0 +if aws ecr describe-repositories --repository-names "$ECR_REPO" >/dev/null 2>&1; then + repo_exists=1 +fi + +service_status=$(aws ecs describe-services --cluster "$CLUSTER_NAME" --services "$SERVICE_NAME" \ + --query 'services[0].status' --output text 2>/dev/null || true) +service_exists=0 +if [[ "$service_status" == "ACTIVE" ]]; then + service_exists=1 +fi + +if [[ "$repo_exists" -eq 0 ]]; then + echo "ECR repo not found; provisioning infra first." 
+ pushd "$TF_DIR" >/dev/null + terraform apply -auto-approve \ + -var="aws_region=$AWS_REGION" \ + -var="project_name=$PROJECT_NAME" \ + -var="db_username=$DB_USERNAME" \ + -var="db_engine_version=$DB_ENGINE_VERSION" \ + -var="acm_certificate_arn=$ACM_CERT_ARN" \ + -var="api_domain_name=$API_DOMAIN_NAME" \ + -var="route53_zone_id=$ROUTE53_ZONE_ID" \ + -var="image_tag=$IMAGE_TAG" \ + -var="desired_count=0" + ECR_URL=$(terraform output -raw ecr_repository_url) + popd >/dev/null +fi + +IMAGE_URI="$ECR_URL:$IMAGE_TAG" + +aws ecr get-login-password --region "$AWS_REGION" \ + | docker login --username AWS --password-stdin "${ECR_URL%/*}" + +docker buildx build --platform linux/amd64 -t "$IMAGE_URI" --push "$ROOT_DIR" + +pushd "$TF_DIR" >/dev/null +terraform apply -auto-approve \ + -var="aws_region=$AWS_REGION" \ + -var="project_name=$PROJECT_NAME" \ + -var="db_username=$DB_USERNAME" \ + -var="db_engine_version=$DB_ENGINE_VERSION" \ + -var="acm_certificate_arn=$ACM_CERT_ARN" \ + -var="api_domain_name=$API_DOMAIN_NAME" \ + -var="route53_zone_id=$ROUTE53_ZONE_ID" \ + -var="image_tag=$IMAGE_TAG" \ + -var="desired_count=$DESIRED_COUNT" +popd >/dev/null + +if [[ "$service_exists" -eq 1 ]]; then + echo "Waiting for ECS service to become stable..." + aws ecs wait services-stable --cluster "$CLUSTER_NAME" --services "$SERVICE_NAME" + tg_arn=$(aws elbv2 describe-target-groups --names "${PROJECT_NAME}-tg" \ + --query 'TargetGroups[0].TargetGroupArn' --output text 2>/dev/null || true) + if [[ -n "$tg_arn" ]]; then + echo "Waiting for target group to be healthy..." + aws elbv2 wait target-in-service --target-group-arn "$tg_arn" + fi +fi + +API_URL=$(terraform -chdir="$TF_DIR" output -raw api_url) +echo "Deployment complete. 
API URL: $API_URL" + +if [[ -n "$API_URL" ]]; then + API_URL="$API_URL" "$ROOT_DIR/scripts/aws_smoke_test.sh" +fi diff --git a/scripts/security_checks.sh b/scripts/security_checks.sh new file mode 100755 index 0000000..925ee92 --- /dev/null +++ b/scripts/security_checks.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -euo pipefail + +python -m pip install -r requirements-dev.txt + +python -m ruff check app cli tests +python -m bandit -r app cli +python -m pip_audit -r requirements.txt + +docker run --rm -i hadolint/hadolint < Dockerfile + +docker run --rm -v "$(pwd)":/src aquasec/trivy:latest \ + fs --exit-code 1 --severity HIGH,CRITICAL /src diff --git a/scripts/terraform_security_checks.sh b/scripts/terraform_security_checks.sh new file mode 100755 index 0000000..0ec5fc3 --- /dev/null +++ b/scripts/terraform_security_checks.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR=$(cd "$(dirname "$0")/.." && pwd) +TF_DIR="$ROOT_DIR/deploy/aws/terraform" + +pushd "$TF_DIR" >/dev/null +terraform fmt -check +terraform validate +popd >/dev/null + +# Static security scans (free/open-source) via containers + +docker run --rm -v "$TF_DIR":/src aquasec/tfsec:latest /src + +docker run --rm -v "$TF_DIR":/src bridgecrew/checkov:latest \ + --directory /src --quiet diff --git a/scripts/terraform_update_check.sh b/scripts/terraform_update_check.sh new file mode 100755 index 0000000..be57e48 --- /dev/null +++ b/scripts/terraform_update_check.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR=$(cd "$(dirname "$0")/.." 
&& pwd) +TF_DIR="$ROOT_DIR/deploy/aws/terraform" + +pushd "$TF_DIR" >/dev/null +terraform init -upgrade +terraform providers lock -platform=linux_amd64 -platform=darwin_amd64 -platform=darwin_arm64 +terraform providers +popd >/dev/null diff --git a/terraform_1.14.3_darwin_arm64.zip b/terraform_1.14.3_darwin_arm64.zip new file mode 100644 index 0000000..dcb95be Binary files /dev/null and b/terraform_1.14.3_darwin_arm64.zip differ diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000..3f5c8a9 --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,112 @@ +import os + +import psycopg2 +import pytest +from fastapi.testclient import TestClient + +from app import db +from app.main import app + + +DATABASE_URL = os.getenv( + "DATABASE_URL", "postgresql://postgres:postgres@localhost:5432/inventory" +) + + +def _db_available() -> bool: + try: + conn = psycopg2.connect(DATABASE_URL) + conn.close() + return True + except Exception: + return False + + +@pytest.fixture(scope="session") +def client(): + if not _db_available(): + pytest.skip("PostgreSQL is not available for tests") + return TestClient(app) + + +@pytest.fixture(autouse=True) +def clean_db(): + if not _db_available(): + return + db.init_db() + conn = psycopg2.connect(DATABASE_URL) + try: + with conn.cursor() as cur: + cur.execute("TRUNCATE TABLE servers RESTART IDENTITY") + conn.commit() + finally: + conn.close() + + +def test_crud_flow(client): + payload = {"hostname": "srv-1", "ip_address": "10.0.0.1", "state": "active"} + resp = client.post("/servers", json=payload) + assert resp.status_code == 201 + data = resp.json() + assert data["id"] == 1 + + resp = client.get("/servers") + assert resp.status_code == 200 + assert len(resp.json()) == 1 + + resp = client.get("/servers/1") + assert resp.status_code == 200 + assert resp.json()["hostname"] == "srv-1" + + update = {"hostname": "srv-1b", "ip_address": "10.0.0.2", "state": "offline"} + resp = client.put("/servers/1", json=update) + assert 
resp.status_code == 200
    assert resp.json()["hostname"] == "srv-1b"

    # Deleting the server returns 204 and makes subsequent reads 404.
    resp = client.delete("/servers/1")
    assert resp.status_code == 204

    resp = client.get("/servers/1")
    assert resp.status_code == 404


def test_unique_hostname_validation(client):
    """Creating two servers with the same hostname fails with HTTP 400."""
    payload = {"hostname": "srv-unique", "ip_address": "10.0.0.10", "state": "active"}
    resp = client.post("/servers", json=payload)
    assert resp.status_code == 201

    # Same hostname, different IP/state: the API must reject the duplicate
    # with the exact error message documented in API.md.
    resp = client.post(
        "/servers",
        json={"hostname": "srv-unique", "ip_address": "10.0.0.11", "state": "offline"},
    )
    assert resp.status_code == 400
    assert resp.json()["detail"] == "hostname must be unique"


def test_ip_validation(client):
    """A syntactically invalid IP address is rejected at validation time (422)."""
    resp = client.post(
        "/servers",
        json={"hostname": "srv-ip", "ip_address": "not-an-ip", "state": "active"},
    )
    assert resp.status_code == 422


def test_state_validation(client):
    """A state outside the allowed set (active/offline/retired) is rejected (422)."""
    resp = client.post(
        "/servers",
        json={"hostname": "srv-state", "ip_address": "10.0.0.12", "state": "broken"},
    )
    assert resp.status_code == 422


def test_all_states_supported(client):
    """Every documented state value can be stored and is echoed back on create."""
    states = ["active", "offline", "retired"]
    # Distinct hostnames and IPs per state so the uniqueness constraint is not hit.
    for idx, state in enumerate(states, start=1):
        payload = {
            "hostname": f"srv-state-{state}",
            "ip_address": f"10.0.1.{idx}",
            "state": state,
        }
        resp = client.post("/servers", json=payload)
        assert resp.status_code == 201
        assert resp.json()["state"] == state