diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
new file mode 100644
index 0000000..eccaedb
--- /dev/null
+++ b/.github/workflows/deploy.yml
@@ -0,0 +1,57 @@
+on:
+  workflow_call:
+    inputs:
+      aws_region:
+        description: The AWS region target for deployment
+        required: true
+        type: string
+      aws_replication_region:
+        description: The AWS replication region target for deployment
+        required: true
+        type: string
+      aws_s3_terraform_state_object_key:
+        description: The key of the Terraform .tfstate file in AWS S3
+        required: true
+        type: string
+      environment_name:
+        description: The name of the environment configured in the GitHub repository settings
+        required: true
+        type: string
+    secrets:
+      aws_assume_role_arn:
+        description: The AWS IAM role assumed by GitHub Actions
+      aws_s3_terraform_state_bucket_name:
+        description: The AWS S3 bucket name containing Terraform backends, configured in the GitHub repository settings
+        required: true
+
+jobs:
+  deploy:
+    name: Deploy
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone the Git repository
+        uses: actions/checkout@v3
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4.0.1
+        with:
+          role-to-assume: ${{ secrets.aws_assume_role_arn }}
+          aws-region: ${{ inputs.aws_region }}
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v2
+      - name: Terraform Format
+        run: terraform fmt -check
+        working-directory: ./terraform
+      - name: Terraform Init
+        run: |
+          terraform init \
+            -backend-config="bucket=${{ secrets.aws_s3_terraform_state_bucket_name }}" \
+            -backend-config="key=${{ inputs.aws_s3_terraform_state_object_key }}" \
+            -backend-config="region=${{ inputs.aws_region }}"
+        working-directory: ./terraform
+      - name: Terraform Apply
+        run: |
+          terraform apply -auto-approve \
+            -var="aws_region=${{ inputs.aws_region }}" \
+            -var="aws_replication_region=${{ inputs.aws_replication_region }}" \
+            -var="environment=${{ inputs.environment_name }}"
+        working-directory: ./terraform
diff --git a/.github/workflows/development.yml b/.github/workflows/development.yml
new file mode 100644
index 0000000..8717528
--- /dev/null
+++ b/.github/workflows/development.yml
@@ -0,0 +1,33 @@
+name: Deploy development
+
+on:
+  push:
+    branches:
+      - develop
+      - feature/*
+
+permissions:
+  id-token: write # This is required for requesting the JWT
+  contents: read # This is required for actions/checkout
+
+jobs:
+  deploy:
+    name: Deploy to development
+    uses: ./.github/workflows/deploy.yml
+    # Originally this workflow was set up to use environment variables
+    # configured in the GitHub repository settings. However, after moving
+    # to a reusable workflow, it became awkward to pass those values into
+    # the called workflow due to this bug:
+    #
+    # https://github.com/orgs/community/discussions/26671#discussioncomment-4295807
+    #
+    # So the values are now hardcoded here, and this file serves as a manifest. See
+    # commit 1ec7a0346abc04b73c03e35c0e228e9dba14300c for the previous implementation.
+    with:
+      aws_region: us-east-1
+      aws_replication_region: us-west-2
+      aws_s3_terraform_state_object_key: development.tfstate
+      environment_name: dev
+    secrets:
+      aws_assume_role_arn: ${{ secrets.AWS_ASSUME_ROLE_ARN }}
+      aws_s3_terraform_state_bucket_name: ${{ secrets.AWS_S3_TERRAFORM_STATE_BUCKET_NAME }}
diff --git a/.github/workflows/production.yml b/.github/workflows/production.yml
new file mode 100644
index 0000000..59e1a7d
--- /dev/null
+++ b/.github/workflows/production.yml
@@ -0,0 +1,32 @@
+name: Deploy production
+
+on:
+  push:
+    branches:
+      - main
+
+permissions:
+  id-token: write # This is required for requesting the JWT
+  contents: read # This is required for actions/checkout
+
+jobs:
+  deploy:
+    name: Deploy to production
+    uses: ./.github/workflows/deploy.yml
+    # Originally this workflow was set up to use environment variables
+    # configured in the GitHub repository settings. However, after moving
+    # to a reusable workflow, it became awkward to pass those values into
+    # the called workflow due to this bug:
+    #
+    # https://github.com/orgs/community/discussions/26671#discussioncomment-4295807
+    #
+    # So the values are now hardcoded here, and this file serves as a manifest. See
+    # commit 1ec7a0346abc04b73c03e35c0e228e9dba14300c for the previous implementation.
+    with:
+      aws_region: us-east-1
+      aws_replication_region: us-west-2
+      aws_s3_terraform_state_object_key: production.tfstate
+      environment_name: prod
+    secrets:
+      aws_assume_role_arn: ${{ secrets.AWS_ASSUME_ROLE_ARN }}
+      aws_s3_terraform_state_bucket_name: ${{ secrets.AWS_S3_TERRAFORM_STATE_BUCKET_NAME }}
diff --git a/.terraform-version b/.terraform-version
new file mode 100644
index 0000000..6463e95
--- /dev/null
+++ b/.terraform-version
@@ -0,0 +1 @@
+1.6.4
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..42542b3
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,10 @@
+# Unreleased
+
+# 0.1.0
+
+* CloudWatch log group encryption using a KMS CMK
+* Build improvements to leverage a callable deployment workflow and explicit manifests for the development and production environments
+* ECS cluster, service, task and ALB running a vanilla nginx image
+* KMS customer managed key provisioning with multi-region replication
+* VPC network provisioning
+* GitHub Actions Terraform workflow definition and integration
diff --git a/terraform/data.tf b/terraform/data.tf
new file mode 100644
index 0000000..8fc4b38
--- /dev/null
+++ b/terraform/data.tf
@@ -0,0 +1 @@
+data "aws_caller_identity" "current" {}
diff --git a/terraform/ec2.tf b/terraform/ec2.tf
new file mode 100644
index 0000000..5e31cd6
--- /dev/null
+++ b/terraform/ec2.tf
@@ -0,0 +1,65 @@
+# Security group for the ALB accepting HTTP connections on port 80
+#
+# TODO: implement TLS-encrypted traffic and redirect HTTP to HTTPS
+resource "aws_security_group" "alb" {
+  name   = "${local.namespace}-alb"
+  vpc_id = aws_vpc.vpc.id
+}
+
+resource "aws_security_group_rule" "alb_ingress_http" {
+  cidr_blocks       = ["0.0.0.0/0"]
+  description       = "Allow public HTTP traffic"
+  from_port         = 80
+  ipv6_cidr_blocks  = ["::/0"]
+  protocol          = "tcp"
+  security_group_id = aws_security_group.alb.id
+  to_port           = 80
+  type              = "ingress"
+}
+
+resource "aws_security_group_rule" "alb_egress_all" {
+  cidr_blocks       = ["0.0.0.0/0"]
+  description       = "Allow all outbound traffic"
+  from_port         = 0
+  ipv6_cidr_blocks  = ["::/0"]
+  protocol          = -1
+  security_group_id = aws_security_group.alb.id
+  to_port           = 0
+  type              = "egress"
+}
+
+resource "aws_lb_target_group" "alb" {
+  name        = local.namespace
+  port        = 80
+  protocol    = "HTTP"
+  target_type = "ip"
+
+  health_check {
+    # TODO: review health check
+    enabled  = true
+    path     = "/"
+    port     = 80
+    protocol = "HTTP"
+  }
+
+  vpc_id = aws_vpc.vpc.id
+}
+
+resource "aws_lb" "alb" {
+  name               = local.namespace
+  internal           = false
+  load_balancer_type = "application"
+  security_groups    = [aws_security_group.alb.id]
+  subnets            = local.public_subnet_ids
+}
+
+resource "aws_lb_listener" "alb" {
+  load_balancer_arn = aws_lb.alb.id
+  port              = 80
+  protocol          = "HTTP"
+
+  default_action {
+    target_group_arn = aws_lb_target_group.alb.id
+    type             = "forward"
+  }
+}
diff --git a/terraform/ecs.tf b/terraform/ecs.tf
new file mode 100644
index 0000000..2cb79f2
--- /dev/null
+++ b/terraform/ecs.tf
@@ -0,0 +1,127 @@
+# Encrypt log data with a KMS CMK: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html
+resource "aws_cloudwatch_log_group" "hello_world" {
+  kms_key_id        = aws_kms_key.primary.arn
+  name              = "/ecs/${local.namespace}/hello-world"
+  retention_in_days = 30
+}
+
+resource "aws_ecs_cluster" "cluster" {
+  name = local.namespace
+}
+
+# Task execution assumed role
+data "aws_iam_policy_document" "ecs_task_assume_role" {
+  statement {
+    actions = ["sts:AssumeRole"]
+    effect  = "Allow"
+    principals {
+      identifiers = ["ecs-tasks.amazonaws.com"]
+      type        = "Service"
+    }
+  }
+}
+
+resource "aws_iam_role" "ecs_task_execution" {
+  name               = "${local.namespace}_ecs_task_execution"
+  assume_role_policy = data.aws_iam_policy_document.ecs_task_assume_role.json
+}
+
+# Attach the AWS-managed policy for basic logging and ECR repository permissions
+resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" {
+  role       = aws_iam_role.ecs_task_execution.name
+  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
+}
+
+# Task definition featuring
+# * CloudWatch logs integration
+resource "aws_ecs_task_definition" "hello_world" {
+  container_definitions = jsonencode([
+    {
+      # TODO: parameterize cpu, or remove this value because it is not required
+      # for Fargate containers when assigned at the task level and we only have one task
+      cpu = 256
+      # TODO: specify an image tag and eventually parameterize
+      image = "nginx"
+      logConfiguration = {
+        logDriver = "awslogs"
+        options = {
+          "awslogs-group" : aws_cloudwatch_log_group.hello_world.name
+          "awslogs-region" : var.aws_region
+          "awslogs-stream-prefix" : local.namespace
+        }
+      },
+      # TODO: parameterize memory, or remove this value because it is not required
+      # for Fargate containers when assigned at the task level and we only have one task
+      memory = 512
+      name   = "hello-world"
+      portMappings = [
+        {
+          hostPort      = 80,
+          containerPort = 80,
+          protocol      = "tcp"
+        }
+      ]
+    }
+  ])
+
+  # TODO: parameterize cpu
+  cpu                = 256
+  execution_role_arn = aws_iam_role.ecs_task_execution.arn
+  family             = "${local.namespace}-hello-world"
+  # TODO: parameterize memory
+  memory                   = 512
+  network_mode             = "awsvpc"
+  requires_compatibilities = ["FARGATE"]
+}
+
+# Security group for the hello-world ECS service accepting HTTP
+# connections from the ALB security group
+resource "aws_security_group" "app" {
+  name   = "${local.namespace}-app"
+  vpc_id = aws_vpc.vpc.id
+}
+
+resource "aws_security_group_rule" "app_ingress_http" {
+  description              = "Allow HTTP from ALB"
+  from_port                = 80
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.app.id
+  source_security_group_id = aws_security_group.alb.id
+  to_port                  = 80
+  type                     = "ingress"
+}
+
"aws_security_group_rule" "app_egress_all" { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow all outbound traffic" + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = -1 + security_group_id = aws_security_group.app.id + to_port = 0 + type = "egress" +} + +# Hello World ECS service +resource "aws_ecs_service" "hello_world" { + name = "${local.namespace}-hello-world" + + cluster = aws_ecs_cluster.cluster.id + desired_count = 1 + launch_type = "FARGATE" + + # TODO: consider service encrypted internal traffic between + # ALB and ECS container on 443 - requires self-signed cert + load_balancer { + target_group_arn = aws_lb_target_group.alb.arn + container_name = "hello-world" + container_port = 80 + } + + network_configuration { + security_groups = [aws_security_group.app.id] + subnets = local.private_subnet_ids + } + + task_definition = aws_ecs_task_definition.hello_world.arn +} diff --git a/terraform/kms.tf b/terraform/kms.tf new file mode 100644 index 0000000..4ce4f6f --- /dev/null +++ b/terraform/kms.tf @@ -0,0 +1,69 @@ +data "aws_iam_policy_document" "kms_primary_default" { + policy_id = "key-default-1" + + # This statement is a copy of the default statement, + # so as to not lose access to the key. + statement { + actions = ["kms:*"] + effect = "Allow" + principals { + identifiers = ["arn:aws:iam::${local.account_id}:root"] + type = "AWS" + } + resources = ["*"] + sid = "Enable IAM User Permissions" + } + + # Grant access to encrypt the hello-world CloudWatch log group + # https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html + statement { + actions = [ + "kms:Encrypt*", + "kms:Decrypt*", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:Describe*" + ] + effect = "Allow" + principals { + identifiers = ["logs.${var.aws_region}.amazonaws.com"] + type = "Service" + } + resources = ["*"] + condition { + test = "ArnEquals" + # Interpolate this (for now) due to dependency cycle when referencing + values = ["arn:aws:logs:${var.aws_region}:${local.account_id}:log-group:/ecs/${local.namespace}/hello-world"] + variable = "kms:EncryptionContext:aws:logs:arn" + } + } +} + +resource "aws_kms_key" "primary" { + description = "A custom, multi-region encryption key for securing data globally" + enable_key_rotation = true + multi_region = true + policy = data.aws_iam_policy_document.kms_primary_default.json +} + +resource "aws_kms_alias" "primary" { + name = "alias/${local.namespace}" + target_key_id = aws_kms_key.primary.key_id +} + +# Replicated KMS key +# +# TODO: support taking a list of replicated regions? 
+resource "aws_kms_replica_key" "replicated" {
+  provider = aws.replicated
+
+  description     = "Multi-region replica key"
+  primary_key_arn = aws_kms_key.primary.arn
+}
+
+resource "aws_kms_alias" "replicated" {
+  provider = aws.replicated
+
+  name          = "alias/${local.namespace}"
+  target_key_id = aws_kms_replica_key.replicated.key_id
+}
diff --git a/terraform/locals.tf b/terraform/locals.tf
new file mode 100644
index 0000000..4f8bcba
--- /dev/null
+++ b/terraform/locals.tf
@@ -0,0 +1,11 @@
+locals {
+  account_id = data.aws_caller_identity.current.account_id
+  alb_ips    = [for v in aws_lb.alb.subnet_mapping : v.private_ipv4_address]
+  default_tags = {
+    Application = "aws-swan-demo"
+    Environment = var.environment
+  }
+  namespace          = "aws-swan-demo-${var.environment}"
+  private_subnet_ids = [aws_subnet.private_1.id, aws_subnet.private_2.id]
+  public_subnet_ids  = [aws_subnet.public_1.id, aws_subnet.public_2.id]
+}
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000..ef6f8fe
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,30 @@
+terraform {
+  backend "s3" {}
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+  }
+
+  required_version = ">= 1.6.4"
+}
+
+provider "aws" {
+  default_tags {
+    tags = local.default_tags
+  }
+
+  region = var.aws_region
+}
+
+provider "aws" {
+  alias = "replicated"
+
+  default_tags {
+    tags = local.default_tags
+  }
+
+  region = var.aws_replication_region
+}
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000..823bd96
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,22 @@
+variable "aws_region" {
+  default     = "us-east-1"
+  description = "The AWS region name in which the main infrastructure should be deployed."
+  type        = string
+}
+
+variable "aws_replication_region" {
+  default     = "us-west-2"
+  description = "The AWS replication region where resources are provisioned for failover."
+  type        = string
+}
+
+variable "vpc_cidr_index" {
+  default     = 0
+  description = "The value of the second octet of the VPC CIDR block, acting as an index to support multiple environments. The default CIDR range is 10.0.0.0/16, so setting this to 1 would initialize the VPC to a CIDR range of 10.1.0.0/16. This is a negotiated stopgap that allows multiple instances of the application to be provisioned in one region without CIDR collisions."
+  type        = number
+}
+
+variable "environment" {
+  description = "Name of the provisioned environment for namespacing purposes."
+  type        = string
+}
diff --git a/terraform/vpc.tf b/terraform/vpc.tf
new file mode 100644
index 0000000..a61aaa0
--- /dev/null
+++ b/terraform/vpc.tf
@@ -0,0 +1,126 @@
+resource "aws_vpc" "vpc" {
+  cidr_block           = "10.${var.vpc_cidr_index}.0.0/16"
+  enable_dns_support   = true
+  enable_dns_hostnames = true
+
+  tags = {
+    Name = local.namespace
+  }
+}
+
+# Public subnets
+resource "aws_subnet" "public_1" {
+  availability_zone       = "${var.aws_region}a"
+  cidr_block              = "10.${var.vpc_cidr_index}.1.0/24"
+  map_public_ip_on_launch = true
+  vpc_id                  = aws_vpc.vpc.id
+
+  tags = {
+    Name = "Public Subnet 1"
+  }
+}
+
+resource "aws_subnet" "public_2" {
+  availability_zone       = "${var.aws_region}b"
+  cidr_block              = "10.${var.vpc_cidr_index}.2.0/24"
+  map_public_ip_on_launch = true
+  vpc_id                  = aws_vpc.vpc.id
+
+  tags = {
+    Name = "Public Subnet 2"
+  }
+}
+
+# Private subnets
+resource "aws_subnet" "private_1" {
+  availability_zone = "${var.aws_region}a"
+  cidr_block        = "10.${var.vpc_cidr_index}.3.0/24"
+  vpc_id            = aws_vpc.vpc.id
+
+  tags = {
+    Name = "Private Subnet 1"
+  }
+}
+
+resource "aws_subnet" "private_2" {
+  availability_zone = "${var.aws_region}b"
+  cidr_block        = "10.${var.vpc_cidr_index}.4.0/24"
+  vpc_id            = aws_vpc.vpc.id
+
+  tags = {
+    Name = "Private Subnet 2"
+  }
+}
+
+# Create an internet gateway for public access
+resource "aws_internet_gateway" "gateway" {
+  vpc_id = aws_vpc.vpc.id
+
+  tags = {
+    Name = local.namespace
+  }
+}
+
+# Route public traffic through the internet gateway
+resource "aws_route_table" "public" {
+  vpc_id = aws_vpc.vpc.id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.gateway.id
+  }
+
+  tags = {
+    Name = "${local.namespace}-public"
+  }
+}
+
+# Assign the public subnets to the public route table
+resource "aws_route_table_association" "public_subnet_1" {
+  subnet_id      = aws_subnet.public_1.id
+  route_table_id = aws_route_table.public.id
+}
+
+resource "aws_route_table_association" "public_subnet_2" {
+  subnet_id      = aws_subnet.public_2.id
+  route_table_id = aws_route_table.public.id
+}
+
+# Create a NAT gateway to give the private subnets outbound internet access
+resource "aws_eip" "eip" {
+  domain = "vpc"
+}
+
+resource "aws_nat_gateway" "gateway" {
+  allocation_id = aws_eip.eip.id
+  subnet_id     = aws_subnet.public_1.id
+
+  tags = {
+    Name = local.namespace
+  }
+}
+
+# Route outbound traffic from the private subnets through the NAT gateway
+resource "aws_route_table" "private" {
+  vpc_id = aws_vpc.vpc.id
+
+  route {
+    cidr_block     = "0.0.0.0/0"
+    nat_gateway_id = aws_nat_gateway.gateway.id
+  }
+
+  tags = {
+    Name = "${local.namespace}-private"
+  }
+}
+
+# Assign the private subnets to the private route table
+resource "aws_route_table_association" "private_subnet_1" {
+  subnet_id      = aws_subnet.private_1.id
+  route_table_id = aws_route_table.private.id
+}
+
+resource "aws_route_table_association" "private_subnet_2" {
+  subnet_id      = aws_subnet.private_2.id
+  route_table_id = aws_route_table.private.id
+}
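For reference, a minimal sketch of reproducing the init/apply sequence from deploy.yml on a workstation, assuming AWS credentials are already configured in the shell and using the development manifest values; TF_STATE_BUCKET is a placeholder for the value held in the AWS_S3_TERRAFORM_STATE_BUCKET_NAME repository secret:

    cd terraform

    # Initialize the S3 backend with the same settings the Terraform Init step passes in.
    # Export TF_STATE_BUCKET to the real bucket name first.
    terraform init \
      -backend-config="bucket=${TF_STATE_BUCKET}" \
      -backend-config="key=development.tfstate" \
      -backend-config="region=us-east-1"

    # Mirror the Terraform Apply step with the development manifest values;
    # -auto-approve is omitted so the plan can be reviewed before applying.
    terraform apply \
      -var="aws_region=us-east-1" \
      -var="aws_replication_region=us-west-2" \
      -var="environment=dev"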