diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index acb84d2..6907acc 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -1,8 +1,28 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
+provider "registry.terraform.io/hashicorp/archive" {
+ version = "2.4.1"
+ hashes = [
+ "h1:3mCpFxc6HwDIETCFHNENlxBUgKdsW2S1EmVHARn9Lgk=",
+ "zh:00240c042740d18d6ba545b211ff7ed5a9e8490d30be3f865e71dba90d7a34cf",
+ "zh:230c285beafaffd8d60da3446157b95f8fb43b359ba94b09214c1822bf310c3d",
+ "zh:726672a0e61a1d39695ce5e330aa3e6caa97f2a9438cf8125360e80f4cb52fa5",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:7bc8f4a4fc7059ec01e767246df7937603dbc6ec49cb3eedffe6ecb68dbe9cb4",
+ "zh:800d898ce8ac96b244746c5a41f4107bd3c883fe6093d9a972a28b138ac02c4e",
+ "zh:9a8ea216af3840af48c08ef5ed998606c556b15be30d7b42c89a62df54285903",
+ "zh:b9905d0ac55b61ea78ecf0e6b07d54a9863a9f02e249d0d492e68cfcede0d89f",
+ "zh:c822495ba01ab7cee66c892f941097971c3be122a6200d556f462a751d446df8",
+ "zh:e05c31f2f4dca9eaada2726d16d2ffb03d6441b4eb55547b93d62d81383cd0ef",
+ "zh:ec14c68ca5d881bac73dbbd298f0ca84444001a81d473f51e36c4e29df040983",
+ "zh:ed32ebccb20b21c112f01d73d138ba5ada28cf8ede175441738a30711c79119a",
+ ]
+}
+
provider "registry.terraform.io/hashicorp/aws" {
- version = "5.30.0"
+ version = "5.30.0"
+ constraints = ">= 5.24.0"
hashes = [
"h1:6SZLydYMDqhA4A+Fh0oZswJ+McOBf2q+XdSuMFbPzHI=",
"h1:6ZRzAlt5BT1wD7NlRWdKJT5l4DXzMtpHcgEi/xskozM=",
diff --git a/README.md b/README.md
index 59a86ca..cc4fc9a 100644
--- a/README.md
+++ b/README.md
@@ -10,13 +10,15 @@ This project creates and manages resources within an AWS account for infrastruct
| Name | Version |
|------|---------|
-| [terraform](#requirement\_terraform) | >= 1.6.3 |
-| [aws](#requirement\_aws) | >= 5.24.0 |
+| [terraform](#requirement\_terraform) | >= 1.6.5 |
+| [archive](#requirement\_archive) | >= 2.4.1 |
+| [aws](#requirement\_aws) | >= 5.30.0 |
## Providers
| Name | Version |
|------|---------|
+| [archive](#provider\_archive) | 2.4.1 |
| [aws](#provider\_aws) | 5.30.0 |
| [aws.awsroute53root](#provider\_aws.awsroute53root) | 5.30.0 |
@@ -25,18 +27,40 @@ This project creates and manages resources within an AWS account for infrastruct
| Name | Type |
|------|------|
| [aws_athena_workgroup.infrastructure_vpc_flow_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_workgroup) | resource |
+| [aws_autoscaling_group.infrastructure_ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource |
+| [aws_autoscaling_lifecycle_hook.infrastructure_ecs_cluster_termination](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
+| [aws_cloudwatch_log_group.ecs_cluster_infrastructure_draining_lambda_log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_cloudwatch_log_group.infrastructure_vpc_flow_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource |
| [aws_default_network_acl.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/default_network_acl) | resource |
+| [aws_ecs_cluster.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_cluster) | resource |
| [aws_eip.infrastructure_nat](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eip) | resource |
| [aws_flow_log.infrastructure_vpc_flow_logs_cloudwatch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/flow_log) | resource |
| [aws_flow_log.infrastructure_vpc_flow_logs_s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/flow_log) | resource |
| [aws_glue_catalog_database.infrastructure_vpc_flow_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/glue_catalog_database) | resource |
| [aws_glue_catalog_table.infrastructure_vpc_flow_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/glue_catalog_table) | resource |
+| [aws_iam_instance_profile.infrastructure_ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_policy.ecs_cluster_infrastructure_draining_ecs_container_instance_state_update_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.ecs_cluster_infrastructure_draining_kms_encrypt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.ecs_cluster_infrastructure_draining_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.ecs_cluster_infrastructure_draining_sns_publish_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.infrastructure_ecs_cluster_ec2_ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.ecs_cluster_infrastructure_draining_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.infrastructure_ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.infrastructure_ecs_cluster_autoscaling_lifecycle_termination](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role.infrastructure_vpc_flow_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy.infrastructure_ecs_cluster_autoscaling_lifecycle_termination](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource |
| [aws_iam_role_policy.infrastructure_vpc_flow_logs_allow_cloudwatch_rw](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource |
+| [aws_iam_role_policy_attachment.ecs_cluster_infrastructure_draining_ecs_container_instance_state_update_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.ecs_cluster_infrastructure_draining_kms_encrypt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.ecs_cluster_infrastructure_draining_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.ecs_cluster_infrastructure_draining_sns_publish_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.infrastructure_ecs_cluster_ec2_ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_internet_gateway.infrastructure_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource |
| [aws_kms_alias.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_alias) | resource |
| [aws_kms_key.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [aws_lambda_function.ecs_cluster_infrastructure_draining](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource |
+| [aws_lambda_permission.ecs_cluster_infrastructure_draining_allow_sns_execution](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource |
+| [aws_launch_template.infrastructure_ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_nat_gateway.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) | resource |
| [aws_network_acl.infrastructure_private](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/network_acl) | resource |
| [aws_network_acl.infrastructure_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/network_acl) | resource |
@@ -50,6 +74,7 @@ This project creates and manages resources within an AWS account for infrastruct
| [aws_network_acl_rule.ingress_allow_all_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/network_acl_rule) | resource |
| [aws_network_acl_rule.ingress_private](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/network_acl_rule) | resource |
| [aws_network_acl_rule.ingress_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/network_acl_rule) | resource |
+| [aws_placement_group.infrastructure_ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/placement_group) | resource |
| [aws_route.infrustructure_public_internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource |
| [aws_route.private_nat_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource |
| [aws_route53_record.infrastructure_ns](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource |
@@ -64,9 +89,20 @@ This project creates and manages resources within an AWS account for infrastruct
| [aws_s3_bucket_public_access_block.infrastructure_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block) | resource |
| [aws_s3_bucket_server_side_encryption_configuration.infrastructure_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource |
| [aws_s3_bucket_versioning.infrastructure_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource |
+| [aws_security_group.infrastructure_ecs_cluster_container_instances](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_egress_dns_tcp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_egress_dns_udp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_egress_https_tcp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_egress_https_udp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_ingress_tcp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_security_group_rule.infrastructure_ecs_cluster_container_instances_ingress_udp](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic) | resource |
+| [aws_sns_topic_subscription.ecs_cluster_infrastructure_draining_autoscaling_lifecycle_termination](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_subscription) | resource |
| [aws_subnet.infrastructure_private](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource |
| [aws_subnet.infrastructure_public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource |
| [aws_vpc.infrastructure](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource |
+| [archive_file.ecs_cluster_infrastructure_draining_lambda](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source |
+| [aws_ami.ecs_cluster_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_route53_zone.root](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source |
@@ -76,8 +112,22 @@ This project creates and manages resources within an AWS account for infrastruct
|------|-------------|------|---------|:--------:|
| [aws\_profile\_name\_route53\_root](#input\_aws\_profile\_name\_route53\_root) | AWS Profile name which is configured for the account in which the root Route53 Hosted Zone exists. | `string` | n/a | yes |
| [aws\_region](#input\_aws\_region) | AWS region in which to launch resources | `string` | n/a | yes |
+| [enable\_infrastructure\_ecs\_cluster](#input\_enable\_infrastructure\_ecs\_cluster) | Enable creation of infrastructure ECS cluster, to place ECS services | `bool` | n/a | yes |
| [enable\_infrastructure\_route53\_hosted\_zone](#input\_enable\_infrastructure\_route53\_hosted\_zone) | Creates a Route53 hosted zone, where DNS records will be created for resources launched within this module. | `bool` | n/a | yes |
| [environment](#input\_environment) | The environment name to be used as part of the resource prefix | `string` | n/a | yes |
+| [infrastructure\_dockerhub\_email](#input\_infrastructure\_dockerhub\_email) | Dockerhub email | `string` | n/a | yes |
+| [infrastructure\_dockerhub\_token](#input\_infrastructure\_dockerhub\_token) | Dockerhub token which has permissions to pull images | `string` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_ami\_version](#input\_infrastructure\_ecs\_cluster\_ami\_version) | AMI version for ECS cluster instances (amzn2-ami-ecs-hvm-) | `string` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_draining\_lambda\_enabled](#input\_infrastructure\_ecs\_cluster\_draining\_lambda\_enabled) | Enable the Lambda which ensures all containers have drained before terminating ECS cluster instances | `bool` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_draining\_lambda\_log\_retention](#input\_infrastructure\_ecs\_cluster\_draining\_lambda\_log\_retention) | Log retention for the ECS cluster draining Lambda | `number` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_ebs\_docker\_storage\_volume\_size](#input\_infrastructure\_ecs\_cluster\_ebs\_docker\_storage\_volume\_size) | Size of EBS volume for Docker storage on the infrastructure ECS instances | `number` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_ebs\_docker\_storage\_volume\_type](#input\_infrastructure\_ecs\_cluster\_ebs\_docker\_storage\_volume\_type) | Type of EBS volume for Docker storage on the infrastructure ECS instances (eg. gp3) | `string` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_instance\_type](#input\_infrastructure\_ecs\_cluster\_instance\_type) | The instance type for EC2 instances launched in the ECS cluster | `string` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_max\_instance\_lifetime](#input\_infrastructure\_ecs\_cluster\_max\_instance\_lifetime) | Maximum lifetime in seconds of an instance within the ECS cluster | `number` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_max\_size](#input\_infrastructure\_ecs\_cluster\_max\_size) | Maximum number of instances for the ECS cluster | `number` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_min\_size](#input\_infrastructure\_ecs\_cluster\_min\_size) | Minimum number of instances for the ECS cluster | `number` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_publicly\_avaialble](#input\_infrastructure\_ecs\_cluster\_publicly\_avaialble) | Conditionally launch the ECS cluster EC2 instances into the Public subnet | `bool` | n/a | yes |
+| [infrastructure\_ecs\_cluster\_termination\_timeout](#input\_infrastructure\_ecs\_cluster\_termination\_timeout) | The timeout for the termination lifecycle hook | `number` | n/a | yes |
| [infrastructure\_kms\_encryption](#input\_infrastructure\_kms\_encryption) | Enable infrastructure KMS encryption. This will create a single KMS key to be used across all resources that support KMS encryption. | `bool` | n/a | yes |
| [infrastructure\_logging\_bucket\_retention](#input\_infrastructure\_logging\_bucket\_retention) | Retention in days for the infrasrtucture S3 logs. This is for the default S3 logs bucket, where all AWS service logs will be delivered | `number` | n/a | yes |
| [infrastructure\_name](#input\_infrastructure\_name) | The infrastructure name to be used as part of the resource prefix | `string` | n/a | yes |
diff --git a/data.tf b/data.tf
index 028fab1..921742f 100644
--- a/data.tf
+++ b/data.tf
@@ -5,3 +5,24 @@ data "aws_route53_zone" "root" {
name = local.route53_root_hosted_zone_domain_name
}
+
+data "aws_ami" "ecs_cluster_ami" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ most_recent = true
+ owners = ["amazon"]
+
+ filter {
+ name = "name"
+ values = [
+ "amzn2-ami-ecs-hvm-${local.infrastructure_ecs_cluster_ami_version}"
+ ]
+ }
+
+ filter {
+ name = "architecture"
+ values = [
+ "x86_64"
+ ]
+ }
+}
diff --git a/ec2-userdata/ecs-instance.tpl b/ec2-userdata/ecs-instance.tpl
new file mode 100644
index 0000000..d87d551
--- /dev/null
+++ b/ec2-userdata/ecs-instance.tpl
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Format and mount the dedicated Docker storage volume (prjquota enables per-container storage quotas)
+sudo mkfs -t xfs ${docker_storage_volume_device_name}
+sudo mkdir -p /var/lib/docker
+sudo mount -o prjquota ${docker_storage_volume_device_name} /var/lib/docker
+
+# Configure ECS with Docker
+echo ECS_CLUSTER="${ecs_cluster_name}" >> /etc/ecs/ecs.config
+echo ECS_ENGINE_AUTH_TYPE=dockercfg >> /etc/ecs/ecs.config
+echo 'ECS_ENGINE_AUTH_DATA={"https://index.docker.io/v1/": { "auth": "${dockerhub_token}", "email": "${dockerhub_email}"}}' >> /etc/ecs/ecs.config
+# Set low task cleanup - reduces chance of docker thin pool running out of free space
+echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=15m" >> /etc/ecs/ecs.config
+
+# Comment out only the stock OPTIONS definition (anchored so other lines containing "OPTIONS" are untouched)
+sed -i 's/^OPTIONS/#OPTIONS/' /etc/sysconfig/docker
+echo 'OPTIONS="--default-ulimit nofile=1024:4096 --storage-opt overlay2.size=${docker_storage_size}G"' >> /etc/sysconfig/docker
+sudo service docker restart
+
+# Install useful packages
+sudo yum update -y
+
+if ! command -v aws &> /dev/null
+then
+  sudo yum install -y aws-cli
+fi
+
+sudo yum install -y \
+  jq \
+  rsync
diff --git a/ecs-cluster-infrastructure-draining-lambda.tf b/ecs-cluster-infrastructure-draining-lambda.tf
new file mode 100644
index 0000000..6d22d21
--- /dev/null
+++ b/ecs-cluster-infrastructure-draining-lambda.tf
@@ -0,0 +1,140 @@
+resource "aws_cloudwatch_log_group" "ecs_cluster_infrastructure_draining_lambda_log_group" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ name = "/aws/lambda/${local.project_name}-ecs-cluster-infrastructure-draining"
+ kms_key_id = local.infrastructure_kms_encryption ? aws_kms_key.infrastructure[0].arn : null
+ retention_in_days = local.infrastructure_ecs_cluster_draining_lambda_log_retention
+}
+
+resource "aws_iam_role" "ecs_cluster_infrastructure_draining_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ name = "${local.project_name}-ecs-cluster-infrastructure-draining-lambda"
+ assume_role_policy = templatefile(
+ "${path.root}/policies/assume-roles/service-principle-standard.json.tpl",
+ { services = jsonencode(["lambda.amazonaws.com"]) }
+ )
+}
+
+resource "aws_iam_policy" "ecs_cluster_infrastructure_draining_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ name = "${local.project_name}-ecs-cluster-infrastructure-draining-lambda"
+ policy = templatefile(
+ "${path.root}/policies/lambda-default.json.tpl",
+ {
+ region = local.aws_region
+ account_id = local.aws_account_id
+ function_name = "${local.project_name}-ecs-cluster-infrastructure-draining"
+ }
+ )
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_cluster_infrastructure_draining_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ role = aws_iam_role.ecs_cluster_infrastructure_draining_lambda[0].name
+ policy_arn = aws_iam_policy.ecs_cluster_infrastructure_draining_lambda[0].arn
+}
+
+resource "aws_iam_policy" "ecs_cluster_infrastructure_draining_ecs_container_instance_state_update_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ name = "${local.project_name}-ecs-cluster-infrastructure-ecs-container-instance-state-update"
+ policy = templatefile(
+ "${path.root}/policies/ecs-container-instance-state-update.json.tpl", {}
+ )
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_cluster_infrastructure_draining_ecs_container_instance_state_update_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ role = aws_iam_role.ecs_cluster_infrastructure_draining_lambda[0].name
+ policy_arn = aws_iam_policy.ecs_cluster_infrastructure_draining_ecs_container_instance_state_update_lambda[0].arn
+}
+
+resource "aws_iam_policy" "ecs_cluster_infrastructure_draining_sns_publish_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ name = "${local.project_name}-ecs-cluster-infrastructure-sns-publish"
+ policy = templatefile(
+ "${path.root}/policies/sns-publish.json.tpl",
+ { sns_topic_arn = aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn }
+ )
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_cluster_infrastructure_draining_sns_publish_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ role = aws_iam_role.ecs_cluster_infrastructure_draining_lambda[0].name
+ policy_arn = aws_iam_policy.ecs_cluster_infrastructure_draining_sns_publish_lambda[0].arn
+}
+
+resource "aws_iam_policy" "ecs_cluster_infrastructure_draining_kms_encrypt" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled && local.infrastructure_kms_encryption ? 1 : 0
+
+ name = "${local.project_name}-ecs-cluster-infrastructure-kms-encrypt"
+ policy = templatefile(
+ "${path.root}/policies/kms-encrypt.json.tpl",
+ { kms_key_arn = aws_kms_key.infrastructure[0].arn }
+ )
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_cluster_infrastructure_draining_kms_encrypt" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled && local.infrastructure_kms_encryption ? 1 : 0
+
+ role = aws_iam_role.ecs_cluster_infrastructure_draining_lambda[0].name
+ policy_arn = aws_iam_policy.ecs_cluster_infrastructure_draining_kms_encrypt[0].arn
+}
+
+data "archive_file" "ecs_cluster_infrastructure_draining_lambda" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ type = "zip"
+ source_dir = "lambdas/ecs-ec2-draining"
+ output_path = "lambdas/.zip-cache/ecs-ec2-draining.zip"
+}
+
+resource "aws_lambda_function" "ecs_cluster_infrastructure_draining" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ filename = data.archive_file.ecs_cluster_infrastructure_draining_lambda[0].output_path
+ function_name = "${local.project_name}-ecs-cluster-infrastructure-draining"
+ description = "${local.project_name} ECS Cluster Infrastructure Draining"
+ handler = "function.lambda_handler"
+ runtime = "python3.11"
+ role = aws_iam_role.ecs_cluster_infrastructure_draining_lambda[0].arn
+ source_code_hash = data.archive_file.ecs_cluster_infrastructure_draining_lambda[0].output_base64sha256
+ memory_size = 128
+ package_type = "Zip"
+ timeout = 900
+
+ environment {
+ variables = {
+ ecsClusterName = local.infrastructure_ecs_cluster_name
+ awsRegion = local.aws_region
+ }
+ }
+
+ tracing_config {
+ mode = "Active"
+ }
+}
+
+resource "aws_lambda_permission" "ecs_cluster_infrastructure_draining_allow_sns_execution" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ statement_id = "AllowExecutionFromSNS"
+ action = "lambda:InvokeFunction"
+ function_name = aws_lambda_function.ecs_cluster_infrastructure_draining[0].function_name
+ principal = "sns.amazonaws.com"
+ source_arn = aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn
+}
+
+resource "aws_sns_topic_subscription" "ecs_cluster_infrastructure_draining_autoscaling_lifecycle_termination" {
+ count = local.infrastructure_ecs_cluster_draining_lambda_enabled ? 1 : 0
+
+ topic_arn = aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn
+ protocol = "lambda"
+ endpoint = aws_lambda_function.ecs_cluster_infrastructure_draining[0].arn
+}
diff --git a/ecs-cluster-infrastructure.tf b/ecs-cluster-infrastructure.tf
new file mode 100644
index 0000000..66ed04a
--- /dev/null
+++ b/ecs-cluster-infrastructure.tf
@@ -0,0 +1,297 @@
+resource "aws_ecs_cluster" "infrastructure" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = local.infrastructure_ecs_cluster_name
+
+ setting {
+ name = "containerInsights"
+ value = "enabled"
+ }
+}
+
+resource "aws_security_group" "infrastructure_ecs_cluster_container_instances" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster-container-instances"
+ description = "Infrastructure ECS cluster container instances"
+ vpc_id = aws_vpc.infrastructure[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_ingress_tcp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow container port tcp ingress from public subnet (TO BE CHANGED TO ONLY ALLOW ALB)"
+ type = "ingress"
+ from_port = 32768
+ to_port = 65535
+ protocol = "tcp"
+ cidr_blocks = aws_subnet.infrastructure_public[*].cidr_block
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_ingress_udp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow container port udp ingress from public subnet (TO BE CHANGED TO ONLY ALLOW ALB)"
+ type = "ingress"
+ from_port = 32768
+ to_port = 65535
+ protocol = "udp"
+ cidr_blocks = aws_subnet.infrastructure_public[*].cidr_block
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_egress_https_tcp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow HTTPS tcp outbound (Required to pull images)"
+ type = "egress"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.infrastructure_ecs_cluster_publicly_avaialble ? [
+ for subnet in aws_subnet.infrastructure_public : subnet.cidr_block
+ ] : [
+ for subnet in aws_subnet.infrastructure_private : subnet.cidr_block
+ ]
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_egress_https_udp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow HTTPS udp outbound (Required to pull images)"
+ type = "egress"
+ from_port = 443
+ to_port = 443
+ protocol = "udp"
+ cidr_blocks = local.infrastructure_ecs_cluster_publicly_avaialble ? [
+ for subnet in aws_subnet.infrastructure_public : subnet.cidr_block
+ ] : [
+ for subnet in aws_subnet.infrastructure_private : subnet.cidr_block
+ ]
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_egress_dns_tcp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow DNS tcp outbound (Required to pull images)"
+ type = "egress"
+ from_port = 53
+ to_port = 53
+ protocol = "tcp"
+ cidr_blocks = local.infrastructure_ecs_cluster_publicly_avaialble ? [
+ for subnet in aws_subnet.infrastructure_public : subnet.cidr_block
+ ] : [
+ for subnet in aws_subnet.infrastructure_private : subnet.cidr_block
+ ]
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_security_group_rule" "infrastructure_ecs_cluster_container_instances_egress_dns_udp" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ description = "Allow DNS udp outbound (Required to pull images)"
+ type = "egress"
+ from_port = 53
+ to_port = 53
+ protocol = "udp"
+ cidr_blocks = local.infrastructure_ecs_cluster_publicly_avaialble ? [
+ for subnet in aws_subnet.infrastructure_public : subnet.cidr_block
+ ] : [
+ for subnet in aws_subnet.infrastructure_private : subnet.cidr_block
+ ]
+ security_group_id = aws_security_group.infrastructure_ecs_cluster_container_instances[0].id
+}
+
+resource "aws_iam_role" "infrastructure_ecs_cluster" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster"
+ assume_role_policy = templatefile(
+ "${path.root}/policies/assume-roles/service-principle-standard.json.tpl",
+ { services = jsonencode(["ecs.amazonaws.com", "ec2.amazonaws.com"]) }
+ )
+}
+
+resource "aws_iam_policy" "infrastructure_ecs_cluster_ec2_ecs" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.project_name}-ec2-ecs"
+ policy = templatefile("${path.root}/policies/ec2-ecs.json.tpl", {})
+}
+
+resource "aws_iam_role_policy_attachment" "infrastructure_ecs_cluster_ec2_ecs" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ role = aws_iam_role.infrastructure_ecs_cluster[0].name
+ policy_arn = aws_iam_policy.infrastructure_ecs_cluster_ec2_ecs[0].arn
+}
+
+resource "aws_iam_instance_profile" "infrastructure_ecs_cluster" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster"
+ role = aws_iam_role.infrastructure_ecs_cluster[0].name
+}
+
+resource "aws_launch_template" "infrastructure_ecs_cluster" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster"
+ description = "Infrastructure ECS Cluster (${local.resource_prefix})"
+
+ block_device_mappings {
+ # Root EBS volume
+ device_name = "/dev/xvda"
+
+ ebs {
+ volume_size = 40
+ encrypted = true
+ delete_on_termination = true
+ }
+ }
+
+ block_device_mappings {
+ # Docker Storage EBS volume
+ device_name = local.infrastructure_ecs_cluster_ebs_docker_storage_volume_device_name
+
+ ebs {
+ volume_size = local.infrastructure_ecs_cluster_ebs_docker_storage_volume_size
+ volume_type = local.infrastructure_ecs_cluster_ebs_docker_storage_volume_type
+ encrypted = true
+ delete_on_termination = true
+ }
+ }
+
+ capacity_reservation_specification {
+ capacity_reservation_preference = "open"
+ }
+
+ network_interfaces {
+ associate_public_ip_address = local.infrastructure_ecs_cluster_publicly_avaialble
+ security_groups = [aws_security_group.infrastructure_ecs_cluster_container_instances[0].id]
+ }
+
+ iam_instance_profile {
+ name = aws_iam_instance_profile.infrastructure_ecs_cluster[0].name
+ }
+
+ metadata_options {
+ http_endpoint = "enabled"
+ http_tokens = "required"
+ }
+
+ monitoring {
+ enabled = true
+ }
+
+ disable_api_termination = false
+ disable_api_stop = false
+ ebs_optimized = true
+ image_id = data.aws_ami.ecs_cluster_ami[0].id
+ instance_initiated_shutdown_behavior = "stop"
+ instance_type = local.infrastructure_ecs_cluster_instance_type
+
+ user_data = local.infrastructure_ecs_cluster_user_data
+}
+
+resource "aws_placement_group" "infrastructure_ecs_cluster" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster"
+
+ strategy = "spread"
+ spread_level = "rack"
+}
+
+resource "aws_autoscaling_group" "infrastructure_ecs_cluster" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster"
+
+ launch_template {
+ id = aws_launch_template.infrastructure_ecs_cluster[0].id
+ version = aws_launch_template.infrastructure_ecs_cluster[0].latest_version
+ }
+
+ vpc_zone_identifier = local.infrastructure_ecs_cluster_publicly_avaialble ? [
+ for subnet in aws_subnet.infrastructure_public : subnet.id
+ ] : [
+ for subnet in aws_subnet.infrastructure_private : subnet.id
+ ]
+ placement_group = aws_placement_group.infrastructure_ecs_cluster[0].id
+
+ min_size = local.infrastructure_ecs_cluster_min_size
+ max_size = local.infrastructure_ecs_cluster_max_size
+ desired_capacity = local.infrastructure_ecs_cluster_min_size
+ max_instance_lifetime = local.infrastructure_ecs_cluster_max_instance_lifetime
+
+  termination_policies = ["OldestLaunchTemplate", "ClosestToNextInstanceHour", "Default"]
+
+ tag {
+ key = "Name"
+ value = "${local.resource_prefix}-infrastructure-ecs-cluster"
+ propagate_at_launch = true
+ }
+
+ dynamic "tag" {
+ for_each = local.default_tags
+
+ content {
+ key = tag.key
+ value = tag.value
+ propagate_at_launch = true
+ }
+ }
+
+ instance_refresh {
+ strategy = "Rolling"
+ preferences {
+ min_healthy_percentage = 100
+ }
+ triggers = ["tag"]
+ }
+
+ timeouts {
+ delete = "15m"
+ }
+}
+
+resource "aws_sns_topic" "infrastructure_ecs_cluster_autoscaling_lifecycle_termination" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-ecs-termination-hook"
+ kms_master_key_id = local.infrastructure_kms_encryption ? aws_kms_alias.infrastructure[0].name : null
+}
+
+resource "aws_iam_role" "infrastructure_ecs_cluster_autoscaling_lifecycle_termination" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-ecs-termination-hook"
+ assume_role_policy = templatefile(
+ "${path.root}/policies/assume-roles/service-principle-standard.json.tpl",
+ { services = jsonencode(["autoscaling.amazonaws.com"]) }
+ )
+}
+
+resource "aws_iam_role_policy" "infrastructure_ecs_cluster_autoscaling_lifecycle_termination" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-ecs-termination-hook"
+ role = aws_iam_role.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].id
+ policy = templatefile("${path.root}/policies/sns-publish.json.tpl", { sns_topic_arn = aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn })
+}
+
+resource "aws_autoscaling_lifecycle_hook" "infrastructure_ecs_cluster_termination" {
+ count = local.enable_infrastructure_ecs_cluster ? 1 : 0
+
+ name = "${local.resource_prefix}-infrastructure-ecs-cluster-termination"
+ autoscaling_group_name = aws_autoscaling_group.infrastructure_ecs_cluster[0].name
+ default_result = local.infrastructure_ecs_cluster_draining_lambda_enabled ? "ABANDON" : "CONTINUE"
+ heartbeat_timeout = local.infrastructure_ecs_cluster_termination_timeout
+ lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
+ role_arn = aws_iam_role.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn
+ notification_target_arn = aws_sns_topic.infrastructure_ecs_cluster_autoscaling_lifecycle_termination[0].arn
+}
diff --git a/kms-infrastructure.tf b/kms-infrastructure.tf
index 5461369..34a9c5e 100644
--- a/kms-infrastructure.tf
+++ b/kms-infrastructure.tf
@@ -25,6 +25,12 @@ resource "aws_kms_key" "infrastructure" {
account_id = local.infrastructure_vpc_flow_logs_s3_with_athena && local.infrastructure_kms_encryption ? local.aws_account_id : ""
region = local.aws_region
}
+ )}${local.enable_infrastructure_ecs_cluster && local.infrastructure_kms_encryption ? "," : ""}
+ ${templatefile("${path.root}/policies/kms-key-policy-statements/service-sns-allow-encrypt.json.tpl",
+ {
+          sns_topic_arn = local.enable_infrastructure_ecs_cluster && local.infrastructure_kms_encryption ? "arn:aws:sns:${local.aws_region}:${local.aws_account_id}:${local.resource_prefix}-ecs-termination-hook" : ""
+ services = jsonencode(["autoscaling.amazonaws.com"])
+ }
)}
]
EOT
diff --git a/lambdas/ecs-ec2-draining/function.py b/lambdas/ecs-ec2-draining/function.py
new file mode 100644
index 0000000..37e24cd
--- /dev/null
+++ b/lambdas/ecs-ec2-draining/function.py
@@ -0,0 +1,71 @@
+import json
+import time
+import boto3
+import os
+
+CLUSTER = os.environ['ecsClusterName']
+REGION = os.environ['awsRegion']
+
+ECS = boto3.client('ecs', region_name=REGION)
+ASG = boto3.client('autoscaling', region_name=REGION)
+SNS = boto3.client('sns', region_name=REGION)
+
+def find_ecs_instance_info(instance_id):
+ paginator = ECS.get_paginator('list_container_instances')
+ for list_resp in paginator.paginate(cluster=CLUSTER):
+ arns = list_resp['containerInstanceArns']
+        desc_resp = ECS.describe_container_instances(cluster=CLUSTER,
+                                                     containerInstances=arns) if arns else {'containerInstances': []}
+ for container_instance in desc_resp['containerInstances']:
+ if container_instance['ec2InstanceId'] != instance_id:
+ continue
+
+ print('Found instance: id=%s, arn=%s, status=%s, runningTasksCount=%s' %
+ (instance_id, container_instance['containerInstanceArn'],
+ container_instance['status'], container_instance['runningTasksCount']))
+
+ return (container_instance['containerInstanceArn'],
+ container_instance['status'], container_instance['runningTasksCount'])
+
+ return None, None, 0
+
+def instance_has_running_tasks(instance_id):
+ (instance_arn, container_status, running_tasks) = find_ecs_instance_info(instance_id)
+ if instance_arn is None:
+ print('Could not find instance ID %s. Letting autoscaling kill the instance.' %
+ (instance_id))
+ return False
+
+ if container_status != 'DRAINING':
+ print('Setting container instance %s (%s) to DRAINING' %
+ (instance_id, instance_arn))
+ ECS.update_container_instances_state(cluster=CLUSTER,
+ containerInstances=[instance_arn],
+ status='DRAINING')
+
+ return running_tasks > 0
+
+def lambda_handler(event, context):
+ msg = json.loads(event['Records'][0]['Sns']['Message'])
+
+ if 'LifecycleTransition' not in msg.keys() or \
+ msg['LifecycleTransition'].find('autoscaling:EC2_INSTANCE_TERMINATING') == -1:
+ print('Exiting since the lifecycle transition is not EC2_INSTANCE_TERMINATING.')
+ return
+
+ if instance_has_running_tasks(msg['EC2InstanceId']):
+ print('Tasks are still running on instance %s; posting msg to SNS topic %s' %
+ (msg['EC2InstanceId'], event['Records'][0]['Sns']['TopicArn']))
+ time.sleep(5)
+ sns_resp = SNS.publish(TopicArn=event['Records'][0]['Sns']['TopicArn'],
+ Message=json.dumps(msg),
+ Subject='Publishing SNS msg to invoke Lambda again.')
+ print('Posted msg %s to SNS topic.' % (sns_resp['MessageId']))
+ else:
+ print('No tasks are running on instance %s; setting lifecycle to complete' %
+ (msg['EC2InstanceId']))
+
+ ASG.complete_lifecycle_action(LifecycleHookName=msg['LifecycleHookName'],
+ AutoScalingGroupName=msg['AutoScalingGroupName'],
+ LifecycleActionResult='CONTINUE',
+ InstanceId=msg['EC2InstanceId'])
diff --git a/locals.tf b/locals.tf
index d8add67..ec052f4 100644
--- a/locals.tf
+++ b/locals.tf
@@ -89,6 +89,33 @@ locals {
hour = "string"
}
+ infrastructure_dockerhub_email = var.infrastructure_dockerhub_email
+ infrastructure_dockerhub_token = var.infrastructure_dockerhub_token
+
+ enable_infrastructure_ecs_cluster = var.enable_infrastructure_ecs_cluster && local.infrastructure_vpc
+ infrastructure_ecs_cluster_name = "${local.resource_prefix}-infrastructure"
+ infrastructure_ecs_cluster_ami_version = var.infrastructure_ecs_cluster_ami_version
+ infrastructure_ecs_cluster_ebs_docker_storage_volume_device_name = "/dev/xvdcz"
+ infrastructure_ecs_cluster_ebs_docker_storage_volume_size = var.infrastructure_ecs_cluster_ebs_docker_storage_volume_size
+ infrastructure_ecs_cluster_ebs_docker_storage_volume_type = var.infrastructure_ecs_cluster_ebs_docker_storage_volume_type
+ infrastructure_ecs_cluster_publicly_avaialble = var.infrastructure_ecs_cluster_publicly_avaialble && local.infrastructure_vpc_network_enable_public
+ infrastructure_ecs_cluster_instance_type = var.infrastructure_ecs_cluster_instance_type
+ infrastructure_ecs_cluster_termination_timeout = var.infrastructure_ecs_cluster_termination_timeout
+ infrastructure_ecs_cluster_draining_lambda_enabled = var.infrastructure_ecs_cluster_draining_lambda_enabled && local.enable_infrastructure_ecs_cluster
+ infrastructure_ecs_cluster_draining_lambda_log_retention = var.infrastructure_ecs_cluster_draining_lambda_log_retention
+ infrastructure_ecs_cluster_min_size = var.infrastructure_ecs_cluster_min_size
+ infrastructure_ecs_cluster_max_size = var.infrastructure_ecs_cluster_max_size
+ infrastructure_ecs_cluster_max_instance_lifetime = var.infrastructure_ecs_cluster_max_instance_lifetime
+ infrastructure_ecs_cluster_user_data = base64encode(
+ templatefile("ec2-userdata/ecs-instance.tpl", {
+ docker_storage_volume_device_name = local.infrastructure_ecs_cluster_ebs_docker_storage_volume_device_name,
+ ecs_cluster_name = local.infrastructure_ecs_cluster_name,
+ dockerhub_token = local.infrastructure_dockerhub_token,
+ dockerhub_email = local.infrastructure_dockerhub_email,
+ docker_storage_size = local.infrastructure_ecs_cluster_ebs_docker_storage_volume_size
+ })
+ )
+
default_tags = {
Project = local.project_name,
Infrastructure = local.infrastructure_name,
diff --git a/policies/ec2-ecs.json.tpl b/policies/ec2-ecs.json.tpl
new file mode 100644
index 0000000..caa3be4
--- /dev/null
+++ b/policies/ec2-ecs.json.tpl
@@ -0,0 +1,39 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeTags",
+ "ecs:CreateCluster",
+ "ecs:DeregisterContainerInstance",
+ "ecs:DiscoverPollEndpoint",
+ "ecs:Poll",
+ "ecs:RegisterContainerInstance",
+ "ecs:StartTelemetrySession",
+ "ecs:UpdateContainerInstancesState",
+ "ecs:Submit*",
+ "ecr:GetAuthorizationToken",
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:GetDownloadUrlForLayer",
+ "ecr:BatchGetImage",
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": "ecs:TagResource",
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "ecs:CreateAction": [
+ "CreateCluster",
+ "RegisterContainerInstance"
+ ]
+ }
+ }
+ }
+ ]
+}
diff --git a/policies/ecs-container-instance-state-update.json.tpl b/policies/ecs-container-instance-state-update.json.tpl
new file mode 100644
index 0000000..d82ab75
--- /dev/null
+++ b/policies/ecs-container-instance-state-update.json.tpl
@@ -0,0 +1,17 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "autoscaling:CompleteLifecycleAction",
+ "ecs:ListContainerInstances",
+ "ecs:DescribeContainerInstances",
+ "ecs:UpdateContainerInstancesState"
+ ],
+ "Resource": [
+ "*"
+ ]
+ }
+ ]
+}
diff --git a/policies/kms-encrypt.json.tpl b/policies/kms-encrypt.json.tpl
new file mode 100644
index 0000000..28ac785
--- /dev/null
+++ b/policies/kms-encrypt.json.tpl
@@ -0,0 +1,11 @@
+{
+ "Version": "2012-10-17",
+ "Statement": {
+ "Effect": "Allow",
+ "Action": [
+ "kms:GenerateDataKey",
+ "kms:Decrypt"
+ ],
+ "Resource": "${kms_key_arn}"
+ }
+}
diff --git a/policies/kms-key-policy-statements/service-sns-allow-encrypt.json.tpl b/policies/kms-key-policy-statements/service-sns-allow-encrypt.json.tpl
new file mode 100644
index 0000000..c89d2e1
--- /dev/null
+++ b/policies/kms-key-policy-statements/service-sns-allow-encrypt.json.tpl
@@ -0,0 +1,16 @@
+%{if sns_topic_arn != ""}{
+ "Effect": "Allow",
+ "Principal": {
+ "Service": ${services}
+ },
+ "Action": [
+ "kms:GenerateDataKey*",
+ "kms:Decrypt"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "kms:EncryptionContext:aws:sns:topicArn": "${sns_topic_arn}"
+ }
+ }
+}%{endif}
diff --git a/policies/lambda-default.json.tpl b/policies/lambda-default.json.tpl
new file mode 100644
index 0000000..f3b72d5
--- /dev/null
+++ b/policies/lambda-default.json.tpl
@@ -0,0 +1,22 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogGroup"
+ ],
+ "Resource": "arn:aws:logs:${region}:${account_id}:*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogStream",
+ "logs:PutLogEvents"
+ ],
+ "Resource": [
+ "arn:aws:logs:${region}:${account_id}:log-group:/aws/lambda/${function_name}:*"
+ ]
+ }
+ ]
+}
diff --git a/policies/sns-publish.json.tpl b/policies/sns-publish.json.tpl
new file mode 100644
index 0000000..1da083d
--- /dev/null
+++ b/policies/sns-publish.json.tpl
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "sns:Publish"
+ ],
+ "Resource": "${sns_topic_arn}"
+ }
+ ]
+}
diff --git a/variables.tf b/variables.tf
index 3923192..cbdf658 100644
--- a/variables.tf
+++ b/variables.tf
@@ -18,6 +18,17 @@ variable "aws_region" {
type = string
}
+variable "infrastructure_dockerhub_email" {
+ description = "Dockerhub email"
+ type = string
+}
+
+variable "infrastructure_dockerhub_token" {
+ description = "Dockerhub token which has permissions to pull images"
+ type = string
+ sensitive = true
+}
+
variable "infrastructure_kms_encryption" {
description = "Enable infrastructure KMS encryption. This will create a single KMS key to be used across all resources that support KMS encryption."
type = bool
@@ -193,3 +204,63 @@ variable "enable_infrastructure_route53_hosted_zone" {
description = "Creates a Route53 hosted zone, where DNS records will be created for resources launched within this module."
type = bool
}
+
+variable "enable_infrastructure_ecs_cluster" {
+ description = "Enable creation of infrastructure ECS cluster, to place ECS services"
+ type = bool
+}
+
+variable "infrastructure_ecs_cluster_ami_version" {
+ description = "AMI version for ECS cluster instances (amzn2-ami-ecs-hvm-)"
+ type = string
+}
+
+variable "infrastructure_ecs_cluster_ebs_docker_storage_volume_size" {
+ description = "Size of EBS volume for Docker storage on the infrastructure ECS instances"
+ type = number
+}
+
+variable "infrastructure_ecs_cluster_ebs_docker_storage_volume_type" {
+ description = "Type of EBS volume for Docker storage on the infrastructure ECS instances (eg. gp3)"
+ type = string
+}
+
+variable "infrastructure_ecs_cluster_publicly_avaialble" {
+ description = "Conditionally launch the ECS cluster EC2 instances into the Public subnet"
+ type = bool
+}
+
+variable "infrastructure_ecs_cluster_instance_type" {
+ description = "The instance type for EC2 instances launched in the ECS cluster"
+ type = string
+}
+
+variable "infrastructure_ecs_cluster_termination_timeout" {
+  description = "The timeout for the termination lifecycle hook"
+ type = number
+}
+
+variable "infrastructure_ecs_cluster_draining_lambda_enabled" {
+ description = "Enable the Lambda which ensures all containers have drained before terminating ECS cluster instances"
+ type = bool
+}
+
+variable "infrastructure_ecs_cluster_draining_lambda_log_retention" {
+ description = "Log retention for the ECS cluster draining Lambda"
+ type = number
+}
+
+variable "infrastructure_ecs_cluster_min_size" {
+ description = "Minimum number of instances for the ECS cluster"
+ type = number
+}
+
+variable "infrastructure_ecs_cluster_max_size" {
+ description = "Maximum number of instances for the ECS cluster"
+ type = number
+}
+
+variable "infrastructure_ecs_cluster_max_instance_lifetime" {
+ description = "Maximum lifetime in seconds of an instance within the ECS cluster"
+ type = number
+}
diff --git a/versions.tf b/versions.tf
index 4589b65..de5eeef 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,9 +1,13 @@
terraform {
- required_version = ">= 1.6.3"
+ required_version = ">= 1.6.5"
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 5.24.0"
+ version = ">= 5.30.0"
+ }
+ archive = {
+ source = "hashicorp/archive"
+ version = ">= 2.4.1"
}
}
}