RDS backups to S3
* Conditionally create a scheduled Fargate task to back up RDS databases
  to S3 (see the example configuration after this list)
* Fargate was chosen so that the task doesn't rely on the ECS
  infrastructure to run - meaning RDS backups can run even if an ECS
  infrastructure hasn't been launched
* A CodeBuild project is created, which clones, builds and pushes the
  dalmatian-sql-backup image to ECR, ready to be used by the backup
  scheduled task. The Dockerfile for this is stored at
  https://github.com/dxw/dalmatian-sql-backup. This CodeBuild project is
  triggered every 24 hours to pick up any updates (a sketch of the
  wiring follows the buildspec below)
* The backup task will back up all databases (except system databases)
  and store the SQL dumps in an S3 bucket, with a configurable
  retention time in days
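
As a rough illustration of how this is switched on, here is a minimal
sketch using the variables this commit introduces in locals.tf; the
schedule and retention values are purely illustrative, and EventBridge-style
cron syntax is assumed:

  # Enable the scheduled Fargate backup task
  enable_infrastructure_rds_backup_to_s3 = true

  # When the backup task runs (illustrative: 02:00 UTC daily)
  infrastructure_rds_backup_to_s3_cron_expression = "cron(0 2 * * ? *)"

  # Days to keep the SQL dumps (also used for the task's CloudWatch log retention)
  infrastructure_rds_backup_to_s3_retention = 30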
Stretch96 committed Aug 1, 2024
1 parent a454611 commit d3efe30
Showing 22 changed files with 881 additions and 9 deletions.
53 changes: 53 additions & 0 deletions README.md

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions buildspecs/dalmatian-sql-backup.yml
@@ -0,0 +1,24 @@
version: 0.2

phases:
  pre_build:
    commands:
      - echo "Build started on $(date)"
      - echo "Logging in to Amazon ECR..."
      - aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - |
        if [ -n "$DOCKERHUB_USERNAME" ] && [ -n "$DOCKERHUB_TOKEN" ];
        then
          echo "Logging into Dockerhub ...";
          docker login --username "$DOCKERHUB_USERNAME" --password "$DOCKERHUB_TOKEN";
        fi;
      - echo Building dalmatian-sql-backup docker image ...
      - docker build -t dalmatian-sql-backup:latest .
  build:
    commands:
      - echo Adding ECR repo tag...
      - docker tag dalmatian-sql-backup:latest "$REPOSITORY_URI:latest"
  post_build:
    commands:
      - echo Pushing the Docker image...
      - docker push "$REPOSITORY_URI:latest"
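
The Terraform that creates this CodeBuild project isn't rendered in this
view. As a minimal sketch only (the resource name, build image and IAM
role/data source references below are placeholders, not the actual
resources from this commit), wiring the buildspec up might look roughly
like this, with AWS_ACCOUNT_ID and REPOSITORY_URI supplied as environment
variables and DOCKERHUB_USERNAME/DOCKERHUB_TOKEN as optional extras:

  resource "aws_codebuild_project" "rds_s3_backups_image" {
    name         = "example-prefix-rds-s3-backups"    # placeholder name
    service_role = aws_iam_role.codebuild_example.arn # placeholder role

    artifacts {
      type = "NO_ARTIFACTS"
    }

    environment {
      compute_type    = "BUILD_GENERAL1_SMALL"
      image           = "aws/codebuild/standard:7.0"
      type            = "LINUX_CONTAINER"
      privileged_mode = true # required for docker build/push

      environment_variable {
        name  = "AWS_ACCOUNT_ID"
        value = data.aws_caller_identity.current.account_id
      }

      environment_variable {
        name  = "REPOSITORY_URI"
        value = aws_ecr_repository.infrastructure_rds_s3_backups[0].repository_url
      }
    }

    source {
      type      = "GITHUB"
      location  = "https://github.com/dxw/dalmatian-sql-backup.git"
      buildspec = file("${path.root}/buildspecs/dalmatian-sql-backup.yml")
    }
  }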
7 changes: 5 additions & 2 deletions container-definitions/app.json.tpl
@@ -8,6 +8,9 @@
"logConfiguration": {
"logDriver": "awslogs",
"options": {
%{ if awslogs_stream_prefix != "" }
"awslogs-stream-prefix": "${awslogs_stream_prefix}",
%{ endif }
"awslogs-group": "${cloudwatch_log_group}",
"awslogs-region": "${region}"
}
@@ -30,10 +33,10 @@
],
%{ endif }
%{ if environment != "[]" }
"environment": "${environment},
"environment": ${environment},
%{ endif }
%{ if secrets != "[]" }
"secrets": "${secrets},
"secrets": ${secrets},
%{ endif }
%{ if environment_file_s3 != "" }
"environmentFiles": [
7 changes: 4 additions & 3 deletions ecs-cluster-infrastructure-service-scheduled-task.tf
@@ -39,8 +39,9 @@ resource "aws_ecs_task_definition" "infrastructure_ecs_cluster_service_scheduled
linux_parameters = jsonencode({
initProcessEnabled = false
})
- cloudwatch_log_group = each.value["enable_cloudwatch_logs"] == true ? aws_cloudwatch_log_group.infrastructure_ecs_cluster_service[each.value["container_name"]].name : ""
- region = local.aws_region
+ cloudwatch_log_group = each.value["enable_cloudwatch_logs"] == true ? aws_cloudwatch_log_group.infrastructure_ecs_cluster_service[each.value["container_name"]].name : ""
+ awslogs_stream_prefix = ""
+ region = local.aws_region
}
)
execution_role_arn = aws_iam_role.infrastructure_ecs_cluster_service_task_execution[each.value["container_name"]].arn
@@ -123,7 +124,7 @@ resource "aws_iam_policy" "infrastructure_ecs_cluster_service_scheduled_task_pas
"${path.root}/policies/pass-role.json.tpl",
{
role_arn = aws_iam_role.infrastructure_ecs_cluster_service_task_execution[each.value["container_name"]].arn
service = "events.amazonaws.com"
service = "ecs-tasks.amazonaws.com"
}
)
}
5 changes: 3 additions & 2 deletions ecs-cluster-infrastructure-service.tf
@@ -241,8 +241,9 @@ resource "aws_ecs_task_definition" "infrastructure_ecs_cluster_service" {
linux_parameters = each.value["enable_execute_command"] == true ? jsonencode({
initProcessEnabled = true
}) : "{}"
- cloudwatch_log_group = each.value["enable_cloudwatch_logs"] == true ? aws_cloudwatch_log_group.infrastructure_ecs_cluster_service[each.key].name : ""
- region = local.aws_region
+ cloudwatch_log_group = each.value["enable_cloudwatch_logs"] == true ? aws_cloudwatch_log_group.infrastructure_ecs_cluster_service[each.key].name : ""
+ awslogs_stream_prefix = ""
+ region = local.aws_region
}
)
execution_role_arn = aws_iam_role.infrastructure_ecs_cluster_service_task_execution[each.key].arn
1 change: 1 addition & 0 deletions ecs-entrypoints/rds-s3-backups-mysql.txt.tpl
@@ -0,0 +1 @@
/usr/local/bin/dump-to-s3-mysql.sh -b ${s3_bucket_name}
1 change: 1 addition & 0 deletions ecs-entrypoints/rds-s3-backups-postgres.txt.tpl
@@ -0,0 +1 @@
/usr/local/bin/dump-to-s3-postgres.sh -b ${s3_bucket_name}
12 changes: 12 additions & 0 deletions kms-infrastructure.tf
@@ -46,6 +46,18 @@ resource "aws_kms_key" "infrastructure" {
for k, v in local.infrastructure_ecs_cluster_services : aws_iam_role.infrastructure_ecs_cluster_service_task_execution[k].arn if v["enable_execute_command"] == true && local.infrastructure_ecs_cluster_enable_execute_command_logging
])
}
)}${length(local.infrastructure_rds) > 0 && local.infrastructure_kms_encryption && local.enable_infrastructure_rds_backup_to_s3 ? "," : ""}
${templatefile("${path.root}/policies/kms-key-policy-statements/role-allow-encrypt.json.tpl",
{
role_arns = jsonencode([
for k, v in local.infrastructure_rds : aws_iam_role.infrastructure_rds_s3_backups_task[k].arn if local.enable_infrastructure_rds_backup_to_s3
])
}
)}${length(local.infrastructure_rds) > 0 && local.infrastructure_kms_encryption && local.enable_infrastructure_rds_backup_to_s3 ? "," : ""}
${templatefile("${path.root}/policies/kms-key-policy-statements/cloudwatch-logs-allow.json.tpl",
{
log_group_arn = length(local.infrastructure_rds) > 0 && local.infrastructure_kms_encryption && local.enable_infrastructure_rds_backup_to_s3 ? "arn:aws:logs:${local.aws_region}:${local.aws_account_id}:log-group:${local.resource_prefix}-infrastructure-rds-s3-backups-*" : ""
}
)}${contains([for k, v in local.custom_s3_buckets : (v["cloudfront_dedicated_distribution"] == true || v["cloudfront_infrastructure_ecs_cluster_service"] != null) && v["create_dedicated_kms_key"] == false ? true : false], true) && local.infrastructure_kms_encryption ? "," : ""}
${templatefile("${path.root}/policies/kms-key-policy-statements/cloudfront-distribution-allow.json.tpl",
{
70 changes: 70 additions & 0 deletions local-exec-scripts/trigger-codedeploy-project.sh
@@ -0,0 +1,70 @@
#!/bin/bash

# exit on failures
set -e
set -o pipefail

usage() {
  echo "Usage: $(basename "$0") [OPTIONS]" 1>&2
  echo "  -h     - help"
  echo "  -n     - CodeBuild project name"
  exit 1
}

while getopts "n:h" opt; do
  case $opt in
    n)
      PROJECT_NAME=$OPTARG
      ;;
    h)
      usage
      ;;
    *)
      usage
      ;;
  esac
done

if [ -z "$PROJECT_NAME" ]
then
  usage
fi

BUILD_ID="$(
  aws codebuild start-build \
    --project-name "$PROJECT_NAME" \
  | jq -r '.build.id'
)"

echo "Triggered $PROJECT_NAME CodeBuild project ($BUILD_ID) ..."

COMPLETED=""
while [ -z "$COMPLETED" ]
do
  sleep 10
  echo "Checking progress of CodeBuild $BUILD_ID ..."
  BUILD_PROGRESS="$(
    aws codebuild batch-get-builds \
      --ids "$BUILD_ID" \
  )"
  COMPLETED="$(
    echo "$BUILD_PROGRESS" \
    | jq -r \
      '.builds[0].phases[] | select(.phaseType == "COMPLETED")'
  )"
done
echo "CodeBuild $BUILD_ID completed, checking for failures ..."

FAILURES="$(
  echo "$BUILD_PROGRESS" \
  | jq -r \
    '.builds[0].phases[] | select(.phaseStatus == "FAILED")'
)"

if [ -n "$FAILURES" ]
then
  echo "$FAILURES"
  exit 1
fi

echo "CodeBuild $BUILD_ID completed without failures"
17 changes: 15 additions & 2 deletions locals.tf
@@ -30,12 +30,14 @@ locals {
length(local.custom_s3_buckets) != 0 ||
local.enable_cloudformatian_s3_template_store ||
local.enable_infrastructure_vpc_transfer_s3_bucket ||
- local.infrastructure_ecs_cluster_enable_execute_command_logging
+ local.infrastructure_ecs_cluster_enable_execute_command_logging ||
+ local.enable_infrastructure_rds_backup_to_s3
)
logs_bucket_s3_source_arns = concat(
length(local.infrastructure_ecs_cluster_services) != 0 ? [aws_s3_bucket.infrastructure_ecs_cluster_service_build_pipeline_artifact_store[0].arn] : [],
local.enable_infrastructure_vpc_transfer_s3_bucket ? [aws_s3_bucket.infrastructure_vpc_transfer[0].arn] : [],
- [for k, v in local.custom_s3_buckets : aws_s3_bucket.custom[k].arn]
+ [for k, v in local.custom_s3_buckets : aws_s3_bucket.custom[k].arn],
+ local.enable_infrastructure_rds_backup_to_s3 ? [aws_s3_bucket.infrastructure_rds_s3_backups[0].arn] : [],
)
logs_bucket_logs_source_arns = concat(
local.infrastructure_vpc_flow_logs_s3_with_athena ? ["arn:aws:logs:${local.aws_region}:${local.aws_account_id}:*"] : []
@@ -232,6 +234,17 @@
"mysql" = 3306
"postgres" = 5432
}
rds_s3_backups_container_entrypoint_file = {
"mysql" = "${path.root}/ecs-entrypoints/rds-s3-backups-mysql.txt.tpl"
"postgres" = "${path.root}/ecs-entrypoints/rds-s3-backups-postgres.txt.tpl"
}
enable_infrastructure_rds_backup_to_s3 = var.enable_infrastructure_rds_backup_to_s3
infrastructure_rds_backup_to_s3_cron_expression = var.infrastructure_rds_backup_to_s3_cron_expression
infrastructure_rds_backup_to_s3_retention = var.infrastructure_rds_backup_to_s3_retention
enable_infrastructure_rds_tooling_ecs_cluster = anytrue([
local.enable_infrastructure_rds_backup_to_s3,
])
infrastructure_rds_tooling_ecs_cluster_name = "${local.resource_prefix}-infrastructure-rds-tooling"

infrastructure_elasticache_defaults = var.infrastructure_elasticache_defaults
infrastructure_elasticache_keys = length(var.infrastructure_elasticache) > 0 ? keys(values(var.infrastructure_elasticache)[0]) : []
16 changes: 16 additions & 0 deletions policies/codebuild-allow-builds.json.tpl
@@ -0,0 +1,16 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "codebuild:StartBuild",
        "codebuild:StopBuild",
        "codebuild:BatchGet*",
        "codebuild:Get*",
        "codebuild:List*"
      ],
      "Effect": "Allow",
      "Resource": "*"
    }
  ]
}
12 changes: 12 additions & 0 deletions policies/secrets-manager-get-secret-value.json.tpl
@@ -0,0 +1,12 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "secretsmanager:GetSecretValue"
      ],
      "Resource": ${secret_name_arns}
    }
  ]
}
9 changes: 9 additions & 0 deletions rds-infrastructure-cloudwatch-logs.tf
@@ -10,3 +10,12 @@ resource "aws_cloudwatch_log_group" "infrastructure_rds_exports" {
retention_in_days = 30
kms_key_id = local.infrastructure_kms_encryption ? aws_kms_key.infrastructure[0].id : null
}

resource "aws_cloudwatch_log_group" "infrastructure_rds_s3_backups" {
for_each = local.enable_infrastructure_rds_backup_to_s3 ? local.infrastructure_rds : {}

name = "${local.resource_prefix}-infrastructure-rds-s3-backups-${each.key}"
retention_in_days = local.infrastructure_rds_backup_to_s3_retention
kms_key_id = local.infrastructure_kms_encryption ? aws_kms_key.infrastructure[0].arn : null
skip_destroy = true
}
17 changes: 17 additions & 0 deletions rds-infrastructure-s3-backups-ecr.tf
@@ -0,0 +1,17 @@
resource "aws_ecr_repository" "infrastructure_rds_s3_backups" {
count = local.enable_infrastructure_rds_backup_to_s3 ? 1 : 0

name = "${local.resource_prefix}-rds-s3-backups"

#tfsec:ignore:aws-ecr-enforce-immutable-repository
image_tag_mutability = "MUTABLE"

encryption_configuration {
encryption_type = local.infrastructure_kms_encryption ? "KMS" : "AES256"
kms_key = local.infrastructure_kms_encryption ? aws_kms_key.infrastructure[0].arn : null
}

image_scanning_configuration {
scan_on_push = true
}
}