diff --git a/infrastructure/api.tf b/infrastructure/api.tf index a7dddb1c..3399cc92 100644 --- a/infrastructure/api.tf +++ b/infrastructure/api.tf @@ -50,9 +50,7 @@ resource "aws_instance" "api_server_1" { database_host = aws_db_instance.postgres_db.address database_port = aws_db_instance.postgres_db.port database_user = aws_db_instance.postgres_db.username - database_name = aws_db_instance.postgres_db.name - # TODO: enable after upgrade - # database_name = aws_db_instance.postgres_db.db_name + database_name = aws_db_instance.postgres_db.db_name database_password = var.database_password # TODO: enable batch # aws_batch_job_queue_name = module.batch.job_queue_name diff --git a/infrastructure/database.tf b/infrastructure/database.tf index c1415097..8168b162 100644 --- a/infrastructure/database.tf +++ b/infrastructure/database.tf @@ -35,9 +35,7 @@ resource "aws_db_instance" "postgres_db" { engine_version = "12.19" auto_minor_version_upgrade = false instance_class = var.database_instance_type - name = "scpca_portal" - # TODO: replace db_name with name after upgrade - # db_name = "scpca_portal" + db_name = "scpca_portal" port = "5432" username = "scpcapostgresuser" password = var.database_password diff --git a/infrastructure/deploy.py b/infrastructure/deploy.py index 69189a7e..c913bdf7 100644 --- a/infrastructure/deploy.py +++ b/infrastructure/deploy.py @@ -8,6 +8,7 @@ import time from init_terraform import init_terraform +from replace_provider import replace_provider PRIVATE_KEY_FILE_PATH = "scpca-portal-key.pem" PUBLIC_KEY_FILE_PATH = "scpca-portal-key.pub" @@ -240,6 +241,11 @@ def restart_api_if_still_running(args, api_ip_address): if init_code != 0: exit(init_code) + replace_provider_code = replace_provider("hashicorp", "aws") + + if replace_provider_code != 0: + exit(replace_provider_code) + terraform_code, terraform_output = run_terraform(args) if terraform_code != 0: exit(terraform_code) diff --git a/infrastructure/provider.tf b/infrastructure/provider.tf index 
f6f56402..b6d037b9 100644 --- a/infrastructure/provider.tf +++ b/infrastructure/provider.tf @@ -1,8 +1,9 @@ terraform { required_providers { aws = { - source = "-/aws" + source = "hashicorp/aws" version = ">= 4.9.0, < 5.0.0" +# version = "~> 5.0.0" } } required_version = "0.13.0" diff --git a/infrastructure/replace_provider.py b/infrastructure/replace_provider.py new file mode 100644 index 00000000..f101e34c --- /dev/null +++ b/infrastructure/replace_provider.py @@ -0,0 +1,28 @@ +import signal +import subprocess + + +def replace_provider(org, provider): + """ + Replaces the aws provider. + Takes an org name, and a provider, + and changes the terraform state to use the new qualified provider. + """ + + # Make sure that Terraform is allowed to shut down gracefully. + try: + command = [ + "terraform", + "state", + "replace-provider", + "-auto-approve", + f"registry.terraform.io/-/{provider}", + f"registry.terraform.io/{org}/{provider}", + ] + terraform_process = subprocess.Popen(command) + terraform_process.wait() + except KeyboardInterrupt: + terraform_process.send_signal(signal.SIGINT) + terraform_process.wait() + + return terraform_process.returncode diff --git a/infrastructure/s3.tf b/infrastructure/s3.tf index c89e8ee1..3168ab38 100644 --- a/infrastructure/s3.tf +++ b/infrastructure/s3.tf @@ -1,7 +1,5 @@ resource "aws_s3_bucket" "scpca_portal_bucket" { bucket = "scpca-portal-${var.user}-${var.stage}" - # TODO: remove this when upgrading aws_provider version - acl = "private" force_destroy = var.stage == "prod" ? 
false : true tags = merge( @@ -13,21 +11,19 @@ resource "aws_s3_bucket" "scpca_portal_bucket" { ) } -# TODO: enable after upgrade -# resource "aws_s3_bucket_ownership_controls" "scpca_portal_bucket" { -# bucket = aws_s3_bucket.scpca_portal_bucket.id -# rule { -# object_ownership = "BucketOwnerPreferred" -# } -#} +resource "aws_s3_bucket_ownership_controls" "scpca_portal_bucket" { + bucket = aws_s3_bucket.scpca_portal_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} -# TODO: enable after upgrade -# resource "aws_s3_bucket_acl" "scpca_portal_bucket" { -# depends_on = [aws_s3_bucket_ownership_controls.scpca_portal_bucket] -# -# bucket = aws_s3_bucket.scpca_portal_bucket.id -# acl = "private" -#} +resource "aws_s3_bucket_acl" "scpca_portal_bucket" { + depends_on = [aws_s3_bucket_ownership_controls.scpca_portal_bucket] + + bucket = aws_s3_bucket.scpca_portal_bucket.id + acl = "private" +} resource "aws_s3_bucket_public_access_block" "scpca_portal_bucket" { bucket = aws_s3_bucket.scpca_portal_bucket.id @@ -38,8 +34,6 @@ resource "aws_s3_bucket_public_access_block" "scpca_portal_bucket" { resource "aws_s3_bucket" "scpca_portal_cert_bucket" { bucket = "scpca-portal-cert-${var.user}-${var.stage}" - # TODO: remove this when upgrading aws_provider version - acl = "private" force_destroy = var.stage == "prod" ? 
false : true # TODO: remove lifecycle rule when we upgrade aws_provider version @@ -63,19 +57,18 @@ resource "aws_s3_bucket" "scpca_portal_cert_bucket" { ) } -# TODO: enable after upgrade -# resource "aws_s3_bucket_ownership_controls" "scpca_portal_cert_bucket" { -# bucket = aws_s3_bucket.scpca_portal_cert_bucket.id -# rule { -# object_ownership = "BucketOwnerPreferred" -# } -#} +resource "aws_s3_bucket_ownership_controls" "scpca_portal_cert_bucket" { + bucket = aws_s3_bucket.scpca_portal_cert_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} -# resource "aws_s3_bucket_acl" "scpca_portal_cert_bucket" { -# depends_on = [aws_s3_bucket_ownership_controls.scpca_portal_cert_bucket] -# bucket = aws_s3_bucket.scpca_portal_cert_bucket.id -# acl = "private" -#} +resource "aws_s3_bucket_acl" "scpca_portal_cert_bucket" { + depends_on = [aws_s3_bucket_ownership_controls.scpca_portal_cert_bucket] + bucket = aws_s3_bucket.scpca_portal_cert_bucket.id + acl = "private" +} # resource "aws_s3_bucket_lifecycle_configuration" "scpca_portal_cert_bucket" { # bucket = aws_s3_bucket.scpca_portal_cert_bucket.id diff --git a/infrastructure/unlock_state.py b/infrastructure/unlock_state.py new file mode 100644 index 00000000..5338ee89 --- /dev/null +++ b/infrastructure/unlock_state.py @@ -0,0 +1,22 @@ +import signal +import subprocess + + +def unlock_state(lock_id): + """ + Force-unlocks the Terraform state. + Takes a Terraform state lock ID, + and runs `terraform force-unlock` to release that stale lock. + """ + + # Make sure that Terraform is allowed to shut down gracefully.
+ try: + command = ["terraform", "force-unlock", "-force", lock_id] + terraform_process = subprocess.Popen(command) + terraform_process.wait() + except KeyboardInterrupt: + terraform_process.send_signal(signal.SIGINT) + terraform_process.wait() + + # The process's exit status is deliberately ignored; always report 1. + return 1 diff --git a/infrastructure/variables.tf b/infrastructure/variables.tf index d25dfa3f..5b1dc447 100644 --- a/infrastructure/variables.tf +++ b/infrastructure/variables.tf @@ -71,9 +71,7 @@ variable "ssh_public_key" { output "environment_variables" { value = [ {name = "DATABASE_NAME" - value = aws_db_instance.postgres_db.name}, - # TODO: replace db_name with name after upgrade - # value = aws_db_instance.postgres_db.db_name}, + value = aws_db_instance.postgres_db.db_name}, {name = "DATABASE_HOST" value = aws_db_instance.postgres_db.address}, {name = "DATABASE_USER"