diff --git a/.gitignore b/.gitignore
index 7a3e2fd..ffca97b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+.DS_Store
+
 # Local .terraform directories
 **/.terraform/*
 
@@ -13,6 +15,7 @@ crash.log
 # version control.
 #
 # example.tfvars
+*.tfvars
 
 # Ignore override files as they are usually used to override resources locally and so
 # are not checked in
diff --git a/README.md b/README.md
index 5990ddb..565e98c 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,34 @@
-# ocp311_tf_aws
-Deploy OCP 3.11 to AWS via Terraform
+# OpenShift 3.11 on AWS via Terraform
+
+Deploy OpenShift Container Platform (OCP) v3.11 to Amazon Web Services (AWS) via Terraform.
+
+- Creates a basic OCP v3.11 cluster with a single master node, a single compute node, and a bastion.
+
+# To Use
+
+- (Optional) Copy or rename `terraform.tfvars.example` to `terraform.tfvars` and fill in the values (otherwise you will be prompted for them on apply):
+  ```bash
+  mv terraform.tfvars.example terraform.tfvars
+  ```
+- Initialize and apply the Terraform configuration, then confirm the plan to deploy OCP v3.11 (add `-auto-approve` to apply without the confirmation prompt):
+  ```bash
+  terraform init && terraform apply
+  ```
+- The Terraform output provides access credentials for the cluster.
+  (NOTE: You can also administer the cluster directly by SSH-ing to the Bastion and then SSH-ing to the Master, where `oc` is already configured and logged in as the default Cluster Administrator, `system:admin`):
+  - To see all outputs:
+    ```bash
+    terraform output
+    ```
+  - To see only one output (handy for copy/paste or scripts):
+    ```bash
+    terraform output <output_name>
+    ```
+  - Example: _SSH directly to the Master node through the Bastion_
+    ```bash
+    $(terraform output bastion_ssh) -A -t ssh $(terraform output private_dns_master)
+    ```
+- To destroy the cluster and its resources:
+  ```bash
+  terraform destroy
+  ```
\ No newline at end of file
diff --git a/cloud-init.sh b/cloud-init.sh
new file mode 100644
index 0000000..e5db67f
--- /dev/null
+++ b/cloud-init.sh
@@ -0,0 +1,19 @@
+#! /bin/bash
+
+# Update node
+# sudo yum -y update
+
+# Register system with Red Hat
+sudo subscription-manager unregister
+sudo subscription-manager register --username ${rh_subscription_username} --password ${rh_subscription_password}
+sudo subscription-manager refresh
+sudo subscription-manager attach --pool ${rh_subscription_pool_id}
+sudo subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ansible-2.9-rpms" --enable="rhel-server-rhscl-7-rpms" --enable="rhel-7-server-ose-3.11-rpms"
+
+# Signal to Terraform that update is complete and reboot
+touch /home/ec2-user/cloud-init-complete
+
+# Signal to Terraform to skip the OCP install steps (prerequisites and deploy_cluster)
+${skip_install ? "" : "#"}touch /home/ec2-user/ocp-prereq-complete
+${skip_install ? "" : "#"}touch /home/ec2-user/ocp-install-complete
+# reboot
\ No newline at end of file
diff --git a/dns.tf b/dns.tf
new file mode 100644
index 0000000..4e8eb55
--- /dev/null
+++ b/dns.tf
@@ -0,0 +1,27 @@
+# Find the public hosted zone
+data "aws_route53_zone" "base_dns" {
+  name         = var.aws_base_dns_domain
+  private_zone = false
+}
+
+# Create a public DNS alias for Master load balancer
+resource "aws_route53_record" "master_public_dns_record" {
+  zone_id = data.aws_route53_zone.base_dns.zone_id
+  name    = local.cluster_master_domain
+  type    = "A"
+  alias {
+    name                   = aws_lb.master_elb.dns_name
+    zone_id                = aws_lb.master_elb.zone_id
+    evaluate_target_health = true
+  }
+}
+resource "aws_route53_record" "subdomain_public_dns_record" {
+  zone_id = data.aws_route53_zone.base_dns.zone_id
+  name    = "*.${local.cluster_subdomain}"
+  type    = "A"
+  alias {
+    name                   = aws_lb.master_elb.dns_name
+    zone_id                = aws_lb.master_elb.zone_id
+    evaluate_target_health = true
+  }
+}
diff --git a/ec2.tf b/ec2.tf
new file mode 100644
index 0000000..bed2e20
--- /dev/null
+++ b/ec2.tf
@@ -0,0 +1,126 @@
+# Define and query for the RHEL 7.7 AMI
+data "aws_ami" "rhel" {
+  most_recent = true
+  owners      = ["309956199498"] # Red Hat's account ID
+  filter {
+    name   = "architecture"
+    values = ["x86_64"]
+  }
+  filter {
+    name   = "root-device-type"
+    values = ["ebs"]
+  }
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+  filter {
+    name   = "name"
+    values = ["RHEL-7.7*GA*"]
+  }
+}
+
+# Create Bastion EC2 Instance
+resource "aws_instance" "bastion" {
+  ami                         = data.aws_ami.rhel.id
+  instance_type               = "t2.small"
+  iam_instance_profile        = aws_iam_instance_profile.ocp311_master_profile.id
+  key_name                    = aws_key_pair.default.key_name
+  subnet_id                   = aws_subnet.public_subnet.id
+  associate_public_ip_address = true
+  vpc_security_group_ids = [
+    aws_security_group.ocp311_ssh.id,
+    aws_security_group.ocp311_vpc.id,
+    aws_security_group.ocp311_public_egress.id
+  ]
+
+  user_data = data.template_file.cloud-init.rendered
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-bastion"
+    )
+  )
+
+  connection {
+    type        = "ssh"
+    user        = "ec2-user"
+    private_key = file(var.ssh_private_key_path)
+    host        = self.public_ip
+  }
+
+  provisioner "file" {
+    content     = data.template_file.inventory.rendered
+    destination = "~/inventory.yaml"
+  }
+
+  provisioner "file" {
+    content     = file(var.ssh_private_key_path)
+    destination = "~/.ssh/id_rsa"
+  }
+}
+
+# Create Master EC2 instance
+resource "aws_instance" "master" {
+  ami                  = data.aws_ami.rhel.id
+  instance_type        = "m4.xlarge"
+  iam_instance_profile = aws_iam_instance_profile.ocp311_master_profile.id
+  key_name             = aws_key_pair.default.key_name
+  subnet_id            = aws_subnet.private_subnet.id
+  vpc_security_group_ids = [
+    aws_security_group.ocp311_vpc.id,
+    aws_security_group.ocp311_public_ingress.id,
+    aws_security_group.ocp311_public_egress.id
+  ]
+  root_block_device {
+    volume_type = "gp2"
+    volume_size = 50
+  }
+  ebs_block_device {
+    volume_type = "gp2"
+    device_name = "/dev/sdf"
+    volume_size = 80
+  }
+
+  user_data = data.template_file.cloud-init.rendered
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-master"
+    )
+  )
+}
+
+# Create Node EC2 instance
+resource "aws_instance" "node" {
+  ami                  = data.aws_ami.rhel.id
+  instance_type        = "m4.large"
+  iam_instance_profile = aws_iam_instance_profile.ocp311_worker_profile.id
+  key_name             = aws_key_pair.default.key_name
+  subnet_id            = aws_subnet.private_subnet.id
+  vpc_security_group_ids = [
+    aws_security_group.ocp311_vpc.id,
+    aws_security_group.ocp311_public_ingress.id,
+    aws_security_group.ocp311_public_egress.id
+  ]
+  root_block_device {
+    volume_type = "gp2"
+    volume_size = 50
+  }
+  ebs_block_device {
+    volume_type = "gp2"
+    device_name = "/dev/sdf"
+    volume_size = 80
+  }
+
+  user_data = data.template_file.cloud-init.rendered
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-node"
+    )
+  )
+}
diff --git a/elb.tf b/elb.tf
new file mode 100644
index 0000000..dd52fb2
--- /dev/null
+++ b/elb.tf
@@ -0,0 +1,111 @@
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+#   PUBLIC LOAD BALANCER    #
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+# Create Master Public Elastic Load Balancer
+resource "aws_lb" "master_elb" {
+  name                             = "${local.cluster_id}-master"
+  internal                         = false
+  load_balancer_type               = "network"
+  subnets                          = [aws_subnet.public_subnet.id]
+  enable_cross_zone_load_balancing = true
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-master-elb"
+    )
+  )
+}
+
+# Create Master Load Balancer listener for port 8443
+resource "aws_lb_listener" "listener_master_elb" {
+  load_balancer_arn = aws_lb.master_elb.arn
+  port              = 8443
+  protocol          = "TCP"
+  default_action {
+    target_group_arn = aws_lb_target_group.group_master_elb.arn
+    type             = "forward"
+  }
+}
+# Create Master Load Balancer listener for port 80
+resource "aws_lb_listener" "listener_http_elb" {
+  load_balancer_arn = aws_lb.master_elb.arn
+  port              = 80
+  protocol          = "TCP"
+  default_action {
+    target_group_arn = aws_lb_target_group.group_http_elb.arn
+    type             = "forward"
+  }
+}
+# Create Master Load Balancer listener for port 443
+resource "aws_lb_listener" "listener_https_elb" {
+  load_balancer_arn = aws_lb.master_elb.arn
+  port              = 443
+  protocol          = "TCP"
+  default_action {
+    target_group_arn = aws_lb_target_group.group_https_elb.arn
+    type             = "forward"
+  }
+}
+
+# Create Master target group for port 8443
+resource "aws_lb_target_group" "group_master_elb" {
+  name     = "${local.cluster_id}-master-elb-group"
+  port     = 8443
+  protocol = "TCP"
+  vpc_id   = aws_vpc.ocp311.id
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-master-elb-group"
+    )
+  )
+}
+# Create Master target group for port 80
+resource "aws_lb_target_group" "group_http_elb" {
+  name     = "${local.cluster_id}-http-group"
+  port     = 80
+  protocol = "TCP"
+  vpc_id   = aws_vpc.ocp311.id
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-http-group"
+    )
+  )
+}
+# Create Master target group for port 443
+resource "aws_lb_target_group" "group_https_elb" {
+  name     = "${local.cluster_id}-https-group"
+  port     = 443
+  protocol = "TCP"
+  vpc_id   = aws_vpc.ocp311.id
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-https-group"
+    )
+  )
+}
+
+# Attach Master group to EC2 instance
+resource "aws_lb_target_group_attachment" "attachment_master_elb" {
+  target_group_arn = aws_lb_target_group.group_master_elb.arn
+  target_id        = aws_instance.master.id
+  port             = 8443
+}
+# Attach Master group to EC2 instance
+resource "aws_lb_target_group_attachment" "attachment_master_http_elb" {
+  target_group_arn = aws_lb_target_group.group_http_elb.arn
+  target_id        = aws_instance.master.id
+  port             = 80
+}
+# Attach Master group to EC2 instance
+resource "aws_lb_target_group_attachment" "attachment_master_https_elb" {
+  target_group_arn = aws_lb_target_group.group_https_elb.arn
+  target_id        = aws_instance.master.id
+  port             = 443
+}
diff --git a/iam.tf b/iam.tf
new file mode 100644
index 0000000..d44601e
--- /dev/null
+++ b/iam.tf
@@ -0,0 +1,128 @@
+# Create Master IAM role
+resource "aws_iam_role" "ocp311_master_role" {
+  name               = "ocp311_master_role"
+  assume_role_policy = </dev/null; then git clone -b release-3.11 https://github.com/openshift/openshift-ansible; else echo === openshift-ansible directory already exists...; fi"
+    ]
+  }
+
+  # Install OCP prerequisites
+  provisioner "remote-exec" {
+    inline = [
+      "echo === INSTALLING OCP PREREQUISITES...",
+      "cd /home/ec2-user/openshift-ansible",
+      "if [ ! -f /home/ec2-user/ocp-prereq-complete ]; then ansible-playbook -i /home/ec2-user/inventory.yaml playbooks/prerequisites.yml; else echo === prerequisite playbook already run...; fi"
+    ]
+  }
+
+  # Install OCP
+  provisioner "remote-exec" {
+    inline = [
+      "echo === INSTALLING OCP...",
+      "if [ ! -f /home/ec2-user/ocp-prereq-complete ]; then touch /home/ec2-user/ocp-prereq-complete; fi",
+      "cd /home/ec2-user/openshift-ansible",
+      "if [ ! -f /home/ec2-user/ocp-install-complete ]; then ansible-playbook -i /home/ec2-user/inventory.yaml playbooks/deploy_cluster.yml; else echo === install playbook already run...; fi"
+    ]
+  }
+
+  # Establish our user
+  provisioner "remote-exec" {
+    inline = [
+      "echo === ESTABLISHING USER...",
+      "if [ ! -f /home/ec2-user/ocp-install-complete ]; then ssh -o StrictHostKeyChecking=no ${aws_instance.master.private_dns} sudo htpasswd -b /etc/origin/master/htpasswd ${var.ocp_user} ${var.ocp_pass}; else echo === user already established...; fi",
+      "if [ ! -f /home/ec2-user/ocp-install-complete ]; then ssh -o StrictHostKeyChecking=no ${aws_instance.master.private_dns} oc adm policy add-cluster-role-to-user cluster-admin ${var.ocp_user}; fi"
+    ]
+  }
+
+  # Signal completion
+  provisioner "remote-exec" {
+    inline = [
+      "if [ ! -f /home/ec2-user/ocp-install-complete ]; then touch /home/ec2-user/ocp-install-complete; fi"
+    ]
+  }
+}
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+#  INSTANCE DESTROY SCRIPTS   #
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+# Unregister nodes on destroy
+resource "null_resource" "unregister_master" {
+  depends_on = [
+    null_resource.unregister_bastion
+  ]
+  triggers = {
+    ssh_key    = file(var.ssh_private_key_path)
+    bastion_ip = aws_instance.bastion.public_ip
+    master_ip  = aws_instance.master.private_ip
+  }
+
+  connection {
+    type         = "ssh"
+    host         = self.triggers.master_ip
+    user         = "ec2-user"
+    bastion_host = self.triggers.bastion_ip
+    private_key  = self.triggers.ssh_key
+  }
+
+  provisioner "remote-exec" {
+    when       = destroy
+    on_failure = continue
+    inline = [
+      "sudo subscription-manager remove --all",
+      "sudo subscription-manager unregister"
+    ]
+  }
+}
+resource "null_resource" "unregister_node" {
+  depends_on = [
+    null_resource.unregister_bastion
+  ]
+  triggers = {
+    ssh_key    = file(var.ssh_private_key_path)
+    bastion_ip = aws_instance.bastion.public_ip
+    node_ip    = aws_instance.node.private_ip
+  }
+
+  connection {
+    type         = "ssh"
+    host         = self.triggers.node_ip
+    user         = "ec2-user"
+    bastion_host = self.triggers.bastion_ip
+    private_key  = self.triggers.ssh_key
+  }
+
+  provisioner "remote-exec" {
+    when       = destroy
+    on_failure = continue
+    inline = [
+      "sudo subscription-manager remove --all",
+      "sudo subscription-manager unregister"
+    ]
+  }
+}
+resource "null_resource" "unregister_bastion" {
+  depends_on = [
+    aws_nat_gateway.private_natgateway,
+    aws_route_table_association.public-subnet,
+    aws_route_table_association.private-subnet,
+    aws_iam_policy_attachment.ocp311_attach_master_policy,
+    aws_iam_policy_attachment.ocp311_attach_worker_policy
+  ]
+  triggers = {
+    ssh_key    = file(var.ssh_private_key_path)
+    bastion_ip = aws_instance.bastion.public_ip
+  }
+
+  connection {
+    type        = "ssh"
+    host        = self.triggers.bastion_ip
+    user        = "ec2-user"
+    private_key = self.triggers.ssh_key
+  }
+
+  provisioner "remote-exec" {
+    when       = destroy
+    on_failure = continue
+    inline = [
+      "sudo subscription-manager remove --all",
+      "sudo subscription-manager unregister"
+    ]
+  }
+}
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+#          OUTPUT           #
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+# Output cluster access commands/addresses
+output "bastion_ssh" {
+  value       = "ssh -i ${var.ssh_private_key_path} ec2-user@${aws_instance.bastion.public_dns}"
+  description = "SSH command for the Bastion (uses its public DNS)"
+}
+output "cluster_console_url" {
+  value       = "https://master.${local.cluster_domain}:8443"
+  description = "URL of the cluster web console"
+}
+output "cluster_cli_login" {
+  value       = "oc login https://master.${local.cluster_domain}:8443 -u ${var.ocp_user} -p ${var.ocp_pass} --insecure-skip-tls-verify"
+  description = "Command to log in to cluster"
+}
+output "private_dns_master" {
+  value       = aws_instance.master.private_dns
+  description = "Private DNS of Master Node (to SSH from Bastion)"
+}
+output "private_dns_node" {
+  value       = aws_instance.node.private_dns
+  description = "Private DNS of Compute Node (to SSH from Bastion)"
+}
diff --git a/security_groups.tf b/security_groups.tf
new file mode 100644
index 0000000..e7160b7
--- /dev/null
+++ b/security_groups.tf
@@ -0,0 +1,131 @@
+# Allow SSH access
+resource "aws_security_group" "ocp311_ssh" {
+  name        = "ocp311_ssh"
+  description = "Security group to allow SSH"
+  vpc_id      = aws_vpc.ocp311.id
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-ssh-group"
+    )
+  )
+}
+
+# Allow all intra-node communication
+resource "aws_security_group" "ocp311_vpc" {
+  name        = "ocp_311_vpc"
+  description = "Allow all intra-node communication"
+  vpc_id      = aws_vpc.ocp311.id
+  ingress {
+    from_port = "0"
+    to_port   = "0"
+    protocol  = "-1"
+    self      = true
+  }
+  egress {
+    from_port = "0"
+    to_port   = "0"
+    protocol  = "-1"
+    self      = true
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-internal-vpc-group"
+    )
+  )
+}
+
+# Allow public ingress
+resource "aws_security_group" "ocp311_public_ingress" {
+  name        = "ocp311_public_ingress"
+  description = "Allow public access to HTTP, HTTPS, etc"
+  vpc_id      = aws_vpc.ocp311.id
+
+  # HTTP
+  ingress {
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTP Proxy
+  ingress {
+    from_port   = 8080
+    to_port     = 8080
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTPS
+  ingress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTPS Proxy
+  ingress {
+    from_port   = 8443
+    to_port     = 8443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-public-ingress"
+    )
+  )
+}
+
+# Allow public egress (for yum updates, git, etc)
+resource "aws_security_group" "ocp311_public_egress" {
+  name        = "ocp311_public_egress"
+  description = "Security group that allows egress to the internet for instances over HTTP and HTTPS."
+  vpc_id      = aws_vpc.ocp311.id
+
+  # HTTP
+  egress {
+    from_port   = 80
+    to_port     = 80
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTP Proxy
+  egress {
+    from_port   = 8080
+    to_port     = 8080
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTPS
+  egress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+  # HTTPS Proxy
+  egress {
+    from_port   = 8443
+    to_port     = 8443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-public-egress"
+    )
+  )
+}
\ No newline at end of file
diff --git a/terraform.tfvars.example b/terraform.tfvars.example
new file mode 100644
index 0000000..c0a7f0c
--- /dev/null
+++ b/terraform.tfvars.example
@@ -0,0 +1,20 @@
+# Cluster Name and your User Name (for tagging resources)
+cluster_name    = ""
+cluster_user_id = ""
+
+# AWS Credentials and Configuration
+aws_access_key_id     = ""
+aws_secret_access_key = ""
+aws_region            = ""
+# aws_base_dns_domain = "" # Not currently in use for simplicity
+
+# Red Hat Subscription Information (See https://access.redhat.com/management/products)
+# Check https://access.redhat.com/management/products to ensure you have at least 3 registrations available
+# openshift_deployment_type = "openshift-enterprise" # Defaults to the Community/OKD variant, "origin"
+rh_subscription_username = ""
+rh_subscription_password = ""
+rh_subscription_pool_id  = ""
+
+# Paths to SSH keypair to access cluster
+ssh_private_key_path = ""
+ssh_public_key_path  = ""
diff --git a/variables.tf b/variables.tf
new file mode 100644
index 0000000..ed8cf8e
--- /dev/null
+++ b/variables.tf
@@ -0,0 +1,99 @@
+variable "cluster_user_id" {
+  type        = string
+  description = "User ID for tagging AWS resources"
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "Name of cluster for tagging AWS resources"
+}
+
+variable "aws_access_key_id" {
+  type        = string
+  description = "AWS Access Key ID"
+}
+
+variable "aws_secret_access_key" {
+  type        = string
+  description = "AWS Secret Access Key"
+}
+
+variable "aws_region" {
+  type        = string
+  description = "AWS region to deploy resources"
+}
+
+variable "aws_base_dns_domain" {
+  type        = string
+  description = "Base public DNS domain under which to create resources"
+  default     = ""
+}
+
+variable "ssh_private_key_path" {
+  type        = string
+  description = "Path to SSH private key"
+  default     = "~/.ssh/id_rsa"
+}
+
+variable "ssh_public_key_path" {
+  type        = string
+  description = "Path to SSH public key"
+  default     = "~/.ssh/id_rsa.pub"
+}
+
+variable "openshift_deployment_type" {
+  type        = string
+  description = "Defaults to the Community/OKD variant, 'origin'. For Enterprise, specify 'openshift-enterprise' and provide your subscription credentials"
+  default     = "origin"
+}
+
+variable "skip_install" {
+  type        = bool
+  description = "Specify whether to skip installing OCP and only set up the infrastructure"
+  default     = false
+}
+
+variable "rh_subscription_username" {
+  description = "Red Hat Network username used to register the OpenShift Container Platform cluster nodes"
+}
+
+variable "rh_subscription_password" {
+  description = "Red Hat Network password used to register the OpenShift Container Platform cluster nodes"
+}
+
+variable "rh_subscription_pool_id" {
+  description = "Red Hat subscription pool ID for OpenShift Container Platform"
+}
+
+variable "ocp_user" {
+  type        = string
+  description = "User for logging in to OCP via htpasswd"
+  default     = "acmtest"
+}
+
+variable "ocp_pass" {
+  type        = string
+  description = "Password for logging in to OCP via htpasswd"
+  default     = "Test4ACM"
+}
+
+data "aws_availability_zones" "zones" {}
+
+variable "vpc_cidr" {
+  type        = string
+  description = "VPC CIDR"
+  default     = "10.0.0.0/16"
+}
+
+variable "public_subnet_cidr" {
+  type        = string
+  description = "Public Subnet CIDR"
+  default     = "10.0.0.0/20"
+}
+
+variable "private_subnet_cidr" {
+  type        = string
+  description = "Private Subnet CIDR"
+  default     = "10.0.16.0/20"
+}
+
diff --git a/vpc.tf b/vpc.tf
new file mode 100644
index 0000000..55ddd9b
--- /dev/null
+++ b/vpc.tf
@@ -0,0 +1,124 @@
+# Create a VPC
+resource "aws_vpc" "ocp311" {
+  cidr_block           = var.vpc_cidr
+  enable_dns_support   = true
+  enable_dns_hostnames = true
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-vpc"
+    )
+  )
+}
+
+# Create an Internet Gateway
+resource "aws_internet_gateway" "ocp311" {
+  vpc_id = aws_vpc.ocp311.id
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-internet-gateway"
+    )
+  )
+}
+
+# Create a public subnet
+resource "aws_subnet" "public_subnet" {
+  vpc_id            = aws_vpc.ocp311.id
+  cidr_block        = var.public_subnet_cidr
+  availability_zone = data.aws_availability_zones.zones.names[0]
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-public-subnet"
+    )
+  )
+}
+
+# Create a route table allowing all addresses access to the Internet Gateway
+resource "aws_route_table" "public_route_table" {
+  vpc_id = aws_vpc.ocp311.id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.ocp311.id
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-public-route-table"
+    )
+  )
+}
+
+# Associate the route table with the public subnet
+resource "aws_route_table_association" "public-subnet" {
+  subnet_id      = aws_subnet.public_subnet.id
+  route_table_id = aws_route_table.public_route_table.id
+}
+
+# Create an Elastic IP for NAT gateway
+resource "aws_eip" "natgateway_eip" {
+  vpc = true
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-nat-gateway-eip"
+    )
+  )
+}
+
+# Create the NAT gateway
+resource "aws_nat_gateway" "private_natgateway" {
+  allocation_id = aws_eip.natgateway_eip.id
+  subnet_id     = aws_subnet.public_subnet.id
+  depends_on    = [aws_internet_gateway.ocp311]
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-private-nat-gateway"
+    )
+  )
+}
+
+# Create a private subnet
+resource "aws_subnet" "private_subnet" {
+  vpc_id            = aws_vpc.ocp311.id
+  cidr_block        = var.private_subnet_cidr
+  availability_zone = data.aws_availability_zones.zones.names[0]
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-private-subnet"
+    )
+  )
+}
+
+# Create a route table allowing private subnet access to the NAT Gateway
+resource "aws_route_table" "private_route_table" {
+  vpc_id = aws_vpc.ocp311.id
+  route {
+    cidr_block     = "0.0.0.0/0"
+    nat_gateway_id = aws_nat_gateway.private_natgateway.id
+  }
+
+  tags = merge(
+    local.common_tags,
+    map(
+      "Name", "${local.cluster_id}-private-route-table"
+    )
+  )
+}
+
+# Associate the route table with the private subnet
+resource "aws_route_table_association" "private-subnet" {
+  subnet_id      = aws_subnet.private_subnet.id
+  route_table_id = aws_route_table.private_route_table.id
+}
\ No newline at end of file