Commit

Add Terraform templates and scripts for deployment
dhaiducek committed Jun 8, 2020
1 parent 9428ceb commit 7cf3836
Showing 13 changed files with 1,122 additions and 2 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -1,3 +1,5 @@
.DS_Store

# Local .terraform directories
**/.terraform/*

@@ -13,6 +15,7 @@ crash.log
# version control.
#
# example.tfvars
*.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
36 changes: 34 additions & 2 deletions README.md
@@ -1,2 +1,34 @@
# ocp311_tf_aws
Deploy OCP 3.11 to AWS via Terraform
# OpenShift 3.11 on AWS via Terraform

Deploy OpenShift Container Platform (OCP) v3.11 to Amazon Web Services (AWS) via Terraform.

- Creates a basic OCP v3.11 cluster with a single master node, a single compute node, and a bastion host.

# To Use

- (Optional) Copy or rename `terraform.tfvars.example` to `terraform.tfvars` and fill in the values (otherwise Terraform prompts for them on apply; an example set of values appears after these steps):
```bash
mv terraform.tfvars.example terraform.tfvars
```
- Initialize and apply the Terraform configuration. Confirm the plan when prompted to deploy OCP v3.11 (add `-auto-approve` to apply without the confirmation prompt):
```bash
terraform init && terraform apply
```
- The Terraform output provides access credentials for the cluster (NOTE: you can also administer the cluster directly by SSHing to the Bastion and then to the Master, where `oc` is already configured and logged in as the default cluster administrator, `system:admin`):
- To see all output:
```bash
terraform output
```
- To see only one output (handy for copy/paste or scripts):
```bash
terraform output <variable>
```
- Example: _SSH directly to Master node through Bastion_
```bash
$(terraform output bastion_ssh) -A -t ssh $(terraform output private_dns_master)
```
- To destroy the cluster and its resources:
```bash
terraform destroy
```
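
For reference, here is a minimal sketch of what a filled-in `terraform.tfvars` might look like, using only variable names referenced elsewhere in this commit (the authoritative list lives in the repository's variables file, which is not shown here):

```hcl
# Example values only -- replace with your own credentials and domain
rh_subscription_username = "user@example.com"
rh_subscription_password = "changeme"
rh_subscription_pool_id  = "0123456789abcdef0123456789abcdef"
aws_base_dns_domain      = "example.com"
ssh_private_key_path     = "~/.ssh/id_rsa"
skip_install             = false # toggle referenced by cloud-init.sh
```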
19 changes: 19 additions & 0 deletions cloud-init.sh
@@ -0,0 +1,19 @@
#!/bin/bash

# Update node
# sudo yum -y update

# Register system with Red Hat
sudo subscription-manager unregister
sudo subscription-manager register --username ${rh_subscription_username} --password ${rh_subscription_password}
sudo subscription-manager refresh
sudo subscription-manager attach --pool ${rh_subscription_pool_id}
sudo subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ansible-2.9-rpms" --enable="rhel-server-rhscl-7-rpms" --enable="rhel-7-server-ose-3.11-rpms"

# Signal to Terraform that update is complete and reboot
touch /home/ec2-user/cloud-init-complete

# Signal to Terraform to skip the OCP install steps (prerequisites and deploy_cluster)
${skip_install ? "" : "#"}touch /home/ec2-user/ocp-prereq-complete
${skip_install ? "" : "#"}touch /home/ec2-user/ocp-install-complete
# reboot
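
The `${...}` expressions above are Terraform template interpolations; the `${skip_install ? "" : "#"}` ternary comments the `touch` lines out unless `skip_install` is set, so the marker files are only pre-created when the OCP install should be skipped. The script is rendered through the `template_file` data source that ec2.tf consumes as `data.template_file.cloud-init.rendered`; that data source is not among the files shown here, but a sketch of how it is presumably wired up:

```hcl
# Sketch (assumed, not shown in this commit view): render cloud-init.sh with
# the subscription credentials and the install toggle before use as user data.
data "template_file" "cloud-init" {
  template = file("${path.module}/cloud-init.sh")

  vars = {
    rh_subscription_username = var.rh_subscription_username
    rh_subscription_password = var.rh_subscription_password
    rh_subscription_pool_id  = var.rh_subscription_pool_id
    skip_install             = var.skip_install
  }
}
```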
27 changes: 27 additions & 0 deletions dns.tf
@@ -0,0 +1,27 @@
# Find the public hosted zone
data "aws_route53_zone" "base_dns" {
name = var.aws_base_dns_domain
private_zone = false
}

# Create a public DNS alias for Master load balancer
resource "aws_route53_record" "master_public_dns_record" {
zone_id = data.aws_route53_zone.base_dns.zone_id
name = local.cluster_master_domain
type = "A"
alias {
name = aws_lb.master_elb.dns_name
zone_id = aws_lb.master_elb.zone_id
evaluate_target_health = true
}
}
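# Create a public wildcard DNS alias for the cluster subdomain (application routes)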
resource "aws_route53_record" "subdomain_public_dns_record" {
zone_id = data.aws_route53_zone.base_dns.zone_id
name = "*.${local.cluster_subdomain}"
type = "A"
alias {
name = aws_lb.master_elb.dns_name
zone_id = aws_lb.master_elb.zone_id
evaluate_target_health = true
}
}
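
The `local.cluster_master_domain` and `local.cluster_subdomain` values used above are defined in a locals block outside the files shown in this view. A hedged sketch of how they are presumably composed from the cluster ID and the base domain (names and layout are assumptions, shown only to make the DNS records above easier to read):

```hcl
# Illustrative only -- the real definitions are not part of the files shown here
locals {
  cluster_id            = "ocp311"  # assumed identifier used in resource names and tags
  cluster_master_domain = "master.${local.cluster_id}.${var.aws_base_dns_domain}"
  cluster_subdomain     = "apps.${local.cluster_id}.${var.aws_base_dns_domain}"
}
```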
126 changes: 126 additions & 0 deletions ec2.tf
@@ -0,0 +1,126 @@
# Query for the most recent RHEL 7.7 GA AMI
data "aws_ami" "rhel" {
most_recent = true
owners = ["309956199498"] # Red Hat's account ID
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "name"
values = ["RHEL-7.7*GA*"]
}
}

# Create Bastion EC2 Instance
resource "aws_instance" "bastion" {
ami = data.aws_ami.rhel.id
instance_type = "t2.small"
iam_instance_profile = aws_iam_instance_profile.ocp311_master_profile.id
key_name = aws_key_pair.default.key_name
subnet_id = aws_subnet.public_subnet.id
associate_public_ip_address = true
vpc_security_group_ids = [
aws_security_group.ocp311_ssh.id,
aws_security_group.ocp311_vpc.id,
aws_security_group.ocp311_public_egress.id
]

user_data = data.template_file.cloud-init.rendered

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-bastion"
)
)

connection {
type = "ssh"
user = "ec2-user"
private_key = file(var.ssh_private_key_path)
host = self.public_ip
}

provisioner "file" {
content = data.template_file.inventory.rendered
destination = "~/inventory.yaml"
}

provisioner "file" {
content = file(var.ssh_private_key_path)
destination = "~/.ssh/id_rsa"
}
}

# Create Master EC2 instance
resource "aws_instance" "master" {
ami = data.aws_ami.rhel.id
instance_type = "m4.xlarge"
iam_instance_profile = aws_iam_instance_profile.ocp311_master_profile.id
key_name = aws_key_pair.default.key_name
subnet_id = aws_subnet.private_subnet.id
vpc_security_group_ids = [
aws_security_group.ocp311_vpc.id,
aws_security_group.ocp311_public_ingress.id,
aws_security_group.ocp311_public_egress.id
]
root_block_device {
volume_type = "gp2"
volume_size = 50
}
ebs_block_device {
volume_type = "gp2"
device_name = "/dev/sdf"
volume_size = 80
}

user_data = data.template_file.cloud-init.rendered

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-master"
)
)
}

# Create Node EC2 instance
resource "aws_instance" "node" {
ami = data.aws_ami.rhel.id
instance_type = "m4.large"
iam_instance_profile = aws_iam_instance_profile.ocp311_worker_profile.id
key_name = aws_key_pair.default.key_name
subnet_id = aws_subnet.private_subnet.id
vpc_security_group_ids = [
aws_security_group.ocp311_vpc.id,
aws_security_group.ocp311_public_ingress.id,
aws_security_group.ocp311_public_egress.id
]
root_block_device {
volume_type = "gp2"
volume_size = 50
}
ebs_block_device {
volume_type = "gp2"
device_name = "/dev/sdf"
volume_size = 80
}

user_data = data.template_file.cloud-init.rendered

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-node"
)
)
}
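
The `cloud-init-complete` marker that cloud-init.sh drops on each node implies that later provisioning blocks until registration and repo setup finish. Those steps live in files not shown in this view; one plausible shape for such a wait (hypothetical, not necessarily how this commit implements it):

```hcl
# Hypothetical example: poll over SSH (through the Bastion) until cloud-init.sh
# has created its completion marker on the Master node.
resource "null_resource" "wait_for_master_cloud_init" {
  connection {
    type         = "ssh"
    user         = "ec2-user"
    host         = aws_instance.master.private_ip
    bastion_host = aws_instance.bastion.public_ip
    private_key  = file(var.ssh_private_key_path)
  }

  provisioner "remote-exec" {
    inline = [
      "while [ ! -f /home/ec2-user/cloud-init-complete ]; do sleep 10; done",
    ]
  }
}
```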
111 changes: 111 additions & 0 deletions elb.tf
@@ -0,0 +1,111 @@
#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# PUBLIC LOAD BALANCER #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Create Master Public Elastic Load Balancer
resource "aws_lb" "master_elb" {
name = "${local.cluster_id}-master"
internal = false
load_balancer_type = "network"
subnets = [aws_subnet.public_subnet.id]
enable_cross_zone_load_balancing = true

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-master-elb"
)
)
}

# Create Master Load Balancer listener for port 8443
resource "aws_lb_listener" "listener_master_elb" {
load_balancer_arn = aws_lb.master_elb.arn
port = 8443
protocol = "TCP"
default_action {
target_group_arn = aws_lb_target_group.group_master_elb.arn
type = "forward"
}
}
# Create Master Load Balancer listener for port 80
resource "aws_lb_listener" "listener_http_elb" {
load_balancer_arn = aws_lb.master_elb.arn
port = 80
protocol = "TCP"
default_action {
target_group_arn = aws_lb_target_group.group_http_elb.arn
type = "forward"
}
}
# Create Master Load Balancer listener for port 443
resource "aws_lb_listener" "listener_https_elb" {
load_balancer_arn = aws_lb.master_elb.arn
port = 443
protocol = "TCP"
default_action {
target_group_arn = aws_lb_target_group.group_https_elb.arn
type = "forward"
}
}

# Create Master target group for port 8443
resource "aws_lb_target_group" "group_master_elb" {
name = "${local.cluster_id}-master-elb-group"
port = 8443
protocol = "TCP"
vpc_id = aws_vpc.ocp311.id

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-master-elb-group"
)
)
}
# Create Master target group for port 80
resource "aws_lb_target_group" "group_http_elb" {
name = "${local.cluster_id}-http-group"
port = 80
protocol = "TCP"
vpc_id = aws_vpc.ocp311.id

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-http-group"
)
)
}
# Create Master target group for port 443
resource "aws_lb_target_group" "group_https_elb" {
name = "${local.cluster_id}-https-group"
port = 443
protocol = "TCP"
vpc_id = aws_vpc.ocp311.id

tags = merge(
local.common_tags,
map(
"Name", "${local.cluster_id}-https-group"
)
)
}

# Attach the Master API target group (8443) to the Master instance
resource "aws_lb_target_group_attachment" "attachment_master_elb" {
target_group_arn = aws_lb_target_group.group_master_elb.arn
target_id = aws_instance.master.id
port = 8443
}
# Attach the HTTP target group (80) to the Master instance
resource "aws_lb_target_group_attachment" "attachment_master_http_elb" {
target_group_arn = aws_lb_target_group.group_http_elb.arn
target_id = aws_instance.master.id
port = 80
}
# Attach the HTTPS target group (443) to the Master instance
resource "aws_lb_target_group_attachment" "attachment_master_https_elb" {
target_group_arn = aws_lb_target_group.group_https_elb.arn
target_id = aws_instance.master.id
port = 443
}
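
The README reads `terraform output bastion_ssh` and `terraform output private_dns_master`, which are defined in an outputs file not included in this view. A minimal sketch of matching outputs, assuming they are built from the instances defined in ec2.tf:

```hcl
# Sketch only -- the actual output definitions are not shown in this commit view
output "bastion_ssh" {
  value = "ssh -i ${var.ssh_private_key_path} ec2-user@${aws_instance.bastion.public_ip}"
}

output "private_dns_master" {
  value = aws_instance.master.private_dns
}
```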
