Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Simplify the 0-hardware stage to not use a separate module #45

Draft
wants to merge 2 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 122 additions & 10 deletions deployment/aws-terraform/0-hardware/cluster.tf
Original file line number Diff line number Diff line change
@@ -1,11 +1,123 @@
module "k8s" {
source="../../../modules/aws/infrastructure"

app_name=var.project_prefix
environment=var.environment
aws_region=var.aws_region
cluster_version=var.cluster_version
num_base_instances=var.num_base_instances
base_instance_type=var.base_instance_type
user_map=var.user_map
# module "k8s" {
# source="../../../modules/aws/infrastructure"

# app_name=var.project_prefix
# environment=var.environment
# aws_region=var.aws_region
# cluster_version=var.cluster_version
# num_base_instances=var.num_base_instances
# base_instance_type=var.base_instance_type
# user_map=var.user_map
# role_map=var.role_map
# }

# EKS cluster built directly with the community terraform-aws-modules/eks module;
# this replaces the previous project-local wrapper module ("k8s") from
# ../../../modules/aws/infrastructure.
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "18.31.2"

cluster_name = local.cluster_name
cluster_version = var.cluster_version
# API endpoint reachable both from inside the VPC and over the public internet.
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true

# EKS-managed add-ons; OVERWRITE lets EKS replace any pre-existing self-managed copies.
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
# IRSA role for the VPC CNI (module.vpc_cni_irsa is defined elsewhere in this stage).
service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
}
aws-ebs-csi-driver = {}
}

# Secrets envelope encryption is currently disabled; see kms.tf for the
# commented-out key this would use.
# cluster_encryption_config = [{
# provider_key_arn = aws_kms_key.eks.arn
# resources = ["secrets"]
# }]

cluster_tags = {
# This should not affect the name of the cluster primary security group
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008
Name = var.project_prefix
GithubRepo = var.repo_name
GithubOrg = "azavea"
}

# Networking comes from the VPC module defined elsewhere in this stage;
# worker nodes live only in the private subnets.
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

# This feature doesn't always work when creating a new cluster from scratch.
# Allow the cold start flag to dictate if this is our first time applying.
# Cluster users won't be properly set up until the second go around (cold_start=false).
manage_aws_auth_configmap = !var.cold_start
aws_auth_roles = var.role_map
aws_auth_users = var.user_map

# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
description = "To node 1025-65535"
protocol = "tcp"
from_port = 1025
to_port = 65535
type = "egress"
source_node_security_group = true
}
}

# Extend node-to-node security group rules
node_security_group_additional_rules = {
ingress_self_all = {
description = "Node to node all ports/protocols"
protocol = "-1"
from_port = 0
to_port = 0
type = "ingress"
self = true
}
egress_all = {
description = "Node all egress"
protocol = "-1"
from_port = 0
to_port = 0
type = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}

eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
instance_types = [var.base_instance_type]

# Attach the CNI policy to the node IAM role so pods get VPC networking.
iam_role_attach_cni_policy = true
}

eks_managed_node_groups = {
# Always-on "core" group for system workloads; the labels match the
# "hub.jupyter.org/node-purpose" selector used by JupyterHub core pods —
# presumably deployed in a later stage (TODO confirm).
base = {
# Use the EKS default launch template rather than a module-created one.
create_launch_template = false
launch_template_name = ""
instance_types = [var.base_instance_type]
capacity_type = var.base_instance_capacity_type
min_size = 1
max_size = var.num_base_instances
desired_size = var.num_base_instances
labels = {
node-type = "core"
"hub.jupyter.org/node-purpose" = "core"
}
}
}

tags = local.tags
}

# resource "null_resource" "kubectl" {
# depends_on = [module.eks.kubeconfig]
# provisioner "local-exec" {
# command = "aws eks --region ${var.aws_region} update-kubeconfig --name ${module.eks.cluster_id}"
# }
# }
21 changes: 21 additions & 0 deletions deployment/aws-terraform/0-hardware/config.tf
Original file line number Diff line number Diff line change
@@ -1,8 +1,29 @@
# Default AWS provider; region and credentials come from the environment or
# shared AWS config (nothing is hardcoded here).
provider "aws" {}

# Kubernetes provider authenticated against the EKS cluster created in
# cluster.tf, so this stage can manage the aws-auth ConfigMap.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # This requires the awscli to be installed locally where Terraform is executed.
    # NOTE: the v18.x EKS module (pinned at 18.31.2 in cluster.tf) exposes the
    # cluster name via the `cluster_id` output; a `cluster_name` output only
    # exists from v19.0 onward, so referencing it here would fail.
    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }
}

terraform {
  required_version = ">= 1.0.0"

  # Backend blocks cannot reference variables or locals (Terraform rejects them
  # with "Variables may not be used here"), so `region = local.region` is
  # invalid. Use partial configuration instead: the region (along with bucket
  # and key) must be supplied at init time, e.g.
  #   terraform init -backend-config="region=${AWS_REGION}" ...
  # presumably by the wrapper scripts mentioned in variables.tf — TODO confirm.
  backend "s3" {
    encrypt = true
  }

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.18.0"
    }

    # This stage now configures a kubernetes provider (see above); pin it to
    # the same series used by 1-services/providers.tf for consistency.
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.10.0"
    }
  }
}
57 changes: 57 additions & 0 deletions deployment/aws-terraform/0-hardware/ebs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# data "aws_caller_identity" "current" {}

# # This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
# data "aws_iam_policy_document" "ebs" {
# # Copy of default KMS policy that lets you manage it
# statement {
# sid = "Enable IAM User Permissions"
# actions = ["kms:*"]
# resources = ["*"]

# principals {
# type = "AWS"
# identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
# }
# }

# # Required for EKS
# statement {
# sid = "Allow service-linked role use of the CMK"
# actions = [
# "kms:Encrypt",
# "kms:Decrypt",
# "kms:ReEncrypt*",
# "kms:GenerateDataKey*",
# "kms:DescribeKey"
# ]
# resources = ["*"]

# principals {
# type = "AWS"
# identifiers = [
# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
# ]
# }
# }

# statement {
# sid = "Allow attachment of persistent resources"
# actions = ["kms:CreateGrant"]
# resources = ["*"]

# principals {
# type = "AWS"
# identifiers = [
# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
# ]
# }

# condition {
# test = "Bool"
# variable = "kms:GrantIsForAWSResource"
# values = ["true"]
# }
# }
# }
14 changes: 14 additions & 0 deletions deployment/aws-terraform/0-hardware/kms.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# # This key would be needed for cluster encryption
# # resource "aws_kms_key" "eks" {
# # description = "EKS Secret Encryption Key"
# # deletion_window_in_days = 7
# # enable_key_rotation = true

# # tags = local.tags
# # }

# resource "aws_kms_key" "ebs" {
# description = "Customer managed key to encrypt EKS managed node group volumes"
# deletion_window_in_days = 7
# policy = data.aws_iam_policy_document.ebs.json
# }
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
locals {
region = var.aws_region
cluster_name = "${var.app_name}-${var.environment}"
cluster_name = "${var.project_prefix}-${var.environment}"

tags = {
Name = var.app_name
Name = var.project_prefix
Environment = var.environment
GithubRepo = var.repo_name
GithubOrg = "azavea"
Expand Down
2 changes: 1 addition & 1 deletion deployment/aws-terraform/0-hardware/output.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# ARN of the EKS cluster, sourced from the community module now that the
# project-local "k8s" wrapper module is gone.
output "cluster_arn" {
  description = "ARN of the EKS cluster"
  value       = module.eks.cluster_arn
}

output "cluster_name" {
Expand Down
24 changes: 24 additions & 0 deletions deployment/aws-terraform/0-hardware/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,24 @@ variable "project_prefix" {
description="The project name prefix used to identify cluster resources. This will be set by wrapper scripts; avoid setting in the .tfvars file!"
}

# Repository name; used only for the GithubRepo tag applied in cluster.tf.
variable "repo_name" {
type = string
description = "Name of the Github repo hosting the deployment (for tagging)"
default = "kubernetes"
}

# Kubernetes control-plane version; null lets the EKS module choose its default.
variable "cluster_version" {
type = string
description = "The Kubernetes version to deploy"
default = null
}

# Gates manage_aws_auth_configmap in cluster.tf: aws-auth management is skipped
# on the first (cold) apply and enabled on the second run.
variable "cold_start" {
type = bool
description = "A flag to indicate that this is the first time we are applying this base infrastructure; not all features are applied correctly for a brand new cluster; run once with this variable set to true; subsequent runs should set this to false"
default = false
}

variable "num_base_instances" {
type = number
description = "Number of instances to be provided in the base group"
Expand All @@ -31,8 +43,20 @@ variable "base_instance_type" {
default = "t3.medium"
}

# Capacity type for the always-on "base" managed node group in cluster.tf.
variable "base_instance_capacity_type" {
type = string
description = "The capacity type of the always-on core instance (SPOT, ON_DEMAND)"
default = "ON_DEMAND"
}

# Fed to aws_auth_users in cluster.tf (IAM user -> RBAC mapping).
variable "user_map" {
type = list(object({username: string, userarn: string, groups: list(string)}))
description = "A list of {\"username\": string, \"userarn\": string, \"groups\": list(string)} objects describing the users who should have RBAC access to the cluster; note: system:masters should be reserved for those who need the highest level of admin access (including modifying RBAC)"
default = []
}

# Fed to aws_auth_roles in cluster.tf (IAM role -> RBAC mapping).
variable "role_map" {
type = list(object({rolearn: string, username: string, groups: list(string)}))
description = "A list of {\"rolearn\": string, \"username\": string, \"groups\": list(string)} objects describing the mapping of IAM roles to cluster users who should have RBAC access to the cluster; note: system:masters should be used for admin access"
default = []
}
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.2.0"

name = "${var.app_name}-vpc"
name = "${var.project_prefix}-vpc"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
Expand Down
5 changes: 5 additions & 0 deletions deployment/aws-terraform/1-services/providers.tf
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,11 @@ terraform {
}

required_providers {
null = {
source = "hashicorp/null"
version = "3.1.0"
}

kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.10.0"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,4 @@ roleRef:
name: view
apiGroup: rbac.authorization.k8s.io
YAML

depends_on = [
null_resource.kubectl
]
}
1 change: 1 addition & 0 deletions deployment/aws-terraform/1-services/rds.tf
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ module "database" {
backup_window = var.rds_backup_window
maintenance_window = var.rds_maintenance_window
auto_minor_version_upgrade = var.rds_auto_minor_version_upgrade
snapshot_identifier = var.rds_source_snapshot_identifier
final_snapshot_identifier = var.rds_final_snapshot_identifier
skip_final_snapshot = var.rds_skip_final_snapshot
copy_tags_to_snapshot = var.rds_copy_tags_to_snapshot
Expand Down
5 changes: 5 additions & 0 deletions deployment/aws-terraform/1-services/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,11 @@ variable "rds_database_password" {
default = null
}

# Passed to the database module's snapshot_identifier argument in rds.tf;
# when set, the RDS instance is restored from that snapshot instead of being
# created empty.
variable "rds_source_snapshot_identifier" {
  type        = string
  description = "Identifier of an existing RDS snapshot to restore the database from; leave null (the default) to create a fresh database"
  default     = null
}

variable "rds_final_snapshot_identifier" {
default = "rds-snapshot"
type = string
Expand Down
20 changes: 0 additions & 20 deletions modules/aws/infrastructure/config.tf

This file was deleted.

Loading