refactor
baixiac committed Oct 16, 2023
1 parent 0d0ad67 commit 2d817ef
Showing 17 changed files with 265 additions and 163 deletions.
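In short, the single root module is split into two root modules: cluster/ (the EKS cluster, IRSA roles, and related infrastructure) and config/ (in-cluster configuration, currently Karpenter), each with its own provider configuration. An approximate sketch of the resulting layout, based on the renames in this commit (files listed as "renamed without changes" keep their original names, which this view does not show):

cluster/
  eks.tf
  provider.tf     # new in this commit
  ...             # other files renamed into cluster/
config/
  karpenter.tf    # new in this commit
  provider.tf     # moved from the repository root
  variables.tf    # new in this commit
  ...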
6 changes: 3 additions & 3 deletions .gitignore
@@ -1,3 +1,3 @@
.terraform
.terraform.lock.hcl
terraform.tfstate.backup
**/.terraform
**/.terraform.lock.hcl
**/terraform.tfstate.backup
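Worth noting: a gitignore pattern without a slash already matches at any depth, so the old entries would also have ignored cluster/.terraform; the **/ prefix mainly makes the multi-directory intent explicit now that Terraform state lives under both cluster/ and config/.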
File renamed without changes.
File renamed without changes.
58 changes: 23 additions & 35 deletions eks.tf → cluster/eks.tf
@@ -38,8 +38,8 @@ module "external_dns_irsa" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.0"

role_name = "${var.environment}-radar-base-external-dns-irsa"
attach_external_dns_policy = true
role_name = "${var.environment}-radar-base-external-dns-irsa"
attach_external_dns_policy = true
external_dns_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary.id}"]

oidc_providers = {
@@ -56,8 +56,8 @@ module "cert_manager_irsa" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 5.0"

role_name = "${var.environment}-radar-base-cert-manager-irsa"
attach_cert_manager_policy = true
role_name = "${var.environment}-radar-base-cert-manager-irsa"
attach_cert_manager_policy = true
cert_manager_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary.id}"]

oidc_providers = {
@@ -70,20 +70,15 @@ module "cert_manager_irsa" {
tags = merge(tomap({ "Name" : "${var.environment}-radar-base-cert-manager-irsa" }), var.common_tags)
}

module "karpenter" {
source = "terraform-aws-modules/eks/aws//modules/karpenter"
version = "19.17.2"

cluster_name = module.eks.cluster_name

irsa_oidc_provider_arn = module.eks.oidc_provider_arn
irsa_namespace_service_accounts = ["karpenter:karpenter"]
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

iam_role_additional_policies = {
AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", var.AWS_REGION]
command = "aws"
}

tags = merge(tomap({ "Name" : "${var.environment}-radar-base-karpenter" }), var.common_tags)
}
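A brief note on the kubernetes provider block added here (it replaces the one further down in this file): it authenticates by exec'ing the AWS CLI, so every plan/apply fetches a short-lived token, roughly what running aws eks get-token --cluster-name <cluster> --region <region> by hand would return. This assumes the AWS CLI is available wherever Terraform runs; the explicit --region argument is what this version adds compared with the previous block.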

module "eks" {
@@ -231,31 +226,12 @@ module "eks" {
username = module.eks_admins_iam_role.iam_role_name
groups = ["system:masters"]
},
{
rolearn = module.karpenter.role_arn
username = "system:node:{{EC2PrivateDNSName}}"
groups = [
"system:bootstrappers",
"system:nodes",
]
},
]

tags = merge(tomap({ "Name" : "${var.environment}-${var.eks_cluster_base_name}" }), var.common_tags)

}

provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
command = "aws"
}
}

output "radar_base_eks_cluster_name" {
value = module.eks.cluster_name
}
@@ -271,3 +247,15 @@ output "radar_base_eks_dmz_node_group_name" {
output "radar_base_eks_worker_node_group_name" {
value = element(split(":", module.eks.eks_managed_node_groups.worker.node_group_id), 1)
}
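For reference, node_group_id from the EKS module is the underlying aws_eks_node_group ID, a colon-separated pair of cluster name and node group name, which is why these outputs split on ":" and take element 1. A hypothetical value to illustrate:

# node_group_id                         = "dev-radar-base-eks:worker-20231016120000000000000001"
# element(split(":", node_group_id), 1) = "worker-20231016120000000000000001"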

# output "radar_base_eks_karpenter_irsa_arn" {
# value = module.karpenter.irsa_arn
# }

# output "radar_base_eks_karpenter_interruption_queue_name" {
# value = module.karpenter.queue_name
# }

# output "radar_base_eks_karpenter_instance_profile" {
# value = module.karpenter.instance_profile_name
# }
File renamed without changes.
File renamed without changes.
20 changes: 20 additions & 0 deletions cluster/provider.tf
@@ -0,0 +1,20 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "< 5.0.0"
}
postgresql = {
source = "cyrilgdn/postgresql"
version = ">= 1.19.0"
}
}
required_version = "~> 1.0"
}

provider "aws" {
region = var.AWS_REGION
access_key = var.AWS_ACCESS_KEY_ID
secret_key = var.AWS_SECRET_ACCESS_KEY
token = var.AWS_SESSION_TOKEN
}
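Since this is now a standalone root module, it needs its own variable inputs when run. A hypothetical terraform.tfvars for this directory (placeholder values; in practice these would normally come from TF_VAR_* environment variables or a secrets manager rather than a committed file):

AWS_REGION            = "eu-west-2"
AWS_ACCESS_KEY_ID     = "AKIA................" # placeholder
AWS_SECRET_ACCESS_KEY = "...................." # placeholder
environment           = "dev"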
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
160 changes: 160 additions & 0 deletions config/karpenter.tf
@@ -0,0 +1,160 @@
data "aws_eks_cluster" "main" {
name = var.cluster_name
}

data "aws_eks_cluster_auth" "main" {
name = var.cluster_name
}

data "aws_autoscaling_groups" "main" {
filter {
name = "tag:eks:cluster-name"
values = [var.cluster_name]
}
}

data "aws_eks_node_group" "worker" {
cluster_name = var.cluster_name
node_group_name = join("-", [
element(split("-", [for asg in data.aws_autoscaling_groups.main.names : asg if startswith(asg, "eks-worker-")][0]), 1),
element(split("-", [for asg in data.aws_autoscaling_groups.main.names : asg if startswith(asg, "eks-worker-")][0]), 2)
])
}
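This lookup reconstructs the managed node group's name from its Auto Scaling group. It appears to rely on EKS naming those ASGs eks-<node-group-name>-<uuid> and on the EKS module's name-prefix behaviour, under which the node group itself is called worker-<generated-suffix>; joining split elements 1 and 2 recovers that name. With a hypothetical ASG name:

# ASG name:  "eks-worker-20231016120000000000000001-0abc1234-56de-78f0-9a1b-2c3d4e5f6a7b"
# split("-") elements 1 and 2:  "worker", "20231016120000000000000001"
# join("-", [...])           -> "worker-20231016120000000000000001"

The startswith(asg, "eks-worker-") filter assumes the worker node group's name (or prefix) is literally "worker".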

locals {
aws_account = element(split(":", data.aws_eks_cluster.main.arn), 4)
oidc_issuer = element(split("//", data.aws_eks_cluster.main.identity[0].oidc[0].issuer), 1)
}
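The locals derive the AWS account ID from the cluster ARN and the bare OIDC issuer (scheme stripped) from the issuer URL. With hypothetical values:

# arn    = "arn:aws:eks:eu-west-2:111122223333:cluster/dev-radar-base-eks"
# element(split(":", arn), 4)     -> "111122223333"
# issuer = "https://oidc.eks.eu-west-2.amazonaws.com/id/EXAMPLED539D4633E53DE1B7"
# element(split("//", issuer), 1) -> "oidc.eks.eu-west-2.amazonaws.com/id/EXAMPLED539D4633E53DE1B7"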

provider "kubernetes" {
host = data.aws_eks_cluster.main.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.main.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.main.token
}

provider "helm" {
kubernetes {
host = data.aws_eks_cluster.main.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.main.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.main.token
}
}

provider "kubectl" {
apply_retry_count = 5
host = data.aws_eks_cluster.main.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.main.certificate_authority[0].data)
load_config_file = false
token = data.aws_eks_cluster_auth.main.token
}

module "karpenter" {
source = "terraform-aws-modules/eks/aws//modules/karpenter"
version = "19.17.2"

cluster_name = data.aws_eks_cluster.main.id

irsa_oidc_provider_arn = join("", ["arn:aws:iam::", local.aws_account, ":oidc-provider/", local.oidc_issuer])
irsa_namespace_service_accounts = ["karpenter:karpenter"]

create_iam_role = false
iam_role_arn = data.aws_eks_node_group.worker.node_role_arn

tags = merge(tomap({ "Name" : "${var.environment}-radar-base-karpenter" }), var.common_tags)
}
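With create_iam_role = false the module no longer creates a dedicated node role for Karpenter; it reuses the role already attached to the managed worker node group (looked up above). That role is already mapped in the cluster's aws-auth ConfigMap, which appears to be why the separate Karpenter rolearn entry was dropped from cluster/eks.tf in this same commit.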

resource "helm_release" "karpenter" {
namespace = "karpenter"
create_namespace = true

name = "karpenter"
repository = "oci://public.ecr.aws/karpenter"
chart = "karpenter"
version = "v0.29.0"

set {
name = "settings.aws.clusterName"
value = data.aws_eks_cluster.main.id
}

set {
name = "settings.aws.clusterEndpoint"
value = data.aws_eks_cluster.main.endpoint
}

set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter.irsa_arn
}

set {
name = "settings.aws.defaultInstanceProfile"
value = module.karpenter.instance_profile_name
}

set {
name = "settings.aws.interruptionQueueName"
value = module.karpenter.queue_name
}

}
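Once applied, the controller should come up in its own namespace (for example, kubectl -n karpenter get pods should show the karpenter deployment running), with the chart values above wiring in the IRSA role annotation, default instance profile, and interruption queue created by the module block.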

resource "kubectl_manifest" "karpenter_provisioner" {
yaml_body = <<-YAML
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
requirements:
- key: kubernetes.io/arch
operator: In
values:
- amd64
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.sh/capacity-type
operator: In
values:
- "${lower(var.instance_capacity_type)}"
- key: topology.kubernetes.io/zone
operator: In
values:
- "${var.AWS_REGION}a"
# - "${var.AWS_REGION}b"
# - "${var.AWS_REGION}c"
ttlSecondsAfterEmpty: 30
limits:
resources:
cpu: 64
memory: 256Gi
providerRef:
name: default
YAML

depends_on = [
helm_release.karpenter
]
}
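To illustrate how this Provisioner is exercised (a hypothetical example, not part of this commit): Karpenter only launches capacity for pods that cannot be scheduled, so a pod requesting more CPU than the existing node groups have free should cause a new amd64 Linux node of the configured capacity type to appear in zone a, and nodes left empty are removed after ttlSecondsAfterEmpty (30 seconds):

apiVersion: v1
kind: Pod
metadata:
  name: karpenter-scale-test   # hypothetical name
spec:
  containers:
    - name: pause
      image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
      resources:
        requests:
          cpu: "4"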

resource "kubectl_manifest" "karpenter_node_template" {
yaml_body = <<-YAML
apiVersion: karpenter.k8s.aws/v1alpha1
kind: AWSNodeTemplate
metadata:
name: default
spec:
subnetSelector:
karpenter.sh/discovery: ${data.aws_eks_cluster.main.id}
securityGroupSelector:
karpenter.sh/discovery: ${data.aws_eks_cluster.main.id}
tags:
karpenter.sh/discovery: ${data.aws_eks_cluster.main.id}
YAML

depends_on = [
helm_release.karpenter
]
}
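One assumption behind the selectors above: the subnets and security groups Karpenter should use must carry the karpenter.sh/discovery tag with the cluster name as its value. If the cluster stack does not already apply that tag, it could be added from Terraform along these lines (hypothetical resource name and subnet ID):

resource "aws_ec2_tag" "karpenter_subnet_discovery" {
  resource_id = "subnet-0123456789abcdef0" # hypothetical subnet ID
  key         = "karpenter.sh/discovery"
  value       = var.cluster_name
}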
4 changes: 0 additions & 4 deletions provider.tf → config/provider.tf
@@ -4,10 +4,6 @@ terraform {
source = "hashicorp/aws"
version = "< 5.0.0"
}
postgresql = {
source = "cyrilgdn/postgresql"
version = ">= 1.19.0"
}
helm = {
source = "hashicorp/helm"
version = ">= 2.11"
59 changes: 59 additions & 0 deletions config/variables.tf
@@ -0,0 +1,59 @@
variable "AWS_REGION" {
type = string
default = "eu-west-2"
}

variable "AWS_ACCESS_KEY_ID" {
type = string
sensitive = true
}

variable "AWS_SECRET_ACCESS_KEY" {
type = string
sensitive = true
}

variable "AWS_SESSION_TOKEN" {
type = string
default = ""
sensitive = true
}

variable "environment" {
type = string
default = "dev"
}

variable "common_tags" {
type = map(string)
default = {
Project = "radar-base-development"
Environment = "dev"
}
}

variable "cluster_name" {
type = string
}

variable "instance_capacity_type" {
type = string
default = "SPOT"

validation {
condition = var.instance_capacity_type == "ON_DEMAND" || var.instance_capacity_type == "SPOT"
error_message = "Invalid instance capacity type. Allowed values are 'ON_DEMAND' or 'SPOT'."
}
}
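cluster_name has no default, so the config stack must be given the name exported by the cluster stack's radar_base_eks_cluster_name output. A hypothetical terraform.tfvars fragment for this directory:

cluster_name           = "dev-radar-base-eks" # hypothetical; use the cluster stack's output value
instance_capacity_type = "SPOT"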

# variable "karpenter_irsa_arn" {
# type = string
# }

# variable "karpenter_instance_profile" {
# type = string
# }

# variable "karpenter_interruption_queue_name" {
# type = string
# }