Skip to content

Commit

Permalink
add support for multiple EKS K8s versions
Browse files · Browse the repository at this point in the history
  • Loading branch information
baixiac committed Mar 22, 2024
1 parent 6d8ee53 commit 340b009
Show file tree
Hide file tree
Showing 6 changed files with 108 additions and 85 deletions.
30 changes: 30 additions & 0 deletions cluster/data.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,35 @@
locals {

eks_core_versions = {
"1.27" : {
"cluster_version" = "1.27"
"cluster_addons" = {
"coredns" = "v1.10.1-eksbuild.7"
"kube_proxy" = "v1.27.10-eksbuild.2"
"vpc_cni" = "v1.15.3-eksbuild.1"
"ebs_csi_driver" = "v1.25.0-eksbuild.1"
}
},
"1.26" : {
"cluster_version" = "1.26"
"cluster_addons" = {
"coredns" = "v1.9.3-eksbuild.2"
"kube_proxy" = "v1.26.2-eksbuild.1"
"vpc_cni" = "v1.12.2-eksbuild.1"
"ebs_csi_driver" = "v1.17.0-eksbuild.1"
}
},
"1.25" : {
"cluster_version" = "1.25"
"cluster_addons" = {
"coredns" = "v1.9.3-eksbuild.2"
"kube_proxy" = "v1.25.6-eksbuild.1"
"vpc_cni" = "v1.12.2-eksbuild.1"
"ebs_csi_driver" = "v1.16.0-eksbuild.1"
}
}
}

storage_classes = {
gp2 = "radar-base-ebs-sc-gp2"
gp3 = "radar-base-ebs-sc-gp3"
Expand Down
10 changes: 5 additions & 5 deletions cluster/eks.tf
Original file line number Diff line number Diff line change
Expand Up @@ -105,14 +105,14 @@ module "eks" {
version = "19.13.1"

cluster_name = var.eks_cluster_name
cluster_version = var.eks_cluster_version
cluster_version = local.eks_core_versions[var.eks_kubernetes_version].cluster_version

cluster_endpoint_private_access = true
cluster_endpoint_public_access = true

cluster_addons = {
coredns = {
addon_version = var.eks_addon_version.coredns
addon_version = local.eks_core_versions[var.eks_kubernetes_version].cluster_addons.coredns
resolve_conflicts = "OVERWRITE"
configuration_values = var.create_dmz_node_group ? jsonencode({
tolerations : [
Expand All @@ -132,11 +132,11 @@ module "eks" {
})
}
kube-proxy = {
addon_version = var.eks_addon_version.kube_proxy
addon_version = local.eks_core_versions[var.eks_kubernetes_version].cluster_addons.kube_proxy
resolve_conflicts = "OVERWRITE"
}
vpc-cni = {
addon_version = var.eks_addon_version.vpc_cni
addon_version = local.eks_core_versions[var.eks_kubernetes_version].cluster_addons.vpc_cni
resolve_conflicts = "OVERWRITE"
before_compute = true
service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
Expand All @@ -149,7 +149,7 @@ module "eks" {
})
}
aws-ebs-csi-driver = {
addon_version = var.eks_addon_version.ebs_csi_driver
addon_version = local.eks_core_versions[var.eks_kubernetes_version].cluster_addons.ebs_csi_driver
resolve_conflicts = "OVERWRITE"
service_account_role_arn = module.ebs_csi_irsa.iam_role_arn
configuration_values = jsonencode({
Expand Down
1 change: 1 addition & 0 deletions cluster/terraform.tfvars
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
AWS_REGION = "eu-west-2"
eks_kubernetes_version = "1.27"
environment = "dev"
eks_admins_group_users = []
defaut_storage_class = "radar-base-ebs-sc-gp2"
Expand Down
48 changes: 20 additions & 28 deletions cluster/variables.tf
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

variable "AWS_REGION" {
type = string
description = "Target AWS region"
Expand Down Expand Up @@ -49,20 +48,14 @@ variable "common_tags" {
}
}

variable "eks_cluster_version" {
variable "eks_kubernetes_version" {
type = string
description = "Amazon EKS Kubernetes version"
default = "1.27"
}

variable "eks_addon_version" {
type = map(string)
description = "Amazon EKS add-on versions"
default = {
"coredns" = "v1.9.3-eksbuild.10"
"kube_proxy" = "v1.26.9-eksbuild.2"
"vpc_cni" = "v1.15.3-eksbuild.1"
"ebs_csi_driver" = "v1.25.0-eksbuild.1"
validation {
condition = contains(["1.27", "1.26", "1.25"], var.eks_kubernetes_version)
error_message = "Invalid EKS Kubernetes version. Supported versions are '1.27', '1.26', '1.25'."
}
}

Expand All @@ -79,23 +72,7 @@ variable "instance_capacity_type" {

validation {
condition = var.instance_capacity_type == "ON_DEMAND" || var.instance_capacity_type == "SPOT"
error_message = "Invalid instance capacity type. Allowed values are 'ON_DEMAND' or 'SPOT'."
}
}

variable "create_dmz_node_group" {
type = bool
description = "Whether or not to create a DMZ node group with taints"
default = false
}

variable "dmz_node_size" {
type = map(number)
description = "Node size of the DMZ node group"
default = {
"desired" = 1
"min" = 0
"max" = 2
error_message = "Invalid instance capacity type. Allowed values are 'ON_DEMAND', 'SPOT'."
}
}

Expand All @@ -115,6 +92,21 @@ variable "eks_admins_group_users" {
default = []
}

# Whether to provision a dedicated DMZ node group. The group is created with
# taints, so only workloads carrying a matching toleration are scheduled onto it.
variable "create_dmz_node_group" {
type = bool
description = "Whether or not to create a DMZ node group with taints"
default = false
}

# Autoscaling boundaries for the DMZ node group (only used when
# create_dmz_node_group is true).
variable "dmz_node_size" {
type = map(number)
description = "Node size of the DMZ node group"
# desired = initial/steady-state node count; min/max = scaling bounds.
default = {
"desired" = 1
"min" = 0
"max" = 2
}
}

variable "defaut_storage_class" {
type = string
Expand Down
102 changes: 51 additions & 51 deletions config/karpenter.tf
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,54 @@ module "karpenter" {
tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-karpenter" }), var.common_tags)
}

locals {
# Helm --set values applied to every Karpenter release: cluster identity,
# the IRSA role for the controller's service account, the node instance
# profile, and the interruption-handling SQS queue.
common_settings = [
{
name = "settings.aws.clusterName"
value = data.aws_eks_cluster.main.id
},
{
name = "settings.aws.clusterEndpoint"
value = data.aws_eks_cluster.main.endpoint
},
{
# Escaped dots keep "eks.amazonaws.com/role-arn" as one annotation key.
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter[0].irsa_arn
},
{
name = "settings.aws.defaultInstanceProfile"
value = module.karpenter[0].instance_profile_name
},
{
name = "settings.aws.interruptionQueueName"
value = module.karpenter[0].queue_name
},
{
# NOTE(review): previously 1, with a comment that it should match the
# "desired" node size in cluster/variables.tf (whose default is 1) —
# confirm that 2 replicas is intended here.
name = "replicas"
value = 2
},
]

# Tolerations letting Karpenter pods schedule onto tainted DMZ nodes
# (dmz-pod=yes:NoExecute). Appended to common_settings only when
# var.create_dmz_node_group is true (see the helm_release's dynamic "set").
tolerations_settings = [
{
name = "tolerations[0].key"
value = "dmz-pod"
},
{
name = "tolerations[0].value"
value = "yes"
},
{
name = "tolerations[0].operator"
value = "Equal"
},
{
name = "tolerations[0].effect"
value = "NoExecute"
},
]
}

resource "helm_release" "karpenter" {
count = var.enable_karpenter ? 1 : 0

Expand All @@ -26,58 +74,10 @@ resource "helm_release" "karpenter" {
chart = "karpenter"
version = var.karpenter_version



dynamic "set" {
for_each = var.create_dmz_node_group ? [
{
name = "settings.aws.clusterName"
value = data.aws_eks_cluster.main.id
}, {
name = "settings.aws.clusterEndpoint"
value = data.aws_eks_cluster.main.endpoint
}, {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter[0].irsa_arn
}, {
name = "settings.aws.defaultInstanceProfile"
value = module.karpenter[0].instance_profile_name
}, {
name = "settings.aws.interruptionQueueName"
value = module.karpenter[0].queue_name
}, {
name = "replicas"
value = 1 # The initial value should match the "desired" node size defined in cluster/variables.tf
}, {
name = "tolerations[0].key"
value = "dmz-pod"
}, {
name = "tolerations[0].value"
value = "yes"
}, {
name = "tolerations[0].operator"
value = "Equal"
}, {
name = "tolerations[0].effect"
value = "NoExecute"
},
] : [{
name = "settings.aws.clusterName"
value = data.aws_eks_cluster.main.id
}, {
name = "settings.aws.clusterEndpoint"
value = data.aws_eks_cluster.main.endpoint
}, {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter[0].irsa_arn
}, {
name = "settings.aws.defaultInstanceProfile"
value = module.karpenter[0].instance_profile_name
}, {
name = "settings.aws.interruptionQueueName"
value = module.karpenter[0].queue_name
}, {
name = "replicas"
value = 1 # The initial value should match the "desired" node size defined in cluster/variables.tf
}]
for_each = var.create_dmz_node_group ? concat(local.common_settings, local.tolerations_settings) : local.common_settings

content {
name = set.value.name
Expand Down
2 changes: 1 addition & 1 deletion config/terraform.tfvars
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ AWS_REGION = "eu-west-2"
environment = "dev"
domain_name = "change-me-radar-base-dummy-domain.net"
create_dmz_node_group = false
enable_karpenter = true
enable_karpenter = false
enable_msk = false
enable_rds = false
enable_route53 = false
Expand Down

0 comments on commit 340b009

Please sign in to comment.