From e015513f3ec093c91c737ea41acafc60b59f0b08 Mon Sep 17 00:00:00 2001
From: yehiel etah
Subject: [PATCH] Support for Kuala Lumpur region; EKS Add-ons support
 receiving IAM permissions via EKS Pod Identity Associations; support for
 AMIs based on AmazonLinux2023; configuring cluster access management via
 AWS EKS Access Entries

-        "description": "Valid variants are: `\"1.23\"`, `\"1.24\"`, `\"1.25\"`, `\"1.26\"`, `\"1.27\"`, `\"1.28\"`, `\"1.29\"`, `\"1.30\"` (default).",
+ "description": "Valid variants are: `\"1.23\"`, `\"1.24\"`, `\"1.25\"`, `\"1.26\"`, `\"1.27\"`, `\"1.28\"`, `\"1.29\"`, `\"1.30\"` (default), `\"1.31\"`.",
+        "x-intellij-html-description": "Valid variants are: \"1.23\", \"1.24\", \"1.25\", \"1.26\", \"1.27\", \"1.28\", \"1.29\", \"1.30\" (default), \"1.31\".",
"default": "1.30",
"enum": [
"1.23",
@@ -780,7 +787,8 @@
"1.27",
"1.28",
"1.29",
- "1.30"
+ "1.30",
+ "1.31"
]
}
},
diff --git a/pkg/apis/eksctl.io/v1alpha5/defaults.go b/pkg/apis/eksctl.io/v1alpha5/defaults.go
index 6bb309927b..88fe169066 100644
--- a/pkg/apis/eksctl.io/v1alpha5/defaults.go
+++ b/pkg/apis/eksctl.io/v1alpha5/defaults.go
@@ -79,7 +79,7 @@ func SetClusterConfigDefaults(cfg *ClusterConfig) {
// IAM SAs that need to be explicitly deleted.
func IAMServiceAccountsWithImplicitServiceAccounts(cfg *ClusterConfig) []*ClusterIAMServiceAccount {
serviceAccounts := cfg.IAM.ServiceAccounts
- if IsEnabled(cfg.IAM.WithOIDC) && !vpcCNIAddonSpecified(cfg) {
+ if IsEnabled(cfg.IAM.WithOIDC) && !vpcCNIAddonSpecified(cfg) && !cfg.AddonsConfig.DisableDefaultAddons {
var found bool
for _, sa := range cfg.IAM.ServiceAccounts {
found = found || (sa.Name == AWSNodeMeta.Name && sa.Namespace == AWSNodeMeta.Namespace)
@@ -134,7 +134,8 @@ func SetManagedNodeGroupDefaults(ng *ManagedNodeGroup, meta *ClusterMeta, contro
// When using custom AMIs, we want the user to explicitly specify AMI family.
// Thus, we only set up default AMI family when no custom AMI is being used.
if ng.AMIFamily == "" && ng.AMI == "" {
- if isMinVer, _ := utils.IsMinVersion(Version1_30, meta.Version); isMinVer && !instanceutils.IsGPUInstanceType(ng.InstanceType) &&
+
+ if isMinVer, _ := utils.IsMinVersion(Version1_30, meta.Version); isMinVer &&
!instanceutils.IsARMGPUInstanceType(ng.InstanceType) {
ng.AMIFamily = NodeImageFamilyAmazonLinux2023
} else {
diff --git a/pkg/apis/eksctl.io/v1alpha5/defaults_test.go b/pkg/apis/eksctl.io/v1alpha5/defaults_test.go
index a0d470275e..e47147e0dc 100644
--- a/pkg/apis/eksctl.io/v1alpha5/defaults_test.go
+++ b/pkg/apis/eksctl.io/v1alpha5/defaults_test.go
@@ -392,6 +392,7 @@ var _ = Describe("ClusterConfig validation", func() {
}, false)
Expect(mng.AMIFamily).To(Equal(expectedAMIFamily))
},
+ Entry("EKS 1.31 uses AL2023", "1.31", NodeImageFamilyAmazonLinux2023),
Entry("EKS 1.30 uses AL2023", "1.30", NodeImageFamilyAmazonLinux2023),
Entry("EKS 1.29 uses AL2", "1.29", NodeImageFamilyAmazonLinux2),
Entry("EKS 1.28 uses AL2", "1.28", NodeImageFamilyAmazonLinux2),
diff --git a/pkg/apis/eksctl.io/v1alpha5/gpu_validation_test.go b/pkg/apis/eksctl.io/v1alpha5/gpu_validation_test.go
index 33796d387a..404105932b 100644
--- a/pkg/apis/eksctl.io/v1alpha5/gpu_validation_test.go
+++ b/pkg/apis/eksctl.io/v1alpha5/gpu_validation_test.go
@@ -40,22 +40,16 @@ var _ = Describe("GPU instance support", func() {
assertValidationError(e, api.ValidateManagedNodeGroup(0, mng))
},
Entry("AL2023 INF", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "inf1.xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "Inferentia",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "inf1.xlarge",
}),
Entry("AL2023 TRN", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "trn1.2xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "Trainium",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "trn1.2xlarge",
}),
Entry("AL2023 NVIDIA", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "g4dn.xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "GPU",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "g4dn.xlarge",
}),
Entry("AL2", gpuInstanceEntry{
gpuInstanceType: "asdf",
@@ -107,22 +101,16 @@ var _ = Describe("GPU instance support", func() {
},
Entry("AL2023 INF", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "inf1.xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "Inferentia",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "inf1.xlarge",
}),
Entry("AL2023 TRN", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "trn1.2xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "Trainium",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "trn1.2xlarge",
}),
Entry("AL2023 NVIDIA", gpuInstanceEntry{
- amiFamily: api.NodeImageFamilyAmazonLinux2023,
- gpuInstanceType: "g4dn.xlarge",
- expectUnsupportedErr: true,
- instanceTypeName: "GPU",
+ amiFamily: api.NodeImageFamilyAmazonLinux2023,
+ gpuInstanceType: "g4dn.xlarge",
}),
Entry("AL2", gpuInstanceEntry{
gpuInstanceType: "g4dn.xlarge",
diff --git a/pkg/apis/eksctl.io/v1alpha5/iam.go b/pkg/apis/eksctl.io/v1alpha5/iam.go
index 85e93d6a38..1a446dd5e5 100644
--- a/pkg/apis/eksctl.io/v1alpha5/iam.go
+++ b/pkg/apis/eksctl.io/v1alpha5/iam.go
@@ -52,9 +52,8 @@ type ClusterIAM struct {
// See [IAM Service Accounts](/usage/iamserviceaccounts/#usage-with-config-files)
// +optional
ServiceAccounts []*ClusterIAMServiceAccount `json:"serviceAccounts,omitempty"`
-
// pod identity associations to create in the cluster.
- // See [Pod Identity Associations](TBD)
+ // See [Pod Identity Associations](/usage/pod-identity-associations)
// +optional
PodIdentityAssociations []PodIdentityAssociation `json:"podIdentityAssociations,omitempty"`
diff --git a/pkg/apis/eksctl.io/v1alpha5/types.go b/pkg/apis/eksctl.io/v1alpha5/types.go
index 94b67afd7d..c9ceb24a7f 100644
--- a/pkg/apis/eksctl.io/v1alpha5/types.go
+++ b/pkg/apis/eksctl.io/v1alpha5/types.go
@@ -43,13 +43,14 @@ const (
Version1_29 = "1.29"
- // Version1_30 represents Kubernetes version 1.30.x.
Version1_30 = "1.30"
+ Version1_31 = "1.31"
+
// DefaultVersion (default)
DefaultVersion = Version1_30
- LatestVersion = Version1_30
+ LatestVersion = Version1_31
DockershimDeprecationVersion = Version1_24
)
@@ -98,8 +99,8 @@ const (
// Not yet supported versions
const (
- // Version1_31 represents Kubernetes version 1.31.x
- Version1_31 = "1.31"
+ // Version1_32 represents Kubernetes version 1.32.x
+ Version1_32 = "1.32"
)
const (
@@ -172,6 +173,9 @@ const (
// RegionAPSouthEast4 represents the Asia-Pacific South East Region Melbourne
RegionAPSouthEast4 = "ap-southeast-4"
+ // RegionAPSouthEast5 represents the Asia-Pacific South East Region Kuala Lumpur
+ RegionAPSouthEast5 = "ap-southeast-5"
+
// RegionAPSouth1 represents the Asia-Pacific South Region Mumbai
RegionAPSouth1 = "ap-south-1"
@@ -393,6 +397,10 @@ const (
// eksResourceAccountAPSouthEast4 defines the AWS EKS account ID that provides node resources in ap-southeast-4
eksResourceAccountAPSouthEast4 = "491585149902"
+
+ // eksResourceAccountAPSouthEast5 defines the AWS EKS account ID that provides node resources in ap-southeast-5
+ eksResourceAccountAPSouthEast5 = "151610086707"
+
// eksResourceAccountUSISOEast1 defines the AWS EKS account ID that provides node resources in us-iso-east-1
eksResourceAccountUSISOEast1 = "725322719131"
@@ -443,17 +451,6 @@ const (
IPV6Family = "IPv6"
)
-// Values for core addons
-const (
- minimumVPCCNIVersionForIPv6 = "1.10.0"
- VPCCNIAddon = "vpc-cni"
- KubeProxyAddon = "kube-proxy"
- CoreDNSAddon = "coredns"
- PodIdentityAgentAddon = "eks-pod-identity-agent"
- AWSEBSCSIDriverAddon = "aws-ebs-csi-driver"
- AWSEFSCSIDriverAddon = "aws-efs-csi-driver"
-)
-
// supported version of Karpenter
const (
supportedKarpenterVersion = "v0.20.0"
@@ -550,6 +547,7 @@ func SupportedRegions() []string {
RegionAPSouthEast2,
RegionAPSouthEast3,
RegionAPSouthEast4,
+ RegionAPSouthEast5,
RegionAPSouth1,
RegionAPSouth2,
RegionAPEast1,
@@ -610,6 +608,7 @@ func SupportedVersions() []string {
Version1_28,
Version1_29,
Version1_30,
+ Version1_31,
}
}
@@ -696,6 +695,8 @@ func EKSResourceAccountID(region string) string {
return eksResourceAccountAPSouthEast3
case RegionAPSouthEast4:
return eksResourceAccountAPSouthEast4
+ case RegionAPSouthEast5:
+ return eksResourceAccountAPSouthEast5
case RegionILCentral1:
return eksResourceAccountILCentral1
case RegionUSISOEast1:
@@ -995,6 +996,9 @@ type ClusterConfig struct {
// Spot Ocean.
// +optional
SpotOcean *SpotOceanCluster `json:"spotOcean,omitempty"`
+
+ // ZonalShiftConfig specifies the zonal shift configuration.
+ ZonalShiftConfig *ZonalShiftConfig `json:"zonalShiftConfig,omitempty"`
}
// Outpost holds the Outpost configuration.
@@ -1022,6 +1026,12 @@ func (o *Outpost) HasPlacementGroup() bool {
return o.ControlPlanePlacement != nil
}
+// ZonalShiftConfig holds the zonal shift configuration.
+type ZonalShiftConfig struct {
+ // Enabled enables or disables zonal shift.
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
// OutpostInfo describes the Outpost info.
type OutpostInfo interface {
// IsControlPlaneOnOutposts returns true if the control plane is on Outposts.
@@ -1692,6 +1702,7 @@ type (
Headroom *SpotOceanHeadroom `json:"headrooms,omitempty"`
// +optional
ResourceLimits *SpotOceanClusterResourceLimits `json:"resourceLimits,omitempty"`
+ Down *AutoScalerDown `json:"down,omitempty"`
}
// SpotOceanVirtualNodeGroupAutoScaler holds the auto scaler configuration used by Spot Ocean.
@@ -1722,6 +1733,16 @@ type (
MaxMemoryGiB *int `json:"maxMemoryGib,omitempty"`
}
+ AutoScalerDown struct {
+ EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+ MaxScaleDownPercentage *float64 `json:"maxScaleDownPercentage,omitempty"`
+ AggressiveScaleDown *AggressiveScaleDown `json:"aggressiveScaleDown,omitempty"`
+ }
+
+ AggressiveScaleDown struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ }
+
// SpotOceanVirtualNodeGroupResourceLimits holds the resource limits configuration used by Spot Ocean.
SpotOceanVirtualNodeGroupResourceLimits struct {
// +optional
diff --git a/pkg/apis/eksctl.io/v1alpha5/validation.go b/pkg/apis/eksctl.io/v1alpha5/validation.go
index 1ac66eab99..81b03b4d50 100644
--- a/pkg/apis/eksctl.io/v1alpha5/validation.go
+++ b/pkg/apis/eksctl.io/v1alpha5/validation.go
@@ -661,12 +661,11 @@ func validateNodeGroupBase(np NodePool, path string, controlPlaneOnOutposts bool
instanceType := SelectInstanceType(np)
- if ng.AMIFamily == NodeImageFamilyAmazonLinux2023 && instanceutils.IsNvidiaInstanceType(instanceType) {
- return ErrUnsupportedInstanceTypes("GPU", NodeImageFamilyAmazonLinux2023,
- fmt.Sprintf("EKS accelerated AMIs based on %s will be available at a later date", NodeImageFamilyAmazonLinux2023))
- }
+ if ng.AMIFamily != NodeImageFamilyAmazonLinux2023 &&
+ ng.AMIFamily != NodeImageFamilyAmazonLinux2 &&
+ ng.AMIFamily != NodeImageFamilyBottlerocket &&
+ ng.AMIFamily != "" {
- if ng.AMIFamily != NodeImageFamilyAmazonLinux2 && ng.AMIFamily != NodeImageFamilyBottlerocket && ng.AMIFamily != "" {
if instanceutils.IsNvidiaInstanceType(instanceType) {
logger.Warning(GPUDriversWarning(ng.AMIFamily))
}
@@ -676,17 +675,35 @@ func validateNodeGroupBase(np NodePool, path string, controlPlaneOnOutposts bool
}
}
- if ng.AMIFamily != NodeImageFamilyAmazonLinux2 && ng.AMIFamily != "" {
- // Only AL2 supports Inferentia hosts.
+ if ng.AMIFamily != NodeImageFamilyAmazonLinux2 &&
+ ng.AMIFamily != NodeImageFamilyAmazonLinux2023 &&
+ ng.AMIFamily != "" {
+ // Only AL2 and AL2023 support Inferentia hosts.
if instanceutils.IsInferentiaInstanceType(instanceType) {
return ErrUnsupportedInstanceTypes("Inferentia", ng.AMIFamily, fmt.Sprintf("please use %s instead", NodeImageFamilyAmazonLinux2))
}
- // Only AL2 supports Trainium hosts.
+ // Only AL2 and AL2023 support Trainium hosts.
if instanceutils.IsTrainiumInstanceType(instanceType) {
return ErrUnsupportedInstanceTypes("Trainium", ng.AMIFamily, fmt.Sprintf("please use %s instead", NodeImageFamilyAmazonLinux2))
}
}
+ if ng.AMIFamily == NodeImageFamilyAmazonLinux2023 {
+ fieldNotSupported := func(field string) error {
+ return &unsupportedFieldError{
+ ng: ng,
+ path: path,
+ field: field,
+ }
+ }
+ if ng.PreBootstrapCommands != nil {
+ return fieldNotSupported("preBootstrapCommands")
+ }
+ if ng.OverrideBootstrapCommand != nil {
+ return fieldNotSupported("overrideBootstrapCommand")
+ }
+ }
+
if ng.CapacityReservation != nil {
if ng.CapacityReservation.CapacityReservationPreference != nil {
if ng.CapacityReservation.CapacityReservationTarget != nil {
@@ -871,13 +888,6 @@ func ValidateNodeGroup(i int, ng *NodeGroup, cfg *ClusterConfig) error {
if ng.KubeletExtraConfig != nil {
return fieldNotSupported("kubeletExtraConfig")
}
- } else if ng.AMIFamily == NodeImageFamilyAmazonLinux2023 {
- if ng.PreBootstrapCommands != nil {
- return fieldNotSupported("preBootstrapCommands")
- }
- if ng.OverrideBootstrapCommand != nil {
- return fieldNotSupported("overrideBootstrapCommand")
- }
} else if ng.AMIFamily == NodeImageFamilyBottlerocket {
if ng.KubeletExtraConfig != nil {
return fieldNotSupported("kubeletExtraConfig")
diff --git a/pkg/apis/eksctl.io/v1alpha5/validation_test.go b/pkg/apis/eksctl.io/v1alpha5/validation_test.go
index a34a1880d2..59ab318a51 100644
--- a/pkg/apis/eksctl.io/v1alpha5/validation_test.go
+++ b/pkg/apis/eksctl.io/v1alpha5/validation_test.go
@@ -171,14 +171,6 @@ var _ = Describe("ClusterConfig validation", func() {
errMsg := fmt.Sprintf("overrideBootstrapCommand is required when using a custom AMI based on %s", ng0.AMIFamily)
Expect(api.ValidateNodeGroup(0, ng0, cfg)).To(MatchError(ContainSubstring(errMsg)))
})
- It("should not require overrideBootstrapCommand if ami is set and type is AmazonLinux2023", func() {
- cfg := api.NewClusterConfig()
- ng0 := cfg.NewNodeGroup()
- ng0.Name = "node-group"
- ng0.AMI = "ami-1234"
- ng0.AMIFamily = api.NodeImageFamilyAmazonLinux2023
- Expect(api.ValidateNodeGroup(0, ng0, cfg)).To(Succeed())
- })
It("should not require overrideBootstrapCommand if ami is set and type is Bottlerocket", func() {
cfg := api.NewClusterConfig()
ng0 := cfg.NewNodeGroup()
@@ -204,15 +196,6 @@ var _ = Describe("ClusterConfig validation", func() {
ng0.OverrideBootstrapCommand = aws.String("echo 'yo'")
Expect(api.ValidateNodeGroup(0, ng0, cfg)).To(Succeed())
})
- It("should throw an error if overrideBootstrapCommand is set and type is AmazonLinux2023", func() {
- cfg := api.NewClusterConfig()
- ng0 := cfg.NewNodeGroup()
- ng0.Name = "node-group"
- ng0.AMI = "ami-1234"
- ng0.AMIFamily = api.NodeImageFamilyAmazonLinux2023
- ng0.OverrideBootstrapCommand = aws.String("echo 'yo'")
- Expect(api.ValidateNodeGroup(0, ng0, cfg)).To(MatchError(ContainSubstring(fmt.Sprintf("overrideBootstrapCommand is not supported for %s nodegroups", api.NodeImageFamilyAmazonLinux2023))))
- })
It("should throw an error if overrideBootstrapCommand is set and type is Bottlerocket", func() {
cfg := api.NewClusterConfig()
ng0 := cfg.NewNodeGroup()
@@ -2104,7 +2087,40 @@ var _ = Describe("ClusterConfig validation", func() {
err := api.ValidateManagedNodeGroup(0, ng)
Expect(err).To(MatchError(ContainSubstring("eksctl does not support configuring maxPodsPerNode EKS-managed nodes")))
})
- })
+ It("returns an error when setting preBootstrapCommands for self-managed nodegroups", func() {
+ cfg := api.NewClusterConfig()
+ ng := cfg.NewNodeGroup()
+ ng.Name = "node-group"
+ ng.AMI = "ami-1234"
+ ng.AMIFamily = api.NodeImageFamilyAmazonLinux2023
+ ng.PreBootstrapCommands = []string{"echo 'rubarb'"}
+ Expect(api.ValidateNodeGroup(0, ng, cfg)).To(MatchError(ContainSubstring(fmt.Sprintf("preBootstrapCommands is not supported for %s nodegroups", api.NodeImageFamilyAmazonLinux2023))))
+ })
+ It("returns an error when setting overrideBootstrapCommand for self-managed nodegroups", func() {
+ cfg := api.NewClusterConfig()
+ ng := cfg.NewNodeGroup()
+ ng.Name = "node-group"
+ ng.AMI = "ami-1234"
+ ng.AMIFamily = api.NodeImageFamilyAmazonLinux2023
+ ng.OverrideBootstrapCommand = aws.String("echo 'rubarb'")
+ Expect(api.ValidateNodeGroup(0, ng, cfg)).To(MatchError(ContainSubstring(fmt.Sprintf("overrideBootstrapCommand is not supported for %s nodegroups", api.NodeImageFamilyAmazonLinux2023))))
+ })
+ It("returns an error when setting preBootstrapCommands for EKS-managed nodegroups", func() {
+ ng := api.NewManagedNodeGroup()
+ ng.Name = "node-group"
+ ng.AMI = "ami-1234"
+ ng.AMIFamily = api.NodeImageFamilyAmazonLinux2023
+ ng.PreBootstrapCommands = []string{"echo 'rubarb'"}
+ Expect(api.ValidateManagedNodeGroup(0, ng)).To(MatchError(ContainSubstring(fmt.Sprintf("preBootstrapCommands is not supported for %s nodegroups", api.NodeImageFamilyAmazonLinux2023))))
+ })
+ It("returns an error when setting overrideBootstrapCommand for EKS-managed nodegroups", func() {
+ ng := api.NewManagedNodeGroup()
+ ng.Name = "node-group"
+ ng.AMI = "ami-1234"
+ ng.AMIFamily = api.NodeImageFamilyAmazonLinux2023
+ ng.OverrideBootstrapCommand = aws.String("echo 'rubarb'")
+ Expect(api.ValidateManagedNodeGroup(0, ng)).To(MatchError(ContainSubstring(fmt.Sprintf("overrideBootstrapCommand is not supported for %s nodegroups", api.NodeImageFamilyAmazonLinux2023))))
+ })
Describe("Windows node groups", func() {
It("returns an error with unsupported fields", func() {
diff --git a/pkg/az/az.go b/pkg/az/az.go
index d2eab86619..c9bc5a8f83 100644
--- a/pkg/az/az.go
+++ b/pkg/az/az.go
@@ -19,7 +19,10 @@ import (
)
var zoneIDsToAvoid = map[string][]string{
- api.RegionCNNorth1: {"cnn1-az4"}, // https://github.com/eksctl-io/eksctl/issues/3916
+ api.RegionCNNorth1: {"cnn1-az4"}, // https://github.com/eksctl-io/eksctl/issues/3916
+ api.RegionUSEast1: {"use1-az3"},
+ api.RegionUSWest1: {"usw1-az2"},
+ api.RegionCACentral1: {"cac1-az3"},
}
func GetAvailabilityZones(ctx context.Context, ec2API awsapi.EC2, region string, spec *api.ClusterConfig) ([]string, error) {
diff --git a/pkg/az/az_test.go b/pkg/az/az_test.go
index 919be08598..d6dbebaae0 100644
--- a/pkg/az/az_test.go
+++ b/pkg/az/az_test.go
@@ -215,7 +215,7 @@ var _ = Describe("AZ", func() {
},
LocationType: ec2types.LocationTypeAvailabilityZone,
MaxResults: aws.Int32(100),
- }).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
NextToken: aws.String("token"),
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
@@ -249,7 +249,7 @@ var _ = Describe("AZ", func() {
LocationType: ec2types.LocationTypeAvailabilityZone,
MaxResults: aws.Int32(100),
NextToken: aws.String("token"),
- }).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
InstanceType: "t2.medium",
@@ -304,6 +304,128 @@ var _ = Describe("AZ", func() {
})
})
+ type unsupportedZoneEntry struct {
+ region string
+ zoneNameToIDs map[string]string
+ expectedZones []string
+ }
+ DescribeTable("region with unsupported zone IDs", func(e unsupportedZoneEntry) {
+ var azs []ec2types.AvailabilityZone
+ for zoneName, zoneID := range e.zoneNameToIDs {
+ azs = append(azs, createAvailabilityZoneWithID(e.region, ec2types.AvailabilityZoneStateAvailable, zoneName, zoneID))
+ }
+ mockProvider := mockprovider.NewMockProvider()
+ mockProvider.MockEC2().On("DescribeAvailabilityZones", mock.Anything, &ec2.DescribeAvailabilityZonesInput{
+ Filters: []ec2types.Filter{
+ {
+ Name: aws.String("region-name"),
+ Values: []string{e.region},
+ },
+ {
+ Name: aws.String("state"),
+ Values: []string{string(ec2types.AvailabilityZoneStateAvailable)},
+ },
+ {
+ Name: aws.String("zone-type"),
+ Values: []string{string(ec2types.LocationTypeAvailabilityZone)},
+ },
+ },
+ }).Return(&ec2.DescribeAvailabilityZonesOutput{
+ AvailabilityZones: azs,
+ }, nil)
+ mockProvider.MockEC2().On("DescribeInstanceTypeOfferings", mock.Anything, &ec2.DescribeInstanceTypeOfferingsInput{
+ Filters: []ec2types.Filter{
+ {
+ Name: aws.String("instance-type"),
+ Values: []string{"t2.small", "t2.medium"},
+ },
+ {
+ Name: aws.String("location"),
+ Values: []string{"zone1", "zone2", "zone3", "zone4"},
+ },
+ },
+ LocationType: ec2types.LocationTypeAvailabilityZone,
+ MaxResults: aws.Int32(100),
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ NextToken: aws.String("token"),
+ InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
+ {
+ InstanceType: "t2.small",
+ Location: aws.String("zone1"),
+ LocationType: "availability-zone",
+ },
+ {
+ InstanceType: "t2.small",
+ Location: aws.String("zone2"),
+ LocationType: "availability-zone",
+ },
+ {
+ InstanceType: "t2.small",
+ Location: aws.String("zone4"),
+ LocationType: "availability-zone",
+ },
+ {
+ InstanceType: "t2.small",
+ Location: aws.String("zone3"),
+ LocationType: "availability-zone",
+ },
+ },
+ }, nil)
+ clusterConfig := api.NewClusterConfig()
+ clusterConfig.Metadata.Region = e.region
+ clusterConfig.NodeGroups = []*api.NodeGroup{
+ {
+ NodeGroupBase: &api.NodeGroupBase{
+ Name: "test-az-1",
+ },
+ },
+ {
+ NodeGroupBase: &api.NodeGroupBase{
+ Name: "test-az-2",
+ },
+ },
+ }
+ zones, err := az.GetAvailabilityZones(context.Background(), mockProvider.MockEC2(), e.region, clusterConfig)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(zones).To(ConsistOf(e.expectedZones))
+ },
+ Entry(api.RegionCNNorth1, unsupportedZoneEntry{
+ region: api.RegionCNNorth1,
+ zoneNameToIDs: map[string]string{
+ "zone1": "cnn1-az1",
+ "zone2": "cnn1-az2",
+ "zone4": "cnn1-az4",
+ },
+ expectedZones: []string{"zone1", "zone2"},
+ }),
+ Entry(api.RegionUSEast1, unsupportedZoneEntry{
+ region: api.RegionUSEast1,
+ zoneNameToIDs: map[string]string{
+ "zone1": "use1-az1",
+ "zone2": "use1-az3",
+ "zone3": "use1-az2",
+ },
+ expectedZones: []string{"zone1", "zone3"},
+ }),
+ Entry(api.RegionUSWest1, unsupportedZoneEntry{
+ region: api.RegionUSWest1,
+ zoneNameToIDs: map[string]string{
+ "zone1": "usw1-az2",
+ "zone2": "usw1-az1",
+ "zone3": "usw1-az3",
+ },
+ expectedZones: []string{"zone2", "zone3"},
+ }),
+ Entry(api.RegionCACentral1, unsupportedZoneEntry{
+ region: api.RegionCACentral1,
+ zoneNameToIDs: map[string]string{
+ "zone1": "cac1-az1",
+ "zone2": "cac1-az2",
+ "zone3": "cac1-az3",
+ },
+ expectedZones: []string{"zone1", "zone2"},
+ }),
+ )
When("the region contains zones that are denylisted", func() {
BeforeEach(func() {
region = api.RegionCNNorth1
diff --git a/pkg/cfn/builder/cluster.go b/pkg/cfn/builder/cluster.go
index 0eac1ba87c..54cd0c5007 100644
--- a/pkg/cfn/builder/cluster.go
+++ b/pkg/cfn/builder/cluster.go
@@ -296,11 +296,12 @@ func (c *ClusterResourceSet) addResourcesForControlPlane(subnetDetails *SubnetDe
}
cluster := gfneks.Cluster{
- EncryptionConfig: encryptionConfigs,
- Logging: makeClusterLogging(c.spec),
- Name: gfnt.NewString(c.spec.Metadata.Name),
- ResourcesVpcConfig: clusterVPC,
- RoleArn: serviceRoleARN,
+ EncryptionConfig: encryptionConfigs,
+ Logging: makeClusterLogging(c.spec),
+ Name: gfnt.NewString(c.spec.Metadata.Name),
+ ResourcesVpcConfig: clusterVPC,
+ RoleArn: serviceRoleARN,
+ BootstrapSelfManagedAddons: gfnt.NewBoolean(false),
AccessConfig: &gfneks.Cluster_AccessConfig{
AuthenticationMode: gfnt.NewString(string(c.spec.AccessConfig.AuthenticationMode)),
BootstrapClusterCreatorAdminPermissions: gfnt.NewBoolean(!api.IsDisabled(c.spec.AccessConfig.BootstrapClusterCreatorAdminPermissions)),
@@ -334,6 +335,11 @@ func (c *ClusterResourceSet) addResourcesForControlPlane(subnetDetails *SubnetDe
kubernetesNetworkConfig.IpFamily = gfnt.NewString(strings.ToLower(ipFamily))
}
cluster.KubernetesNetworkConfig = kubernetesNetworkConfig
+ if c.spec.ZonalShiftConfig != nil && api.IsEnabled(c.spec.ZonalShiftConfig.Enabled) {
+ cluster.ZonalShiftConfig = &gfneks.Cluster_ZonalShift{
+ Enabled: gfnt.NewBoolean(true),
+ }
+ }
c.newResource("ControlPlane", &cluster)
diff --git a/pkg/cfn/builder/cluster_test.go b/pkg/cfn/builder/cluster_test.go
index f50448a291..102cec8b5f 100644
--- a/pkg/cfn/builder/cluster_test.go
+++ b/pkg/cfn/builder/cluster_test.go
@@ -2,6 +2,7 @@ package builder_test
import (
"context"
+ _ "embed"
"encoding/json"
"reflect"
@@ -20,8 +21,6 @@ import (
"github.com/weaveworks/eksctl/pkg/cfn/builder"
"github.com/weaveworks/eksctl/pkg/cfn/builder/fakes"
"github.com/weaveworks/eksctl/pkg/testutils/mockprovider"
-
- _ "embed"
)
var _ = Describe("Cluster Template Builder", func() {
@@ -669,6 +668,12 @@ var _ = Describe("Cluster Template Builder", func() {
Expect(accessConfig.BootstrapClusterCreatorAdminPermissions).To(BeFalse())
})
})
+
+ Context("bootstrapSelfManagedAddons in default config", func() {
+ It("should disable default addons", func() {
+ Expect(clusterTemplate.Resources["ControlPlane"].Properties.BootstrapSelfManagedAddons).To(BeFalse())
+ })
+ })
})
Describe("GetAllOutputs", func() {
diff --git a/pkg/cfn/builder/fakes/fake_cfn_template.go b/pkg/cfn/builder/fakes/fake_cfn_template.go
index 34de6e9e0e..5e554f6d7e 100644
--- a/pkg/cfn/builder/fakes/fake_cfn_template.go
+++ b/pkg/cfn/builder/fakes/fake_cfn_template.go
@@ -25,6 +25,7 @@ type Properties struct {
Description string
Tags []Tag
SecurityGroupIngress []SGIngress
+ BootstrapSelfManagedAddons bool
GroupID interface{}
SourceSecurityGroupID interface{}
DestinationSecurityGroupID interface{}
diff --git a/pkg/cfn/builder/iam.go b/pkg/cfn/builder/iam.go
index 422802c064..b534edfa46 100644
--- a/pkg/cfn/builder/iam.go
+++ b/pkg/cfn/builder/iam.go
@@ -217,12 +217,6 @@ func NewIAMRoleResourceSetForServiceAccount(spec *api.ClusterIAMServiceAccount,
}
}
-func NewIAMRoleResourceSetForPodIdentityWithTrustStatements(spec *api.PodIdentityAssociation, trustStatements []api.IAMStatement) *IAMRoleResourceSet {
- rs := NewIAMRoleResourceSetForPodIdentity(spec)
- rs.trustStatements = trustStatements
- return rs
-}
-
func NewIAMRoleResourceSetForPodIdentity(spec *api.PodIdentityAssociation) *IAMRoleResourceSet {
return &IAMRoleResourceSet{
template: cft.NewTemplate(),
diff --git a/pkg/cfn/builder/iam_test.go b/pkg/cfn/builder/iam_test.go
index d87303cd80..0d793850e5 100644
--- a/pkg/cfn/builder/iam_test.go
+++ b/pkg/cfn/builder/iam_test.go
@@ -295,6 +295,7 @@ var _ = Describe("template builder for IAM", func() {
}
]`))
Expect(t).To(HaveOutputWithValue(outputs.IAMServiceAccountRoleName, `{ "Fn::GetAtt": "Role1.Arn" }`))
+ Expect(t).To(HaveResourceWithPropertyValue("PolicyAWSLoadBalancerController", "PolicyDocument", expectedAWSLoadBalancerControllerPolicyDocument))
Expect(t).To(HaveResourceWithPropertyValue("PolicyEBSCSIController", "PolicyDocument", expectedEbsPolicyDocument))
})
@@ -467,6 +468,276 @@ const expectedAssumeRolePolicyDocument = `{
"Version": "2012-10-17"
}`
+const expectedAWSLoadBalancerControllerPolicyDocument = `{
+ "Statement": [
+ {
+ "Action": [
+ "iam:CreateServiceLinkedRole"
+ ],
+ "Condition": {
+ "StringEquals": {
+ "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "ec2:DescribeAccountAttributes",
+ "ec2:DescribeAddresses",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DescribeInternetGateways",
+ "ec2:DescribeVpcs",
+ "ec2:DescribeVpcPeeringConnections",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeInstances",
+ "ec2:DescribeNetworkInterfaces",
+ "ec2:DescribeTags",
+ "ec2:GetCoipPoolUsage",
+ "ec2:DescribeCoipPools",
+ "elasticloadbalancing:DescribeLoadBalancers",
+ "elasticloadbalancing:DescribeLoadBalancerAttributes",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeListenerAttributes",
+ "elasticloadbalancing:DescribeListenerCertificates",
+ "elasticloadbalancing:DescribeSSLPolicies",
+ "elasticloadbalancing:DescribeRules",
+ "elasticloadbalancing:DescribeTargetGroups",
+ "elasticloadbalancing:DescribeTargetGroupAttributes",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:DescribeTags"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "cognito-idp:DescribeUserPoolClient",
+ "acm:ListCertificates",
+ "acm:DescribeCertificate",
+ "iam:ListServerCertificates",
+ "iam:GetServerCertificate",
+ "waf-regional:GetWebACL",
+ "waf-regional:GetWebACLForResource",
+ "waf-regional:AssociateWebACL",
+ "waf-regional:DisassociateWebACL",
+ "wafv2:GetWebACL",
+ "wafv2:GetWebACLForResource",
+ "wafv2:AssociateWebACL",
+ "wafv2:DisassociateWebACL",
+ "shield:GetSubscriptionState",
+ "shield:DescribeProtection",
+ "shield:CreateProtection",
+ "shield:DeleteProtection"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:RevokeSecurityGroupIngress"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "ec2:CreateSecurityGroup"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "ec2:CreateTags"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ },
+ "StringEquals": {
+ "ec2:CreateAction": "CreateSecurityGroup"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": {
+ "Fn::Sub": "arn:${AWS::Partition}:ec2:*:*:security-group/*"
+ }
+ },
+ {
+ "Action": [
+ "ec2:CreateTags",
+ "ec2:DeleteTags"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": {
+ "Fn::Sub": "arn:${AWS::Partition}:ec2:*:*:security-group/*"
+ }
+ },
+ {
+ "Action": [
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:DeleteSecurityGroup"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:CreateLoadBalancer",
+ "elasticloadbalancing:CreateTargetGroup"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:DeleteListener",
+ "elasticloadbalancing:CreateRule",
+ "elasticloadbalancing:DeleteRule"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:RemoveTags"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": [
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:targetgroup/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*"
+ }
+ ]
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:RemoveTags"
+ ],
+ "Effect": "Allow",
+ "Resource": [
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:listener/net/*/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:listener/app/*/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:listener-rule/net/*/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:listener-rule/app/*/*/*"
+ }
+ ]
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:ModifyListenerAttributes",
+ "elasticloadbalancing:ModifyLoadBalancerAttributes",
+ "elasticloadbalancing:SetIpAddressType",
+ "elasticloadbalancing:SetSecurityGroups",
+ "elasticloadbalancing:SetSubnets",
+ "elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:ModifyTargetGroup",
+ "elasticloadbalancing:ModifyTargetGroupAttributes",
+ "elasticloadbalancing:DeleteTargetGroup"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*"
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:AddTags"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ },
+ "StringEquals": {
+ "elasticloadbalancing:CreateAction": [
+ "CreateTargetGroup",
+ "CreateLoadBalancer"
+ ]
+ }
+ },
+ "Effect": "Allow",
+ "Resource": [
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:targetgroup/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*"
+ },
+ {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*"
+ }
+ ]
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:DeregisterTargets"
+ ],
+ "Effect": "Allow",
+ "Resource": {
+ "Fn::Sub": "arn:${AWS::Partition}:elasticloadbalancing:*:*:targetgroup/*/*"
+ }
+ },
+ {
+ "Action": [
+ "elasticloadbalancing:SetWebAcl",
+ "elasticloadbalancing:ModifyListener",
+ "elasticloadbalancing:AddListenerCertificates",
+ "elasticloadbalancing:RemoveListenerCertificates",
+ "elasticloadbalancing:ModifyRule"
+ ],
+ "Effect": "Allow",
+ "Resource": "*"
+ }
+ ],
+ "Version": "2012-10-17"
+}`
+
const expectedEbsPolicyDocument = `{
"Statement": [
{
diff --git a/pkg/cfn/builder/karpenter.go b/pkg/cfn/builder/karpenter.go
index dc1ac5c3fb..5a42218aba 100644
--- a/pkg/cfn/builder/karpenter.go
+++ b/pkg/cfn/builder/karpenter.go
@@ -49,9 +49,15 @@ const (
ec2DescribeImages = "ec2:DescribeImages"
ec2DescribeSpotPriceHistory = "ec2:DescribeSpotPriceHistory"
// IAM
- iamPassRole = "iam:PassRole"
- iamCreateServiceLinkedRole = "iam:CreateServiceLinkedRole"
- ssmGetParameter = "ssm:GetParameter"
+ iamPassRole = "iam:PassRole"
+ iamCreateServiceLinkedRole = "iam:CreateServiceLinkedRole"
+ iamGetInstanceProfile = "iam:GetInstanceProfile"
+ iamCreateInstanceProfile = "iam:CreateInstanceProfile"
+ iamDeleteInstanceProfile = "iam:DeleteInstanceProfile"
+ iamTagInstanceProfile = "iam:TagInstanceProfile"
+ iamAddRoleToInstanceProfile = "iam:AddRoleToInstanceProfile"
+ // SSM
+ ssmGetParameter = "ssm:GetParameter"
// Pricing
pricingGetProducts = "pricing:GetProducts"
// SQS
@@ -165,6 +171,11 @@ func (k *KarpenterResourceSet) addResourcesForKarpenter() error {
ec2DescribeSpotPriceHistory,
iamPassRole,
iamCreateServiceLinkedRole,
+ iamGetInstanceProfile,
+ iamCreateInstanceProfile,
+ iamDeleteInstanceProfile,
+ iamTagInstanceProfile,
+ iamAddRoleToInstanceProfile,
ssmGetParameter,
pricingGetProducts,
},
diff --git a/pkg/cfn/builder/karpenter_test.go b/pkg/cfn/builder/karpenter_test.go
index 11935ea3a1..39605cd7ce 100644
--- a/pkg/cfn/builder/karpenter_test.go
+++ b/pkg/cfn/builder/karpenter_test.go
@@ -125,6 +125,11 @@ var expectedTemplate = `{
"ec2:DescribeSpotPriceHistory",
"iam:PassRole",
"iam:CreateServiceLinkedRole",
+ "iam:GetInstanceProfile",
+ "iam:CreateInstanceProfile",
+ "iam:DeleteInstanceProfile",
+ "iam:TagInstanceProfile",
+ "iam:AddRoleToInstanceProfile",
"ssm:GetParameter",
"pricing:GetProducts"
],
@@ -262,6 +267,11 @@ var expectedTemplateWithPermissionBoundary = `{
"ec2:DescribeSpotPriceHistory",
"iam:PassRole",
"iam:CreateServiceLinkedRole",
+ "iam:GetInstanceProfile",
+ "iam:CreateInstanceProfile",
+ "iam:DeleteInstanceProfile",
+ "iam:TagInstanceProfile",
+ "iam:AddRoleToInstanceProfile",
"ssm:GetParameter",
"pricing:GetProducts"
],
@@ -424,6 +434,11 @@ var expectedTemplateWithSpotInterruptionQueue = `{
"ec2:DescribeSpotPriceHistory",
"iam:PassRole",
"iam:CreateServiceLinkedRole",
+ "iam:GetInstanceProfile",
+ "iam:CreateInstanceProfile",
+ "iam:DeleteInstanceProfile",
+ "iam:TagInstanceProfile",
+ "iam:AddRoleToInstanceProfile",
"ssm:GetParameter",
"pricing:GetProducts"
],
diff --git a/pkg/cfn/builder/managed_nodegroup.go b/pkg/cfn/builder/managed_nodegroup.go
index b90d07eb44..18cf5c4193 100644
--- a/pkg/cfn/builder/managed_nodegroup.go
+++ b/pkg/cfn/builder/managed_nodegroup.go
@@ -263,41 +263,45 @@ func validateLaunchTemplate(launchTemplateData *ec2types.ResponseLaunchTemplateD
func getAMIType(ng *api.ManagedNodeGroup, instanceType string) ekstypes.AMITypes {
amiTypeMapping := map[string]struct {
- X86x64 ekstypes.AMITypes
- X86GPU ekstypes.AMITypes
- ARM ekstypes.AMITypes
- ARMGPU ekstypes.AMITypes
+ X86x64 ekstypes.AMITypes
+ X86Nvidia ekstypes.AMITypes
+ X86Neuron ekstypes.AMITypes
+ ARM ekstypes.AMITypes
+ ARMGPU ekstypes.AMITypes
}{
api.NodeImageFamilyAmazonLinux2023: {
- X86x64: ekstypes.AMITypesAl2023X8664Standard,
- ARM: ekstypes.AMITypesAl2023Arm64Standard,
+ X86x64: ekstypes.AMITypesAl2023X8664Standard,
+ X86Nvidia: ekstypes.AMITypesAl2023X8664Nvidia,
+ X86Neuron: ekstypes.AMITypesAl2023X8664Neuron,
+ ARM: ekstypes.AMITypesAl2023Arm64Standard,
},
api.NodeImageFamilyAmazonLinux2: {
- X86x64: ekstypes.AMITypesAl2X8664,
- X86GPU: ekstypes.AMITypesAl2X8664Gpu,
- ARM: ekstypes.AMITypesAl2Arm64,
+ X86x64: ekstypes.AMITypesAl2X8664,
+ X86Nvidia: ekstypes.AMITypesAl2X8664Gpu,
+ X86Neuron: ekstypes.AMITypesAl2X8664Gpu,
+ ARM: ekstypes.AMITypesAl2Arm64,
},
api.NodeImageFamilyBottlerocket: {
- X86x64: ekstypes.AMITypesBottlerocketX8664,
- X86GPU: ekstypes.AMITypesBottlerocketX8664Nvidia,
- ARM: ekstypes.AMITypesBottlerocketArm64,
- ARMGPU: ekstypes.AMITypesBottlerocketArm64Nvidia,
+ X86x64: ekstypes.AMITypesBottlerocketX8664,
+ X86Nvidia: ekstypes.AMITypesBottlerocketX8664Nvidia,
+ ARM: ekstypes.AMITypesBottlerocketArm64,
+ ARMGPU: ekstypes.AMITypesBottlerocketArm64Nvidia,
},
api.NodeImageFamilyWindowsServer2019FullContainer: {
- X86x64: ekstypes.AMITypesWindowsFull2019X8664,
- X86GPU: ekstypes.AMITypesWindowsFull2019X8664,
+ X86x64: ekstypes.AMITypesWindowsFull2019X8664,
+ X86Nvidia: ekstypes.AMITypesWindowsFull2019X8664,
},
api.NodeImageFamilyWindowsServer2019CoreContainer: {
- X86x64: ekstypes.AMITypesWindowsCore2019X8664,
- X86GPU: ekstypes.AMITypesWindowsCore2019X8664,
+ X86x64: ekstypes.AMITypesWindowsCore2019X8664,
+ X86Nvidia: ekstypes.AMITypesWindowsCore2019X8664,
},
api.NodeImageFamilyWindowsServer2022FullContainer: {
- X86x64: ekstypes.AMITypesWindowsFull2022X8664,
- X86GPU: ekstypes.AMITypesWindowsFull2022X8664,
+ X86x64: ekstypes.AMITypesWindowsFull2022X8664,
+ X86Nvidia: ekstypes.AMITypesWindowsFull2022X8664,
},
api.NodeImageFamilyWindowsServer2022CoreContainer: {
- X86x64: ekstypes.AMITypesWindowsCore2022X8664,
- X86GPU: ekstypes.AMITypesWindowsCore2022X8664,
+ X86x64: ekstypes.AMITypesWindowsCore2022X8664,
+ X86Nvidia: ekstypes.AMITypesWindowsCore2022X8664,
},
}
@@ -307,13 +311,14 @@ func getAMIType(ng *api.ManagedNodeGroup, instanceType string) ekstypes.AMITypes
}
switch {
- case instanceutils.IsGPUInstanceType(instanceType):
- if instanceutils.IsARMInstanceType(instanceType) {
- return amiType.ARMGPU
- }
- return amiType.X86GPU
+ case instanceutils.IsARMGPUInstanceType(instanceType):
+ return amiType.ARMGPU
case instanceutils.IsARMInstanceType(instanceType):
return amiType.ARM
+ case instanceutils.IsNvidiaInstanceType(instanceType):
+ return amiType.X86Nvidia
+ case instanceutils.IsNeuronInstanceType(instanceType):
+ return amiType.X86Neuron
default:
return amiType.X86x64
}
diff --git a/pkg/cfn/builder/managed_nodegroup_ami_type_test.go b/pkg/cfn/builder/managed_nodegroup_ami_type_test.go
index 2f3772b1e5..3839b44939 100644
--- a/pkg/cfn/builder/managed_nodegroup_ami_type_test.go
+++ b/pkg/cfn/builder/managed_nodegroup_ami_type_test.go
@@ -77,23 +77,24 @@ var _ = DescribeTable("Managed Nodegroup AMI type", func(e amiTypeEntry) {
expectedAMIType: "AL2_x86_64",
}),
- Entry("AMI type", amiTypeEntry{
+ Entry("default Nvidia GPU instance type", amiTypeEntry{
nodeGroup: &api.ManagedNodeGroup{
NodeGroupBase: &api.NodeGroupBase{
- Name: "test",
+ Name: "test",
+ InstanceType: "p2.xlarge",
},
},
- expectedAMIType: "AL2023_x86_64_STANDARD",
+ expectedAMIType: "AL2023_x86_64_NVIDIA",
}),
- Entry("default GPU instance type", amiTypeEntry{
+ Entry("default Neuron GPU instance type", amiTypeEntry{
nodeGroup: &api.ManagedNodeGroup{
NodeGroupBase: &api.NodeGroupBase{
Name: "test",
- InstanceType: "p2.xlarge",
+ InstanceType: "inf1.2xlarge",
},
},
- expectedAMIType: "AL2_x86_64_GPU",
+ expectedAMIType: "AL2023_x86_64_NEURON",
}),
Entry("AL2 GPU instance type", amiTypeEntry{
@@ -107,6 +108,16 @@ var _ = DescribeTable("Managed Nodegroup AMI type", func(e amiTypeEntry) {
expectedAMIType: "AL2_x86_64_GPU",
}),
+ Entry("default ARM instance type", amiTypeEntry{
+ nodeGroup: &api.ManagedNodeGroup{
+ NodeGroupBase: &api.NodeGroupBase{
+ Name: "test",
+ InstanceType: "a1.2xlarge",
+ },
+ },
+ expectedAMIType: "AL2023_ARM_64_STANDARD",
+ }),
+
Entry("AL2 ARM instance type", amiTypeEntry{
nodeGroup: &api.ManagedNodeGroup{
NodeGroupBase: &api.NodeGroupBase{
diff --git a/pkg/cfn/builder/nodegroup.go b/pkg/cfn/builder/nodegroup.go
index 2a8cbd5c24..8dc4ff8fa5 100644
--- a/pkg/cfn/builder/nodegroup.go
+++ b/pkg/cfn/builder/nodegroup.go
@@ -940,6 +940,17 @@ func (n *NodeGroupResourceSet) newNodeGroupSpotOceanClusterResource(launchTempla
MaxMemoryGiB: l.MaxMemoryGiB,
}
}
+ if d := autoScaler.Down; d != nil {
+ cluster.AutoScaler.Down = &spot.AutoScalerDown{
+ EvaluationPeriods: d.EvaluationPeriods,
+ MaxScaleDownPercentage: d.MaxScaleDownPercentage,
+ }
+ if d.AggressiveScaleDown != nil {
+ cluster.AutoScaler.Down.AggressiveScaleDown = &spot.AggressiveScaleDown{
+ IsEnabled: d.AggressiveScaleDown.IsEnabled,
+ }
+ }
+ }
}
}
}
diff --git a/pkg/cfn/builder/nodegroup_subnets_test.go b/pkg/cfn/builder/nodegroup_subnets_test.go
index 8eeb3c19a7..75b13a1a6a 100644
--- a/pkg/cfn/builder/nodegroup_subnets_test.go
+++ b/pkg/cfn/builder/nodegroup_subnets_test.go
@@ -267,7 +267,7 @@ var _ = Describe("AssignSubnets", func() {
}
},
updateEC2Mocks: func(e *mocksv2.EC2) {
- e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything).
+ e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything, mock.Anything).
Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
@@ -321,7 +321,7 @@ var _ = Describe("AssignSubnets", func() {
}
},
updateEC2Mocks: func(e *mocksv2.EC2) {
- e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything).
+ e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything, mock.Anything).
Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
@@ -375,7 +375,7 @@ var _ = Describe("AssignSubnets", func() {
}
},
updateEC2Mocks: func(e *mocksv2.EC2) {
- e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything).
+ e.On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything, mock.Anything).
Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
@@ -472,7 +472,7 @@ func mockDescribeSubnets(ec2Mock *mocksv2.EC2, zoneName, vpcID string) {
}
func mockDescribeSubnetsWithOutpost(ec2Mock *mocksv2.EC2, zoneName, vpcID string, outpostARN *string) {
- ec2Mock.On("DescribeSubnets", mock.Anything, mock.Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
+ ec2Mock.On("DescribeSubnets", mock.Anything, mock.Anything, mock.Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
return &ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
{
@@ -559,7 +559,7 @@ func mockSubnetsAndAZInstanceSupport(
AvailabilityZones: azs,
}, nil)
provider.MockEC2().
- On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything).
+ On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything, mock.Anything).
Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: offerings,
}, nil)
diff --git a/pkg/cfn/builder/statement.go b/pkg/cfn/builder/statement.go
index da2789a5a2..ad01b49c83 100644
--- a/pkg/cfn/builder/statement.go
+++ b/pkg/cfn/builder/statement.go
@@ -42,6 +42,7 @@ func loadBalancerControllerStatements() []cft.MapOfInterfaces {
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeListenerAttributes",
"elasticloadbalancing:DescribeListenerCertificates",
"elasticloadbalancing:DescribeSSLPolicies",
"elasticloadbalancing:DescribeRules",
@@ -190,6 +191,7 @@ func loadBalancerControllerStatements() []cft.MapOfInterfaces {
{
"Effect": effectAllow,
"Action": []string{
+ "elasticloadbalancing:ModifyListenerAttributes",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:SetIpAddressType",
"elasticloadbalancing:SetSecurityGroups",
diff --git a/pkg/cfn/builder/vpc_endpoint_test.go b/pkg/cfn/builder/vpc_endpoint_test.go
index 24b1ee4e78..0292d2c2ec 100644
--- a/pkg/cfn/builder/vpc_endpoint_test.go
+++ b/pkg/cfn/builder/vpc_endpoint_test.go
@@ -281,7 +281,7 @@ var _ = Describe("VPC Endpoint Builder", func() {
}
provider.MockEC2().On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(output, nil)
+ }), mock.Anything).Return(output, nil)
return provider
},
err: "subnets must be associated with a non-main route table",
@@ -440,7 +440,7 @@ func mockDescribeRouteTables(provider *mockprovider.MockProvider, subnetIDs []st
provider.MockEC2().On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(output, nil)
+ }), mock.Anything).Return(output, nil)
}
func mockDescribeRouteTablesSame(provider *mockprovider.MockProvider, subnetIDs []string) {
@@ -466,7 +466,7 @@ func mockDescribeRouteTablesSame(provider *mockprovider.MockProvider, subnetIDs
provider.MockEC2().On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(output, nil)
+ }), mock.Anything).Return(output, nil)
}
func makeZones(region string, count int) []string {
diff --git a/pkg/cfn/builder/vpc_existing_test.go b/pkg/cfn/builder/vpc_existing_test.go
index 39978f1b50..180bf4cbc8 100644
--- a/pkg/cfn/builder/vpc_existing_test.go
+++ b/pkg/cfn/builder/vpc_existing_test.go
@@ -222,7 +222,7 @@ var _ = Describe("Existing VPC", func() {
mockEC2.On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(mockResultFn, nil)
+ }), mock.Anything).Return(mockResultFn, nil)
})
It("the private subnet resource values are loaded into the VPCResource with route table association", func() {
@@ -245,7 +245,7 @@ var _ = Describe("Existing VPC", func() {
rtOutput.RouteTables[0].Associations[0].Main = aws.Bool(true)
mockEC2.On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(rtOutput, nil)
+ }), mock.Anything).Return(rtOutput, nil)
})
It("returns an error", func() {
@@ -258,7 +258,7 @@ var _ = Describe("Existing VPC", func() {
rtOutput.RouteTables[0].Associations[0].SubnetId = aws.String("fake")
mockEC2.On("DescribeRouteTables", mock.Anything, mock.MatchedBy(func(input *ec2.DescribeRouteTablesInput) bool {
return len(input.Filters) > 0
- })).Return(rtOutput, nil)
+ }), mock.Anything).Return(rtOutput, nil)
})
It("returns an error", func() {
diff --git a/pkg/cfn/manager/api_test.go b/pkg/cfn/manager/api_test.go
index cf650271a3..d6e3b6d574 100644
--- a/pkg/cfn/manager/api_test.go
+++ b/pkg/cfn/manager/api_test.go
@@ -403,7 +403,7 @@ var _ = Describe("StackCollection", func() {
})
It("can retrieve stacks", func() {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cfn.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cfn.ListStacksOutput{
StackSummaries: []types.StackSummary{
{
StackName: &stackNameWithEksctl,
@@ -418,7 +418,7 @@ var _ = Describe("StackCollection", func() {
When("the config stack doesn't match", func() {
It("returns no stack", func() {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cfn.ListStacksOutput{}, nil)
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cfn.ListStacksOutput{}, nil)
cfg.Metadata.Name = "not-this"
sm := NewStackCollection(p, cfg)
stack, err := sm.GetClusterStackIfExists(context.Background())
@@ -429,7 +429,7 @@ var _ = Describe("StackCollection", func() {
When("ListStacks errors", func() {
It("errors", func() {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(nil, errors.New("nope"))
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("nope"))
sm := NewStackCollection(p, cfg)
_, err := sm.GetClusterStackIfExists(context.Background())
Expect(err).To(MatchError(ContainSubstring("nope")))
diff --git a/pkg/cfn/manager/create_tasks.go b/pkg/cfn/manager/create_tasks.go
index 9380d7b70f..93e18c2edb 100644
--- a/pkg/cfn/manager/create_tasks.go
+++ b/pkg/cfn/manager/create_tasks.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
- "github.com/pkg/errors"
"github.com/weaveworks/eksctl/pkg/spot"
"github.com/kris-nova/logger"
@@ -26,7 +25,8 @@ import (
// NewTasksToCreateCluster defines all tasks required to create a cluster along
// with some nodegroups; see CreateAllNodeGroups for how onlyNodeGroupSubset works.
func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup,
- managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) (*tasks.TaskTree, error) {
+ managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, nodeGroupParallelism int, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree {
+
taskTree := tasks.TaskTree{Parallel: false}
taskTree.Append(&createClusterTask{
@@ -40,29 +40,23 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessConfig.AccessEntries))
}
- if len(accessConfig.AccessEntries) > 0 {
- taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessConfig.AccessEntries))
- }
-
- appendNodeGroupTasksTo := func(taskTree *tasks.TaskTree) error {
+ appendNodeGroupTasksTo := func(taskTree *tasks.TaskTree) {
vpcImporter := vpc.NewStackConfigImporter(c.MakeClusterStackName())
-
nodeGroupTasks := &tasks.TaskTree{
Parallel: true,
IsSubTask: true,
}
-
disableAccessEntryCreation := accessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap
if oceanManagedNodeGroupTasks, err := c.NewSpotOceanNodeGroupTask(ctx, vpcImporter); oceanManagedNodeGroupTasks.Len() > 0 && err == nil {
oceanManagedNodeGroupTasks.IsSubTask = true
nodeGroupTasks.Parallel = false
nodeGroupTasks.Append(oceanManagedNodeGroupTasks)
}
- if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, disableAccessEntryCreation, vpcImporter); unmanagedNodeGroupTasks.Len() > 0 {
+ if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, disableAccessEntryCreation, vpcImporter, nodeGroupParallelism); unmanagedNodeGroupTasks.Len() > 0 {
unmanagedNodeGroupTasks.IsSubTask = true
nodeGroupTasks.Append(unmanagedNodeGroupTasks)
}
- if managedNodeGroupTasks := c.NewManagedNodeGroupTask(ctx, managedNodeGroups, false, vpcImporter); managedNodeGroupTasks.Len() > 0 {
+ if managedNodeGroupTasks := c.NewManagedNodeGroupTask(ctx, managedNodeGroups, false, vpcImporter, nodeGroupParallelism); managedNodeGroupTasks.Len() > 0 {
managedNodeGroupTasks.IsSubTask = true
nodeGroupTasks.Append(managedNodeGroupTasks)
}
@@ -70,25 +64,20 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
if nodeGroupTasks.Len() > 0 {
taskTree.Append(nodeGroupTasks)
}
-
- return nil
}
- var appendErr error
-
if len(postClusterCreationTasks) > 0 {
postClusterCreationTaskTree := &tasks.TaskTree{
Parallel: false,
IsSubTask: true,
}
postClusterCreationTaskTree.Append(postClusterCreationTasks...)
- appendErr = appendNodeGroupTasksTo(postClusterCreationTaskTree)
+ appendNodeGroupTasksTo(postClusterCreationTaskTree)
taskTree.Append(postClusterCreationTaskTree)
} else {
- appendErr = appendNodeGroupTasksTo(&taskTree)
+ appendNodeGroupTasksTo(&taskTree)
}
-
- return &taskTree, appendErr
+ return &taskTree
}
// NewSpotOceanNodeGroupTask defines tasks required to create Ocean Cluster.
@@ -126,7 +115,7 @@ func (c *StackCollection) NewSpotOceanNodeGroupTask(ctx context.Context, vpcImpo
}
// NewUnmanagedNodeGroupTask returns tasks for creating self-managed nodegroups.
-func (c *StackCollection) NewUnmanagedNodeGroupTask(ctx context.Context, nodeGroups []*api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, vpcImporter vpc.Importer) *tasks.TaskTree {
+func (c *StackCollection) NewUnmanagedNodeGroupTask(ctx context.Context, nodeGroups []*api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, vpcImporter vpc.Importer, parallelism int) *tasks.TaskTree {
task := &UnmanagedNodeGroupTask{
ClusterConfig: c.spec,
NodeGroups: nodeGroups,
@@ -144,12 +133,13 @@ func (c *StackCollection) NewUnmanagedNodeGroupTask(ctx context.Context, nodeGro
SkipEgressRules: skipEgressRules,
DisableAccessEntryCreation: disableAccessEntryCreation,
VPCImporter: vpcImporter,
+ Parallelism: parallelism,
})
}
// NewManagedNodeGroupTask defines tasks required to create managed nodegroups
-func (c *StackCollection) NewManagedNodeGroupTask(ctx context.Context, nodeGroups []*api.ManagedNodeGroup, forceAddCNIPolicy bool, vpcImporter vpc.Importer) *tasks.TaskTree {
- taskTree := &tasks.TaskTree{Parallel: true}
+func (c *StackCollection) NewManagedNodeGroupTask(ctx context.Context, nodeGroups []*api.ManagedNodeGroup, forceAddCNIPolicy bool, vpcImporter vpc.Importer, nodeGroupParallelism int) *tasks.TaskTree {
+ taskTree := &tasks.TaskTree{Parallel: true, Limit: nodeGroupParallelism}
for _, ng := range nodeGroups {
// Disable parallelisation if any tags propagation is done
// since nodegroup must be created to propagate tags to its ASGs.
@@ -213,7 +203,7 @@ func (c *StackCollection) NewTasksToCreateIAMServiceAccounts(serviceAccounts []*
objectMeta.SetAnnotations(sa.AsObjectMeta().Annotations)
objectMeta.SetLabels(sa.AsObjectMeta().Labels)
if err := kubernetes.MaybeCreateServiceAccountOrUpdateMetadata(clientSet, objectMeta); err != nil {
- return errors.Wrapf(err, "failed to create service account %s/%s", objectMeta.GetNamespace(), objectMeta.GetName())
+ return fmt.Errorf("failed to create service account %s/%s: %w", objectMeta.GetNamespace(), objectMeta.GetName(), err)
}
return nil
},
diff --git a/pkg/cfn/manager/fakes/fake_stack_manager.go b/pkg/cfn/manager/fakes/fake_stack_manager.go
index 213fb8414a..f9316bc692 100644
--- a/pkg/cfn/manager/fakes/fake_stack_manager.go
+++ b/pkg/cfn/manager/fakes/fake_stack_manager.go
@@ -634,13 +634,14 @@ type FakeStackManager struct {
mustUpdateStackReturnsOnCall map[int]struct {
result1 error
}
- NewManagedNodeGroupTaskStub func(context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer) *tasks.TaskTree
+ NewManagedNodeGroupTaskStub func(context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer, int) *tasks.TaskTree
newManagedNodeGroupTaskMutex sync.RWMutex
newManagedNodeGroupTaskArgsForCall []struct {
arg1 context.Context
arg2 []*v1alpha5.ManagedNodeGroup
arg3 bool
arg4 vpc.Importer
+ arg5 int
}
newManagedNodeGroupTaskReturns struct {
result1 *tasks.TaskTree
@@ -663,7 +664,7 @@ type FakeStackManager struct {
newTaskToDeleteUnownedNodeGroupReturnsOnCall map[int]struct {
result1 tasks.Task
}
- NewTasksToCreateClusterStub func(context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, ...tasks.Task) (*tasks.TaskTree, error)
+ NewTasksToCreateClusterStub func(context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, int, ...tasks.Task) *tasks.TaskTree
newTasksToCreateClusterMutex sync.RWMutex
newTasksToCreateClusterArgsForCall []struct {
arg1 context.Context
@@ -671,7 +672,8 @@ type FakeStackManager struct {
arg3 []*v1alpha5.ManagedNodeGroup
arg4 *v1alpha5.AccessConfig
arg5 accessentry.CreatorInterface
- arg6 []tasks.Task
+ arg6 int
+ arg7 []tasks.Task
}
newTasksToCreateClusterReturns struct {
result1 *tasks.TaskTree
@@ -767,7 +769,7 @@ type FakeStackManager struct {
result1 *tasks.TaskTree
result2 error
}
- NewUnmanagedNodeGroupTaskStub func(context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer) *tasks.TaskTree
+ NewUnmanagedNodeGroupTaskStub func(context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer, int) *tasks.TaskTree
newUnmanagedNodeGroupTaskMutex sync.RWMutex
newUnmanagedNodeGroupTaskArgsForCall []struct {
arg1 context.Context
@@ -776,6 +778,7 @@ type FakeStackManager struct {
arg4 bool
arg5 bool
arg6 vpc.Importer
+ arg7 int
}
newUnmanagedNodeGroupTaskReturns struct {
result1 *tasks.TaskTree
@@ -3802,7 +3805,7 @@ func (fake *FakeStackManager) MustUpdateStackReturnsOnCall(i int, result1 error)
}{result1}
}
-func (fake *FakeStackManager) NewManagedNodeGroupTask(arg1 context.Context, arg2 []*v1alpha5.ManagedNodeGroup, arg3 bool, arg4 vpc.Importer) *tasks.TaskTree {
+func (fake *FakeStackManager) NewManagedNodeGroupTask(arg1 context.Context, arg2 []*v1alpha5.ManagedNodeGroup, arg3 bool, arg4 vpc.Importer, arg5 int) *tasks.TaskTree {
var arg2Copy []*v1alpha5.ManagedNodeGroup
if arg2 != nil {
arg2Copy = make([]*v1alpha5.ManagedNodeGroup, len(arg2))
@@ -3815,13 +3818,14 @@ func (fake *FakeStackManager) NewManagedNodeGroupTask(arg1 context.Context, arg2
arg2 []*v1alpha5.ManagedNodeGroup
arg3 bool
arg4 vpc.Importer
- }{arg1, arg2Copy, arg3, arg4})
+ arg5 int
+ }{arg1, arg2Copy, arg3, arg4, arg5})
stub := fake.NewManagedNodeGroupTaskStub
fakeReturns := fake.newManagedNodeGroupTaskReturns
- fake.recordInvocation("NewManagedNodeGroupTask", []interface{}{arg1, arg2Copy, arg3, arg4})
+ fake.recordInvocation("NewManagedNodeGroupTask", []interface{}{arg1, arg2Copy, arg3, arg4, arg5})
fake.newManagedNodeGroupTaskMutex.Unlock()
if stub != nil {
- return stub(arg1, arg2, arg3, arg4)
+ return stub(arg1, arg2, arg3, arg4, arg5)
}
if specificReturn {
return ret.result1
@@ -3835,17 +3839,17 @@ func (fake *FakeStackManager) NewManagedNodeGroupTaskCallCount() int {
return len(fake.newManagedNodeGroupTaskArgsForCall)
}
-func (fake *FakeStackManager) NewManagedNodeGroupTaskCalls(stub func(context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer) *tasks.TaskTree) {
+func (fake *FakeStackManager) NewManagedNodeGroupTaskCalls(stub func(context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer, int) *tasks.TaskTree) {
fake.newManagedNodeGroupTaskMutex.Lock()
defer fake.newManagedNodeGroupTaskMutex.Unlock()
fake.NewManagedNodeGroupTaskStub = stub
}
-func (fake *FakeStackManager) NewManagedNodeGroupTaskArgsForCall(i int) (context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer) {
+func (fake *FakeStackManager) NewManagedNodeGroupTaskArgsForCall(i int) (context.Context, []*v1alpha5.ManagedNodeGroup, bool, vpc.Importer, int) {
fake.newManagedNodeGroupTaskMutex.RLock()
defer fake.newManagedNodeGroupTaskMutex.RUnlock()
argsForCall := fake.newManagedNodeGroupTaskArgsForCall[i]
- return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
+ return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
func (fake *FakeStackManager) NewManagedNodeGroupTaskReturns(result1 *tasks.TaskTree) {
@@ -3936,7 +3940,7 @@ func (fake *FakeStackManager) NewTaskToDeleteUnownedNodeGroupReturnsOnCall(i int
}{result1}
}
-func (fake *FakeStackManager) NewTasksToCreateCluster(arg1 context.Context, arg2 []*v1alpha5.NodeGroup, arg3 []*v1alpha5.ManagedNodeGroup, arg4 *v1alpha5.AccessConfig, arg5 accessentry.CreatorInterface, arg6 ...tasks.Task) (*tasks.TaskTree, error) {
+func (fake *FakeStackManager) NewTasksToCreateCluster(arg1 context.Context, arg2 []*v1alpha5.NodeGroup, arg3 []*v1alpha5.ManagedNodeGroup, arg4 *v1alpha5.AccessConfig, arg5 accessentry.CreatorInterface, arg6 int, arg7 ...tasks.Task) *tasks.TaskTree {
var arg2Copy []*v1alpha5.NodeGroup
if arg2 != nil {
arg2Copy = make([]*v1alpha5.NodeGroup, len(arg2))
@@ -3955,14 +3959,15 @@ func (fake *FakeStackManager) NewTasksToCreateCluster(arg1 context.Context, arg2
arg3 []*v1alpha5.ManagedNodeGroup
arg4 *v1alpha5.AccessConfig
arg5 accessentry.CreatorInterface
- arg6 []tasks.Task
- }{arg1, arg2Copy, arg3Copy, arg4, arg5, arg6})
+ arg6 int
+ arg7 []tasks.Task
+ }{arg1, arg2Copy, arg3Copy, arg4, arg5, arg6, arg7})
stub := fake.NewTasksToCreateClusterStub
fakeReturns := fake.newTasksToCreateClusterReturns
- fake.recordInvocation("NewTasksToCreateCluster", []interface{}{arg1, arg2Copy, arg3Copy, arg4, arg5, arg6})
+ fake.recordInvocation("NewTasksToCreateCluster", []interface{}{arg1, arg2Copy, arg3Copy, arg4, arg5, arg6, arg7})
fake.newTasksToCreateClusterMutex.Unlock()
if stub != nil {
- return stub(arg1, arg2, arg3, arg4, arg5, arg6...)
+ return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7...)
}
if specificReturn {
- return ret.result1, ret.result2
+ return ret.result1
@@ -3976,20 +3981,20 @@ func (fake *FakeStackManager) NewTasksToCreateClusterCallCount() int {
return len(fake.newTasksToCreateClusterArgsForCall)
}
-func (fake *FakeStackManager) NewTasksToCreateClusterCalls(stub func(context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, ...tasks.Task) (*tasks.TaskTree, error)) {
+func (fake *FakeStackManager) NewTasksToCreateClusterCalls(stub func(context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, int, ...tasks.Task) *tasks.TaskTree) {
fake.newTasksToCreateClusterMutex.Lock()
defer fake.newTasksToCreateClusterMutex.Unlock()
fake.NewTasksToCreateClusterStub = stub
}
-func (fake *FakeStackManager) NewTasksToCreateClusterArgsForCall(i int) (context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, []tasks.Task) {
+func (fake *FakeStackManager) NewTasksToCreateClusterArgsForCall(i int) (context.Context, []*v1alpha5.NodeGroup, []*v1alpha5.ManagedNodeGroup, *v1alpha5.AccessConfig, accessentry.CreatorInterface, int, []tasks.Task) {
fake.newTasksToCreateClusterMutex.RLock()
defer fake.newTasksToCreateClusterMutex.RUnlock()
argsForCall := fake.newTasksToCreateClusterArgsForCall[i]
- return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6
+ return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7
}
-func (fake *FakeStackManager) NewTasksToCreateClusterReturns(result1 *tasks.TaskTree, result2 error) {
+func (fake *FakeStackManager) NewTasksToCreateClusterReturns(result1 *tasks.TaskTree) {
fake.newTasksToCreateClusterMutex.Lock()
defer fake.newTasksToCreateClusterMutex.Unlock()
fake.NewTasksToCreateClusterStub = nil
@@ -3999,7 +4004,7 @@ func (fake *FakeStackManager) NewTasksToCreateClusterReturns(result1 *tasks.Task
- }{result1, result2}
+ }{result1}
}
-func (fake *FakeStackManager) NewTasksToCreateClusterReturnsOnCall(i int, result1 *tasks.TaskTree, result2 error) {
+func (fake *FakeStackManager) NewTasksToCreateClusterReturnsOnCall(i int, result1 *tasks.TaskTree) {
fake.newTasksToCreateClusterMutex.Lock()
defer fake.newTasksToCreateClusterMutex.Unlock()
fake.NewTasksToCreateClusterStub = nil
@@ -4375,7 +4380,7 @@ func (fake *FakeStackManager) NewTasksToDeleteOIDCProviderWithIAMServiceAccounts
}{result1, result2}
}
-func (fake *FakeStackManager) NewUnmanagedNodeGroupTask(arg1 context.Context, arg2 []*v1alpha5.NodeGroup, arg3 bool, arg4 bool, arg5 bool, arg6 vpc.Importer) *tasks.TaskTree {
+func (fake *FakeStackManager) NewUnmanagedNodeGroupTask(arg1 context.Context, arg2 []*v1alpha5.NodeGroup, arg3 bool, arg4 bool, arg5 bool, arg6 vpc.Importer, arg7 int) *tasks.TaskTree {
var arg2Copy []*v1alpha5.NodeGroup
if arg2 != nil {
arg2Copy = make([]*v1alpha5.NodeGroup, len(arg2))
@@ -4390,13 +4395,14 @@ func (fake *FakeStackManager) NewUnmanagedNodeGroupTask(arg1 context.Context, ar
arg4 bool
arg5 bool
arg6 vpc.Importer
- }{arg1, arg2Copy, arg3, arg4, arg5, arg6})
+ arg7 int
+ }{arg1, arg2Copy, arg3, arg4, arg5, arg6, arg7})
stub := fake.NewUnmanagedNodeGroupTaskStub
fakeReturns := fake.newUnmanagedNodeGroupTaskReturns
- fake.recordInvocation("NewUnmanagedNodeGroupTask", []interface{}{arg1, arg2Copy, arg3, arg4, arg5, arg6})
+ fake.recordInvocation("NewUnmanagedNodeGroupTask", []interface{}{arg1, arg2Copy, arg3, arg4, arg5, arg6, arg7})
fake.newUnmanagedNodeGroupTaskMutex.Unlock()
if stub != nil {
- return stub(arg1, arg2, arg3, arg4, arg5, arg6)
+ return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7)
}
if specificReturn {
return ret.result1
@@ -4410,17 +4416,17 @@ func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskCallCount() int {
return len(fake.newUnmanagedNodeGroupTaskArgsForCall)
}
-func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskCalls(stub func(context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer) *tasks.TaskTree) {
+func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskCalls(stub func(context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer, int) *tasks.TaskTree) {
fake.newUnmanagedNodeGroupTaskMutex.Lock()
defer fake.newUnmanagedNodeGroupTaskMutex.Unlock()
fake.NewUnmanagedNodeGroupTaskStub = stub
}
-func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskArgsForCall(i int) (context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer) {
+func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskArgsForCall(i int) (context.Context, []*v1alpha5.NodeGroup, bool, bool, bool, vpc.Importer, int) {
fake.newUnmanagedNodeGroupTaskMutex.RLock()
defer fake.newUnmanagedNodeGroupTaskMutex.RUnlock()
argsForCall := fake.newUnmanagedNodeGroupTaskArgsForCall[i]
- return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6
+ return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7
}
func (fake *FakeStackManager) NewUnmanagedNodeGroupTaskReturns(result1 *tasks.TaskTree) {
diff --git a/pkg/cfn/manager/interface.go b/pkg/cfn/manager/interface.go
index a233f0ceb0..ef887db7bc 100644
--- a/pkg/cfn/manager/interface.go
+++ b/pkg/cfn/manager/interface.go
@@ -84,15 +84,15 @@ type StackManager interface {
LookupCloudTrailEvents(ctx context.Context, i *Stack) ([]cttypes.Event, error)
MakeChangeSetName(action string) string
MakeClusterStackName() string
- NewManagedNodeGroupTask(ctx context.Context, nodeGroups []*api.ManagedNodeGroup, forceAddCNIPolicy bool, importer vpc.Importer) *tasks.TaskTree
+ NewManagedNodeGroupTask(ctx context.Context, nodeGroups []*api.ManagedNodeGroup, forceAddCNIPolicy bool, importer vpc.Importer, nodeGroupParallelism int) *tasks.TaskTree
NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, newTasksToDeleteAddonIAM NewTasksToDeleteAddonIAM, newTasksToDeletePodIdentityRole NewTasksToDeletePodIdentityRole, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
NewTasksToCreateIAMServiceAccounts(serviceAccounts []*api.ClusterIAMServiceAccount, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter) *tasks.TaskTree
NewTaskToDeleteUnownedNodeGroup(ctx context.Context, clusterName, nodegroup string, nodeGroupDeleter NodeGroupDeleter, waitCondition *DeleteWaitCondition) tasks.Task
- NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) (*tasks.TaskTree, error)
+ NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, nodeGroupParallelism int, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
NewTasksToDeleteIAMServiceAccounts(ctx context.Context, serviceAccounts []string, clientSetGetter kubernetes.ClientSetGetter, wait bool) (*tasks.TaskTree, error)
NewTasksToDeleteNodeGroups(stacks []NodeGroupStack, shouldDelete func(_ string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
NewTasksToDeleteOIDCProviderWithIAMServiceAccounts(ctx context.Context, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, force bool) (*tasks.TaskTree, error)
- NewUnmanagedNodeGroupTask(ctx context.Context, nodeGroups []*api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, importer vpc.Importer) *tasks.TaskTree
+ NewUnmanagedNodeGroupTask(ctx context.Context, nodeGroups []*api.NodeGroup, forceAddCNIPolicy, skipEgressRules, disableAccessEntryCreation bool, importer vpc.Importer, nodeGroupParallelism int) *tasks.TaskTree
PropagateManagedNodeGroupTagsToASG(ngName string, ngTags map[string]string, asgNames []string, errCh chan error) error
RefreshFargatePodExecutionRoleARN(ctx context.Context) error
StackStatusIsNotTransitional(s *Stack) bool
diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go
index bf07fafebb..c31418f819 100644
--- a/pkg/cfn/manager/nodegroup.go
+++ b/pkg/cfn/manager/nodegroup.go
@@ -47,6 +47,7 @@ type CreateNodeGroupOptions struct {
DisableAccessEntryCreation bool
VPCImporter vpc.Importer
SharedTags []types.Tag
+ Parallelism int
}
// A NodeGroupStackManager describes and creates nodegroup stacks.
@@ -92,7 +93,7 @@ type OceanManagedNodeGroupTask struct {
// Create creates a TaskTree for creating nodegroups.
func (t *UnmanagedNodeGroupTask) Create(ctx context.Context, options CreateNodeGroupOptions) *tasks.TaskTree {
- taskTree := &tasks.TaskTree{Parallel: true}
+ taskTree := &tasks.TaskTree{Parallel: true, Limit: options.Parallelism}
for _, ng := range t.NodeGroups {
ng := ng
@@ -285,6 +286,7 @@ func (t *OceanManagedNodeGroupTask) maybeCreateAccessEntry(ctx context.Context,
return fmt.Errorf("creating access entry for ocean nodegroup %s: %w", ng.Name, err)
}
logger.Info("ocean nodegroup %s: created access entry for principal ARN %q", ng.Name, roleARN)
+
return nil
}
diff --git a/pkg/cfn/manager/tasks_test.go b/pkg/cfn/manager/tasks_test.go
index ea81694ae8..8edfa1a87e 100644
--- a/pkg/cfn/manager/tasks_test.go
+++ b/pkg/cfn/manager/tasks_test.go
@@ -80,22 +80,22 @@ var _ = Describe("StackCollection Tasks", func() {
// The supportsManagedNodes argument has no effect on the Describe call, so the values are alternated
// in these tests
{
- tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar", "foo"), false, false, true, fakeVPCImporter)
+ tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar", "foo"), false, false, true, fakeVPCImporter, 0)
Expect(tasks.Describe()).To(Equal(`
-2 parallel tasks: { create nodegroup "bar", create nodegroup "foo"
+2 parallel tasks: { create nodegroup "bar", create nodegroup "foo"
}
`))
}
{
- tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar"), false, false, true, fakeVPCImporter)
+ tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("bar"), false, false, true, fakeVPCImporter, 0)
Expect(tasks.Describe()).To(Equal(`1 task: { create nodegroup "bar" }`))
}
{
- tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("foo"), false, false, true, fakeVPCImporter)
+ tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), makeNodeGroups("foo"), false, false, true, fakeVPCImporter, 0)
Expect(tasks.Describe()).To(Equal(`1 task: { create nodegroup "foo" }`))
}
{
- tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), nil, false, false, true, fakeVPCImporter)
+ tasks := stackManager.NewUnmanagedNodeGroupTask(context.Background(), nil, false, false, true, fakeVPCImporter, 0)
Expect(tasks.Describe()).To(Equal(`no tasks`))
}
@@ -103,86 +103,86 @@ var _ = Describe("StackCollection Tasks", func() {
AuthenticationMode: ekstypes.AuthenticationModeConfigMap,
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster",
- 2 parallel sub-tasks: {
+2 sequential tasks: { create cluster control plane "test-cluster",
+ 2 parallel sub-tasks: {
create nodegroup "bar",
create nodegroup "foo",
- }
+ }
}
`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster", create nodegroup "bar"
+2 sequential tasks: { create cluster control plane "test-cluster", create nodegroup "bar"
}
`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`1 task: { create cluster control plane "test-cluster" }`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster",
- 2 parallel sub-tasks: {
- 2 parallel sub-tasks: {
+2 sequential tasks: { create cluster control plane "test-cluster",
+ 2 parallel sub-tasks: {
+ 2 parallel sub-tasks: {
create nodegroup "bar",
create nodegroup "foo",
},
- 2 parallel sub-tasks: {
+ 2 parallel sub-tasks: {
create managed nodegroup "m1",
create managed nodegroup "m2",
},
- }
+ }
}
`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster",
- 2 parallel sub-tasks: {
- 2 parallel sub-tasks: {
+2 sequential tasks: { create cluster control plane "test-cluster",
+ 2 parallel sub-tasks: {
+ 2 parallel sub-tasks: {
create nodegroup "bar",
create nodegroup "foo",
},
- 2 parallel sub-tasks: {
- 2 sequential sub-tasks: {
+ 2 parallel sub-tasks: {
+ 2 sequential sub-tasks: {
create managed nodegroup "m1",
propagate tags to ASG for managed nodegroup "m1",
},
- 2 sequential sub-tasks: {
+ 2 sequential sub-tasks: {
create managed nodegroup "m2",
propagate tags to ASG for managed nodegroup "m2",
},
},
- }
+ }
}
`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), accessConfig, nil)
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), accessConfig, nil, 0)
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster",
- 2 parallel sub-tasks: {
+2 sequential tasks: { create cluster control plane "test-cluster",
+ 2 parallel sub-tasks: {
create nodegroup "foo",
create managed nodegroup "m1",
- }
+ }
}
`))
}
{
- tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil, &task{id: 1})
+ tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil, 0, &task{id: 1})
Expect(tasks.Describe()).To(Equal(`
-2 sequential tasks: { create cluster control plane "test-cluster",
- 2 sequential sub-tasks: {
+2 sequential tasks: { create cluster control plane "test-cluster",
+ 2 sequential sub-tasks: {
task 1,
create nodegroup "bar",
- }
+ }
}
`))
}
@@ -203,20 +203,20 @@ var _ = Describe("StackCollection Tasks", func() {
stackManager = NewStackCollection(p, cfg)
})
It("returns an error", func() {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{}, nil)
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{}, nil)
ng := api.NewManagedNodeGroup()
fakeVPCImporter := new(vpcfakes.FakeImporter)
- tasks := stackManager.NewManagedNodeGroupTask(context.Background(), []*api.ManagedNodeGroup{ng}, false, fakeVPCImporter)
+ tasks := stackManager.NewManagedNodeGroupTask(context.Background(), []*api.ManagedNodeGroup{ng}, false, fakeVPCImporter, 0)
errs := tasks.DoAllSync()
Expect(errs).To(HaveLen(1))
Expect(errs[0]).To(MatchError(ContainSubstring("managed nodegroups cannot be created on IPv6 unowned clusters")))
})
When("finding the stack fails", func() {
It("returns the stack error", func() {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(nil, errors.New("not found"))
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("not found"))
ng := api.NewManagedNodeGroup()
fakeVPCImporter := new(vpcfakes.FakeImporter)
- tasks := stackManager.NewManagedNodeGroupTask(context.Background(), []*api.ManagedNodeGroup{ng}, false, fakeVPCImporter)
+ tasks := stackManager.NewManagedNodeGroupTask(context.Background(), []*api.ManagedNodeGroup{ng}, false, fakeVPCImporter, 0)
errs := tasks.DoAllSync()
Expect(errs).To(HaveLen(1))
Expect(errs[0]).To(MatchError(ContainSubstring("not found")))
@@ -253,7 +253,7 @@ var _ = Describe("StackCollection Tasks", func() {
},
Entry("an OIDC provider is associated with the cluster", oidcEntry{
mockProvider: func(p *mockprovider.MockProvider) {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
StackSummaries: []cfntypes.StackSummary{},
}, nil)
},
@@ -268,7 +268,7 @@ var _ = Describe("StackCollection Tasks", func() {
Entry("cluster has IAM service accounts", oidcEntry{
mockProvider: func(p *mockprovider.MockProvider) {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
StackSummaries: []cfntypes.StackSummary{
{
StackName: aws.String("eksctl-test-cluster-addon-iamserviceaccount-test"),
@@ -298,7 +298,7 @@ var _ = Describe("StackCollection Tasks", func() {
Entry("OIDC provider and service accounts do not exist for the cluster", oidcEntry{
mockProvider: func(p *mockprovider.MockProvider) {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
StackSummaries: []cfntypes.StackSummary{},
}, nil)
},
@@ -309,7 +309,7 @@ var _ = Describe("StackCollection Tasks", func() {
Entry("OIDC provider definitely does not exist for the cluster", oidcEntry{
mockProvider: func(p *mockprovider.MockProvider) {
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
StackSummaries: []cfntypes.StackSummary{},
}, nil)
},
diff --git a/pkg/ctl/cmdutils/configfile.go b/pkg/ctl/cmdutils/configfile.go
index 76cbbac34b..a87645db59 100644
--- a/pkg/ctl/cmdutils/configfile.go
+++ b/pkg/ctl/cmdutils/configfile.go
@@ -3,6 +3,7 @@ package cmdutils
import (
"encoding/csv"
"fmt"
+ "io"
"reflect"
"strconv"
"strings"
@@ -36,6 +37,7 @@ type ClusterConfigLoader interface {
type commonClusterConfigLoader struct {
*Cmd
+ configReader io.Reader
flagsIncompatibleWithConfigFile sets.Set[string]
flagsIncompatibleWithoutConfigFile sets.Set[string]
@@ -129,7 +131,7 @@ func (l *commonClusterConfigLoader) Load() error {
// The reference to ClusterConfig should only be reassigned if ClusterConfigFile is specified
// because other parts of the code store the pointer locally and access it directly instead of via
// the Cmd reference
- if l.ClusterConfig, err = eks.LoadConfigFromFile(l.ClusterConfigFile); err != nil {
+ if l.ClusterConfig, err = eks.LoadConfigWithReader(l.ClusterConfigFile, l.configReader); err != nil {
return err
}
meta := l.ClusterConfig.Metadata
@@ -203,6 +205,7 @@ func NewMetadataLoader(cmd *Cmd) ClusterConfigLoader {
// NewCreateClusterLoader will load config or use flags for 'eksctl create cluster'
func NewCreateClusterLoader(cmd *Cmd, ngFilter *filter.NodeGroupFilter, ng *api.NodeGroup, params *CreateClusterCmdParams) ClusterConfigLoader {
l := newCommonClusterConfigLoader(cmd)
+ l.configReader = params.ConfigReader
ngFilter.SetExcludeAll(params.WithoutNodeGroup)
@@ -313,6 +316,10 @@ func NewCreateClusterLoader(cmd *Cmd, ngFilter *filter.NodeGroupFilter, ng *api.
}
}
+ if err := validateBareCluster(clusterConfig); err != nil {
+ return err
+ }
+
shallCreatePodIdentityAssociations := func(cfg *api.ClusterConfig) bool {
if cfg.IAM != nil && len(cfg.IAM.PodIdentityAssociations) > 0 {
return true
@@ -452,6 +459,22 @@ func validateDryRunOptions(cmd *cobra.Command, incompatibleFlags []string) error
return nil
}
+// validateBareCluster validates a cluster for unsupported fields if VPC CNI is disabled.
+func validateBareCluster(clusterConfig *api.ClusterConfig) error {
+ if !clusterConfig.AddonsConfig.DisableDefaultAddons || slices.ContainsFunc(clusterConfig.Addons, func(addon *api.Addon) bool {
+ return addon.Name == api.VPCCNIAddon
+ }) {
+ return nil
+ }
+ if clusterConfig.HasNodes() || clusterConfig.IsFargateEnabled() || clusterConfig.Karpenter != nil || clusterConfig.HasGitOpsFluxConfigured() ||
+ (clusterConfig.IAM != nil && ((len(clusterConfig.IAM.ServiceAccounts) > 0) || len(clusterConfig.IAM.PodIdentityAssociations) > 0)) {
+ return errors.New("fields nodeGroups, managedNodeGroups, fargateProfiles, karpenter, gitops, iam.serviceAccounts, " +
+ "and iam.podIdentityAssociations are not supported during cluster creation in a cluster without VPC CNI; please remove these fields " +
+ "and add them back after cluster creation is successful")
+ }
+ return nil
+}
+
const updateAuthConfigMapFlagName = "update-auth-configmap"
// NewCreateNodeGroupLoader will load config or use flags for 'eksctl create nodegroup'
diff --git a/pkg/ctl/cmdutils/configfile_test.go b/pkg/ctl/cmdutils/configfile_test.go
index 70b5b58d68..e476fed432 100644
--- a/pkg/ctl/cmdutils/configfile_test.go
+++ b/pkg/ctl/cmdutils/configfile_test.go
@@ -3,11 +3,13 @@ package cmdutils
import (
"path/filepath"
+ "github.com/aws/aws-sdk-go-v2/aws"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+ clusterutils "github.com/weaveworks/eksctl/integration/utilities/cluster"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils/filter"
)
@@ -471,6 +473,123 @@ var _ = Describe("cmdutils configfile", func() {
testClusterEndpointAccessDefaults("test_data/cluster-with-vpc-private-access.yaml", true, true)
})
})
+
+ type bareClusterEntry struct {
+ updateClusterConfig func(*api.ClusterConfig)
+ expectErr bool
+ }
+
+ DescribeTable("Bare Cluster validation", func(e bareClusterEntry) {
+ cmd := &Cmd{
+ CobraCommand: newCmd(),
+ ClusterConfigFile: "-",
+ ClusterConfig: api.NewClusterConfig(),
+ ProviderConfig: api.ProviderConfig{},
+ }
+ clusterConfig := api.NewClusterConfig()
+ clusterConfig.Metadata.Name = "cluster"
+ clusterConfig.Metadata.Region = api.DefaultRegion
+ clusterConfig.AddonsConfig.DisableDefaultAddons = true
+ clusterConfig.Addons = []*api.Addon{
+ {
+ Name: api.CoreDNSAddon,
+ },
+ }
+ e.updateClusterConfig(clusterConfig)
+ err := NewCreateClusterLoader(cmd, filter.NewNodeGroupFilter(), nil, &CreateClusterCmdParams{
+ ConfigReader: clusterutils.Reader(clusterConfig),
+ }).Load()
+ if e.expectErr {
+ Expect(err).To(MatchError("fields nodeGroups, managedNodeGroups, fargateProfiles, karpenter, gitops, iam.serviceAccounts, " +
+ "and iam.podIdentityAssociations are not supported during cluster creation in a cluster without VPC CNI; please remove these fields " +
+ "and add them back after cluster creation is successful"))
+ } else {
+ Expect(err).NotTo(HaveOccurred())
+ }
+ },
+ Entry("nodeGroups", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ ng := api.NewNodeGroup()
+ ng.Name = "ng"
+ ng.DesiredCapacity = aws.Int(1)
+ c.NodeGroups = []*api.NodeGroup{ng}
+ },
+ expectErr: true,
+ }),
+ Entry("managedNodeGroups", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ ng := api.NewManagedNodeGroup()
+ ng.Name = "mng"
+ ng.DesiredCapacity = aws.Int(1)
+ c.ManagedNodeGroups = []*api.ManagedNodeGroup{ng}
+ },
+ expectErr: true,
+ }),
+ Entry("fargateProfiles", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ c.FargateProfiles = []*api.FargateProfile{
+ {
+ Name: "test",
+ Selectors: []api.FargateProfileSelector{
+ {
+ Namespace: "default",
+ },
+ },
+ },
+ }
+ },
+ expectErr: true,
+ }),
+ Entry("gitops", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ c.GitOps = &api.GitOps{
+ Flux: &api.Flux{
+ GitProvider: "github",
+ Flags: api.FluxFlags{
+ "owner": "aws",
+ },
+ },
+ }
+ },
+ expectErr: true,
+ }),
+ Entry("karpenter", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ c.Karpenter = &api.Karpenter{}
+ },
+ expectErr: true,
+ }),
+ Entry("iam.serviceAccounts", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ c.IAM.WithOIDC = api.Enabled()
+ c.IAM.ServiceAccounts = []*api.ClusterIAMServiceAccount{
+ {
+ ClusterIAMMeta: api.ClusterIAMMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ AttachPolicyARNs: []string{"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"},
+ },
+ }
+ },
+ expectErr: true,
+ }),
+ Entry("iam.podIdentityAssociations", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {
+ c.Addons = append(c.Addons, &api.Addon{Name: api.PodIdentityAgentAddon})
+ c.IAM.PodIdentityAssociations = []api.PodIdentityAssociation{
+ {
+ Namespace: "test",
+ PermissionPolicyARNs: []string{"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"},
+ },
+ }
+ },
+ expectErr: true,
+ }),
+ Entry("no unsupported field set", bareClusterEntry{
+ updateClusterConfig: func(c *api.ClusterConfig) {},
+ }),
+ )
})
Describe("SetLabelLoader", func() {
diff --git a/pkg/ctl/cmdutils/params.go b/pkg/ctl/cmdutils/create_cluster.go
similarity index 97%
rename from pkg/ctl/cmdutils/params.go
rename to pkg/ctl/cmdutils/create_cluster.go
index 92abb74b49..683ba740ae 100644
--- a/pkg/ctl/cmdutils/params.go
+++ b/pkg/ctl/cmdutils/create_cluster.go
@@ -1,6 +1,7 @@
package cmdutils
import (
+ "io"
"time"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
@@ -24,6 +25,7 @@ type CreateClusterCmdParams struct {
CreateNGOptions
CreateManagedNGOptions
CreateSpotOceanNodeGroupOptions
+ ConfigReader io.Reader
}
// NodeGroupOptions holds options for creating nodegroups.
@@ -48,6 +50,7 @@ type CreateNGOptions struct {
InstallNeuronDevicePlugin bool
InstallNvidiaDevicePlugin bool
DryRun bool
+ NodeGroupParallelism int
}
// CreateSpotOceanNodeGroupOptions holds options for creating a Spot Ocean nodegroup.
diff --git a/pkg/ctl/cmdutils/nodegroup_flags.go b/pkg/ctl/cmdutils/nodegroup_flags.go
index 574d3edb69..8f9b50d9ca 100644
--- a/pkg/ctl/cmdutils/nodegroup_flags.go
+++ b/pkg/ctl/cmdutils/nodegroup_flags.go
@@ -64,6 +64,7 @@ func AddCommonCreateNodeGroupAddonsFlags(fs *pflag.FlagSet, ng *api.NodeGroup, o
addCommonCreateNodeGroupIAMAddonsFlags(fs, ng)
fs.BoolVarP(&options.InstallNeuronDevicePlugin, "install-neuron-plugin", "", true, "install Neuron plugin for Inferentia and Trainium nodes")
fs.BoolVarP(&options.InstallNvidiaDevicePlugin, "install-nvidia-plugin", "", true, "install Nvidia plugin for GPU nodes")
+ fs.IntVarP(&options.NodeGroupParallelism, "nodegroup-parallelism", "", 8, "Number of self-managed or managed nodegroups to create in parallel")
}
// AddInstanceSelectorOptions adds flags for EC2 instance selector
diff --git a/pkg/ctl/cmdutils/zonal_shift_config.go b/pkg/ctl/cmdutils/zonal_shift_config.go
new file mode 100644
index 0000000000..999b849199
--- /dev/null
+++ b/pkg/ctl/cmdutils/zonal_shift_config.go
@@ -0,0 +1,33 @@
+package cmdutils
+
+import (
+ "errors"
+ "fmt"
+)
+
+// NewZonalShiftConfigLoader creates a new loader for zonal shift config.
+func NewZonalShiftConfigLoader(cmd *Cmd) ClusterConfigLoader {
+ l := newCommonClusterConfigLoader(cmd)
+ l.flagsIncompatibleWithConfigFile.Insert(
+ "enable-zonal-shift",
+ "cluster",
+ )
+
+ l.validateWithConfigFile = func() error {
+ if cmd.NameArg != "" {
+ return fmt.Errorf("config file and enable-zonal-shift %s", IncompatibleFlags)
+ }
+ if l.ClusterConfig.ZonalShiftConfig == nil || l.ClusterConfig.ZonalShiftConfig.Enabled == nil {
+ return errors.New("field zonalShiftConfig.enabled is required")
+ }
+ return nil
+ }
+
+ l.validateWithoutConfigFile = func() error {
+ if !cmd.CobraCommand.Flag("enable-zonal-shift").Changed {
+ return errors.New("--enable-zonal-shift is required when a config file is not specified")
+ }
+ return nil
+ }
+ return l
+}
diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go
index 21d9ebc4a7..351f0673cf 100644
--- a/pkg/ctl/create/cluster.go
+++ b/pkg/ctl/create/cluster.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os/exec"
+ "strings"
"sync"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -37,7 +38,6 @@ import (
"github.com/weaveworks/eksctl/pkg/utils/kubeconfig"
"github.com/weaveworks/eksctl/pkg/utils/names"
"github.com/weaveworks/eksctl/pkg/utils/nodes"
- "github.com/weaveworks/eksctl/pkg/utils/tasks"
"github.com/weaveworks/eksctl/pkg/vpc"
)
@@ -357,22 +357,18 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
logger.Info("if you encounter any issues, check CloudFormation console or try 'eksctl utils describe-stacks --region=%s --cluster=%s'", meta.Region, meta.Name)
eks.LogEnabledFeatures(cfg)
- postClusterCreationTasks := ctl.CreateExtraClusterConfigTasks(ctx, cfg)
- var preNodegroupAddons, postNodegroupAddons *tasks.TaskTree
- if len(cfg.Addons) > 0 {
- iamRoleCreator := &podidentityassociation.IAMRoleCreator{
- ClusterName: cfg.Metadata.Name,
- StackCreator: stackManager,
- }
- preNodegroupAddons, postNodegroupAddons = addon.CreateAddonTasks(ctx, cfg, ctl, iamRoleCreator, true, cmd.ProviderConfig.WaitTimeout)
- postClusterCreationTasks.Append(preNodegroupAddons)
+ iamRoleCreator := &podidentityassociation.IAMRoleCreator{
+ ClusterName: cfg.Metadata.Name,
+ StackCreator: stackManager,
}
-
- taskTree, err := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), postClusterCreationTasks)
- if err != nil {
- return fmt.Errorf("ocean: failed to create cluster nodegroup: %v", err)
+ preNodegroupAddons, postNodegroupAddons, updateVPCCNITask, autoDefaultAddons := addon.CreateAddonTasks(ctx, cfg, ctl, iamRoleCreator, true, cmd.ProviderConfig.WaitTimeout)
+ if len(autoDefaultAddons) > 0 {
+ logger.Info("default addons %s were not specified, will install them as EKS addons", strings.Join(autoDefaultAddons, ", "))
}
+ postClusterCreationTasks := ctl.CreateExtraClusterConfigTasks(ctx, cfg, preNodegroupAddons, updateVPCCNITask)
+
+ taskTree := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), params.NodeGroupParallelism, postClusterCreationTasks)
// Spot Ocean.
{
@@ -462,7 +458,7 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
// authorize self-managed nodes to join the cluster via aws-auth configmap
// only if EKS access entries are disabled
if cfg.AccessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap {
- if err := eks.UpdateAuthConfigMap(ngCtx, cfg.NodeGroups, clientSet); err != nil {
+ if err := eks.UpdateAuthConfigMap(cfg.NodeGroups, clientSet); err != nil {
return err
}
}
diff --git a/pkg/ctl/create/cluster_test.go b/pkg/ctl/create/cluster_test.go
index 4c2d388771..f876004ca4 100644
--- a/pkg/ctl/create/cluster_test.go
+++ b/pkg/ctl/create/cluster_test.go
@@ -273,6 +273,7 @@ var _ = Describe("create cluster", func() {
clusterConfig := api.NewClusterConfig()
clusterConfig.Metadata.Name = clusterName
+ clusterConfig.AddonsConfig.DisableDefaultAddons = true
clusterConfig.VPC.ClusterEndpoints = api.ClusterEndpointAccessDefaults()
clusterConfig.AccessConfig.AuthenticationMode = ekstypes.AuthenticationModeApiAndConfigMap
@@ -877,7 +878,7 @@ var (
updateMocksForNodegroups = func(status cftypes.StackStatus, outputs []cftypes.Output) func(mp *mockprovider.MockProvider) {
return func(mp *mockprovider.MockProvider) {
- mp.MockEC2().On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ mp.MockEC2().On("DescribeInstanceTypeOfferings", mock.Anything, mock.Anything, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
InstanceType: "g3.xlarge",
@@ -951,7 +952,7 @@ func defaultProviderMocks(p *mockprovider.MockProvider, output []cftypes.Output,
ZoneId: aws.String("id"),
}},
}, nil)
- p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
+ p.MockCloudFormation().On("ListStacks", mock.Anything, mock.Anything, mock.Anything).Return(&cloudformation.ListStacksOutput{
StackSummaries: []cftypes.StackSummary{
{
StackName: aws.String(clusterStackName),
@@ -1039,7 +1040,7 @@ func defaultProviderMocks(p *mockprovider.MockProvider, output []cftypes.Output,
},
},
}, nil)
- p.MockEC2().On("DescribeSubnets", mock.Anything, mock.Anything).Return(&ec2.DescribeSubnetsOutput{
+ p.MockEC2().On("DescribeSubnets", mock.Anything, mock.Anything, mock.Anything).Return(&ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{},
}, nil)
p.MockEC2().On("DescribeVpcs", mock.Anything, mock.Anything).Return(&ec2.DescribeVpcsOutput{
@@ -1075,7 +1076,7 @@ func mockOutposts(provider *mockprovider.MockProvider, outpostID string) {
}, nil)
provider.MockOutposts().On("GetOutpostInstanceTypes", mock.Anything, &outposts.GetOutpostInstanceTypesInput{
OutpostId: aws.String(outpostID),
- }).Return(&outposts.GetOutpostInstanceTypesOutput{
+ }, mock.Anything).Return(&outposts.GetOutpostInstanceTypesOutput{
InstanceTypes: []outpoststypes.InstanceTypeItem{
{
InstanceType: aws.String("m5.xlarge"),
@@ -1084,7 +1085,7 @@ func mockOutposts(provider *mockprovider.MockProvider, outpostID string) {
}, nil)
provider.MockEC2().On("DescribeInstanceTypes", mock.Anything, &ec2.DescribeInstanceTypesInput{
InstanceTypes: []ec2types.InstanceType{"m5.xlarge"},
- }).Return(&ec2.DescribeInstanceTypesOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypesOutput{
InstanceTypes: []ec2types.InstanceTypeInfo{
{
InstanceType: "m5.xlarge",
diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go
index e316c9b405..b771b6e6db 100644
--- a/pkg/ctl/create/nodegroup.go
+++ b/pkg/ctl/create/nodegroup.go
@@ -82,6 +82,7 @@ func createNodeGroupCmd(cmd *cmdutils.Cmd) {
},
SkipOutdatedAddonsCheck: options.SkipOutdatedAddonsCheck,
ConfigFileProvided: cmd.ClusterConfigFile != "",
+ Parallelism: options.NodeGroupParallelism,
}, ngFilter)
})
}
diff --git a/pkg/ctl/utils/update_addon.go b/pkg/ctl/utils/update_addon.go
new file mode 100644
index 0000000000..b10e50ae2c
--- /dev/null
+++ b/pkg/ctl/utils/update_addon.go
@@ -0,0 +1,53 @@
+package utils
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eks"
+
+ defaultaddons "github.com/weaveworks/eksctl/pkg/addons/default"
+ "github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+ "github.com/weaveworks/eksctl/pkg/kubernetes"
+ "github.com/weaveworks/eksctl/pkg/utils/apierrors"
+)
+
+type handleAddonUpdate func(*kubernetes.RawClient, defaultaddons.AddonVersionDescriber) (updateRequired bool, err error)
+
+func updateAddon(ctx context.Context, cmd *cmdutils.Cmd, addonName string, handleUpdate handleAddonUpdate) error {
+ if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
+ return err
+ }
+ ctl, err := cmd.NewProviderForExistingCluster(ctx)
+ if err != nil {
+ return err
+ }
+ if ok, err := ctl.CanUpdate(cmd.ClusterConfig); !ok {
+ return err
+ }
+
+ eksAPI := ctl.AWSProvider.EKS()
+ switch _, err := eksAPI.DescribeAddon(ctx, &eks.DescribeAddonInput{
+ AddonName: aws.String(addonName),
+ ClusterName: aws.String(cmd.ClusterConfig.Metadata.Name),
+ }); {
+ case err == nil:
+ return fmt.Errorf("addon %s is installed as a managed EKS addon; to update it, use `eksctl update addon` instead", addonName)
+ case apierrors.IsNotFoundError(err):
+
+ default:
+ return fmt.Errorf("error describing addon %s: %w", addonName, err)
+ }
+
+ rawClient, err := ctl.NewRawClient(cmd.ClusterConfig)
+ if err != nil {
+ return err
+ }
+ updateRequired, err := handleUpdate(rawClient, eksAPI)
+ if err != nil {
+ return err
+ }
+ cmdutils.LogPlanModeWarning(cmd.Plan && updateRequired)
+ return nil
+}
diff --git a/pkg/ctl/utils/update_aws_node.go b/pkg/ctl/utils/update_aws_node.go
index 4dadc1aec6..6e53d0aede 100644
--- a/pkg/ctl/utils/update_aws_node.go
+++ b/pkg/ctl/utils/update_aws_node.go
@@ -9,6 +9,7 @@ import (
defaultaddons "github.com/weaveworks/eksctl/pkg/addons/default"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+ "github.com/weaveworks/eksctl/pkg/kubernetes"
)
func updateAWSNodeCmd(cmd *cmdutils.Cmd) {
@@ -34,37 +35,11 @@ func updateAWSNodeCmd(cmd *cmdutils.Cmd) {
}
func doUpdateAWSNode(cmd *cmdutils.Cmd) error {
- if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
- return err
- }
-
- cfg := cmd.ClusterConfig
- meta := cmd.ClusterConfig.Metadata
-
ctx := context.TODO()
- ctl, err := cmd.NewProviderForExistingCluster(ctx)
- if err != nil {
- return err
- }
-
- if ok, err := ctl.CanUpdate(cfg); !ok {
- return err
- }
-
- rawClient, err := ctl.NewRawClient(cfg)
- if err != nil {
- return err
- }
-
- updateRequired, err := defaultaddons.UpdateAWSNode(ctx, defaultaddons.AddonInput{
- RawClient: rawClient,
- Region: meta.Region,
- }, cmd.Plan)
- if err != nil {
- return err
- }
-
- cmdutils.LogPlanModeWarning(cmd.Plan && updateRequired)
-
- return nil
+ return updateAddon(ctx, cmd, api.VPCCNIAddon, func(rawClient *kubernetes.RawClient, _ defaultaddons.AddonVersionDescriber) (bool, error) {
+ return defaultaddons.UpdateAWSNode(ctx, defaultaddons.AddonInput{
+ RawClient: rawClient,
+ Region: cmd.ClusterConfig.Metadata.Region,
+ }, cmd.Plan)
+ })
}
diff --git a/pkg/ctl/utils/update_coredns.go b/pkg/ctl/utils/update_coredns.go
index 3728f2a32c..5e37fb2f02 100644
--- a/pkg/ctl/utils/update_coredns.go
+++ b/pkg/ctl/utils/update_coredns.go
@@ -9,6 +9,7 @@ import (
defaultaddons "github.com/weaveworks/eksctl/pkg/addons/default"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+ "github.com/weaveworks/eksctl/pkg/kubernetes"
)
func updateCoreDNSCmd(cmd *cmdutils.Cmd) {
@@ -34,44 +35,16 @@ func updateCoreDNSCmd(cmd *cmdutils.Cmd) {
}
func doUpdateCoreDNS(cmd *cmdutils.Cmd) error {
- if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
- return err
- }
-
- cfg := cmd.ClusterConfig
- meta := cmd.ClusterConfig.Metadata
-
ctx := context.TODO()
- ctl, err := cmd.NewProviderForExistingCluster(ctx)
- if err != nil {
- return err
- }
-
- if ok, err := ctl.CanUpdate(cfg); !ok {
- return err
- }
-
- rawClient, err := ctl.NewRawClient(cfg)
- if err != nil {
- return err
- }
-
- kubernetesVersion, err := rawClient.ServerVersion()
- if err != nil {
- return err
- }
-
- updateRequired, err := defaultaddons.UpdateCoreDNS(ctx, defaultaddons.AddonInput{
- RawClient: rawClient,
- ControlPlaneVersion: kubernetesVersion,
- Region: meta.Region,
- }, cmd.Plan)
-
- if err != nil {
- return err
- }
-
- cmdutils.LogPlanModeWarning(cmd.Plan && updateRequired)
-
- return nil
+ return updateAddon(ctx, cmd, api.CoreDNSAddon, func(rawClient *kubernetes.RawClient, _ defaultaddons.AddonVersionDescriber) (bool, error) {
+ kubernetesVersion, err := rawClient.ServerVersion()
+ if err != nil {
+ return false, err
+ }
+ return defaultaddons.UpdateCoreDNS(ctx, defaultaddons.AddonInput{
+ RawClient: rawClient,
+ ControlPlaneVersion: kubernetesVersion,
+ Region: cmd.ClusterConfig.Metadata.Region,
+ }, cmd.Plan)
+ })
}
diff --git a/pkg/ctl/utils/update_kube_proxy.go b/pkg/ctl/utils/update_kube_proxy.go
index 9e9a390315..a6681406af 100644
--- a/pkg/ctl/utils/update_kube_proxy.go
+++ b/pkg/ctl/utils/update_kube_proxy.go
@@ -9,6 +9,7 @@ import (
defaultaddons "github.com/weaveworks/eksctl/pkg/addons/default"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+ "github.com/weaveworks/eksctl/pkg/kubernetes"
)
func updateKubeProxyCmd(cmd *cmdutils.Cmd) {
@@ -34,44 +35,17 @@ func updateKubeProxyCmd(cmd *cmdutils.Cmd) {
}
func doUpdateKubeProxy(cmd *cmdutils.Cmd) error {
- if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
- return err
- }
-
- cfg := cmd.ClusterConfig
- meta := cmd.ClusterConfig.Metadata
-
ctx := context.TODO()
- ctl, err := cmd.NewProviderForExistingCluster(ctx)
- if err != nil {
- return err
- }
-
- if ok, err := ctl.CanUpdate(cfg); !ok {
- return err
- }
-
- rawClient, err := ctl.NewRawClient(cfg)
- if err != nil {
- return err
- }
-
- kubernetesVersion, err := rawClient.ServerVersion()
- if err != nil {
- return err
- }
-
- updateRequired, err := defaultaddons.UpdateKubeProxy(ctx, defaultaddons.AddonInput{
- RawClient: rawClient,
- ControlPlaneVersion: kubernetesVersion,
- Region: meta.Region,
- EKSAPI: ctl.AWSProvider.EKS(),
- }, cmd.Plan)
- if err != nil {
- return err
- }
-
- cmdutils.LogPlanModeWarning(cmd.Plan && updateRequired)
-
- return nil
+ return updateAddon(ctx, cmd, api.KubeProxyAddon, func(rawClient *kubernetes.RawClient, addonDescriber defaultaddons.AddonVersionDescriber) (bool, error) {
+ kubernetesVersion, err := rawClient.ServerVersion()
+ if err != nil {
+ return false, err
+ }
+ return defaultaddons.UpdateKubeProxy(ctx, defaultaddons.AddonInput{
+ RawClient: rawClient,
+ ControlPlaneVersion: kubernetesVersion,
+ Region: cmd.ClusterConfig.Metadata.Region,
+ AddonVersionDescriber: addonDescriber,
+ }, cmd.Plan)
+ })
}
diff --git a/pkg/ctl/utils/update_zonal_shift_config.go b/pkg/ctl/utils/update_zonal_shift_config.go
new file mode 100644
index 0000000000..701f0c99f9
--- /dev/null
+++ b/pkg/ctl/utils/update_zonal_shift_config.go
@@ -0,0 +1,84 @@
+package utils
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eks"
+ ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
+
+ "github.com/kris-nova/logger"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+ "github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+)
+
+func updateZonalShiftConfig(cmd *cmdutils.Cmd, handler func(*cmdutils.Cmd) error) {
+ cfg := api.NewClusterConfig()
+ cmd.ClusterConfig = cfg
+
+ cmd.SetDescription("update-zonal-shift-config", "update zonal shift config", "update zonal shift config on a cluster")
+
+ var enableZonalShift bool
+ cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
+ cmd.NameArg = cmdutils.GetNameArg(args)
+ if err := cmdutils.NewZonalShiftConfigLoader(cmd).Load(); err != nil {
+ return err
+ }
+ if cmd.ClusterConfigFile == "" {
+ cfg.ZonalShiftConfig = &api.ZonalShiftConfig{
+ Enabled: &enableZonalShift,
+ }
+ }
+ return handler(cmd)
+ }
+
+ cmdutils.AddCommonFlagsForAWS(cmd, &cmd.ProviderConfig, false)
+
+ cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
+ cmdutils.AddClusterFlag(fs, cfg.Metadata)
+ cmdutils.AddRegionFlag(fs, &cmd.ProviderConfig)
+ cmdutils.AddConfigFileFlag(fs, &cmd.ClusterConfigFile)
+ fs.BoolVar(&enableZonalShift, "enable-zonal-shift", true, "Enable zonal shift on a cluster")
+ })
+
+}
+
+func updateZonalShiftConfigCmd(cmd *cmdutils.Cmd) {
+ updateZonalShiftConfig(cmd, doUpdateZonalShiftConfig)
+}
+
+func doUpdateZonalShiftConfig(cmd *cmdutils.Cmd) error {
+ cfg := cmd.ClusterConfig
+ ctx := context.Background()
+ if cfg.Metadata.Name == "" {
+ return cmdutils.ErrMustBeSet(cmdutils.ClusterNameFlag(cmd))
+ }
+ ctl, err := cmd.NewProviderForExistingCluster(ctx)
+ if err != nil {
+ return err
+ }
+ makeZonalShiftStatus := func(enabled *bool) string {
+ if api.IsEnabled(enabled) {
+ return "enabled"
+ }
+ return "disabled"
+ }
+ if zsc := ctl.Status.ClusterInfo.Cluster.ZonalShiftConfig; zsc != nil && *zsc.Enabled == api.IsEnabled(cfg.ZonalShiftConfig.Enabled) {
+ logger.Info("zonal shift is already %s", makeZonalShiftStatus(zsc.Enabled))
+ return nil
+ }
+ if err := ctl.UpdateClusterConfig(ctx, &eks.UpdateClusterConfigInput{
+ Name: aws.String(cfg.Metadata.Name),
+ ZonalShiftConfig: &ekstypes.ZonalShiftConfigRequest{
+ Enabled: cfg.ZonalShiftConfig.Enabled,
+ },
+ }); err != nil {
+ return fmt.Errorf("updating zonal shift config: %w", err)
+ }
+ logger.Info("zonal shift %s successfully", makeZonalShiftStatus(cfg.ZonalShiftConfig.Enabled))
+ return nil
+}
diff --git a/pkg/ctl/utils/utils.go b/pkg/ctl/utils/utils.go
index 85c6d7a022..29f396dfa3 100644
--- a/pkg/ctl/utils/utils.go
+++ b/pkg/ctl/utils/utils.go
@@ -33,6 +33,7 @@ func Command(flagGrouping *cmdutils.FlagGrouping) *cobra.Command {
cmdutils.AddResourceCmd(flagGrouping, verbCmd, describeAddonConfigurationCmd)
cmdutils.AddResourceCmd(flagGrouping, verbCmd, migrateToPodIdentityCmd)
cmdutils.AddResourceCmd(flagGrouping, verbCmd, migrateAccessEntryCmd)
+ cmdutils.AddResourceCmd(flagGrouping, verbCmd, updateZonalShiftConfigCmd)
return verbCmd
}
diff --git a/pkg/eks/api.go b/pkg/eks/api.go
index 9c8e8582e5..e1ba830214 100644
--- a/pkg/eks/api.go
+++ b/pkg/eks/api.go
@@ -232,7 +232,12 @@ func ParseConfig(data []byte) (*api.ClusterConfig, error) {
// LoadConfigFromFile loads ClusterConfig from configFile
func LoadConfigFromFile(configFile string) (*api.ClusterConfig, error) {
- data, err := readConfig(configFile)
+ return LoadConfigWithReader(configFile, nil)
+}
+
+// LoadConfigWithReader loads ClusterConfig from configFile or configReader.
+func LoadConfigWithReader(configFile string, configReader io.Reader) (*api.ClusterConfig, error) {
+ data, err := readConfig(configFile, configReader)
if err != nil {
return nil, errors.Wrapf(err, "reading config file %q", configFile)
}
@@ -241,12 +246,14 @@ func LoadConfigFromFile(configFile string) (*api.ClusterConfig, error) {
return nil, errors.Wrapf(err, "loading config file %q", configFile)
}
return clusterConfig, nil
-
}
-func readConfig(configFile string) ([]byte, error) {
+func readConfig(configFile string, reader io.Reader) ([]byte, error) {
if configFile == "-" {
- return io.ReadAll(os.Stdin)
+ if reader == nil {
+ reader = os.Stdin
+ }
+ return io.ReadAll(reader)
}
return os.ReadFile(configFile)
}
diff --git a/pkg/eks/api_test.go b/pkg/eks/api_test.go
index ffae2269bb..8eb683c08d 100644
--- a/pkg/eks/api_test.go
+++ b/pkg/eks/api_test.go
@@ -260,8 +260,8 @@ var _ = Describe("eksctl API", func() {
})
- testEnsureAMI := func(matcher gomegatypes.GomegaMatcher) {
- err := ResolveAMI(context.Background(), provider, "1.14", ng)
+ testEnsureAMI := func(matcher gomegatypes.GomegaMatcher, version string) {
+ err := ResolveAMI(context.Background(), provider, version, ng)
ExpectWithOffset(1, err).NotTo(HaveOccurred())
ExpectWithOffset(1, ng.AMI).To(matcher)
}
@@ -275,7 +275,7 @@ var _ = Describe("eksctl API", func() {
},
}, nil)
- testEnsureAMI(Equal("ami-ssm"))
+ testEnsureAMI(Equal("ami-ssm"), "1.14")
})
It("should fall back to auto resolution for Ubuntu1804", func() {
@@ -283,7 +283,44 @@ var _ = Describe("eksctl API", func() {
mockDescribeImages(provider, "ami-ubuntu", func(input *ec2.DescribeImagesInput) bool {
return input.Owners[0] == "099720109477"
})
- testEnsureAMI(Equal("ami-ubuntu"))
+ testEnsureAMI(Equal("ami-ubuntu"), "1.14")
+ })
+
+ It("should fall back to auto resolution for Ubuntu2004 on 1.14", func() {
+ ng.AMIFamily = api.NodeImageFamilyUbuntu2004
+ mockDescribeImages(provider, "ami-ubuntu", func(input *ec2.DescribeImagesInput) bool {
+ return input.Owners[0] == "099720109477"
+ })
+ testEnsureAMI(Equal("ami-ubuntu"), "1.14")
+ })
+
+ It("should resolve AMI using SSM Parameter Store for Ubuntu2004 on 1.29", func() {
+ provider.MockSSM().On("GetParameter", mock.Anything, &ssm.GetParameterInput{
+ Name: aws.String("/aws/service/canonical/ubuntu/eks/20.04/1.29/stable/current/amd64/hvm/ebs-gp2/ami-id"),
+ }).Return(&ssm.GetParameterOutput{
+ Parameter: &ssmtypes.Parameter{
+ Value: aws.String("ami-ubuntu"),
+ },
+ }, nil)
+ ng.AMIFamily = api.NodeImageFamilyUbuntu2004
+
+ testEnsureAMI(Equal("ami-ubuntu"), "1.29")
+ })
+
+ It("should fall back to auto resolution for Ubuntu2204", func() {
+ ng.AMIFamily = api.NodeImageFamilyUbuntu2204
+ mockDescribeImages(provider, "ami-ubuntu", func(input *ec2.DescribeImagesInput) bool {
+ return input.Owners[0] == "099720109477"
+ })
+ testEnsureAMI(Equal("ami-ubuntu"), "1.14")
+ })
+
+ It("should fall back to auto resolution for UbuntuPro2204", func() {
+ ng.AMIFamily = api.NodeImageFamilyUbuntuPro2204
+ mockDescribeImages(provider, "ami-ubuntu", func(input *ec2.DescribeImagesInput) bool {
+ return input.Owners[0] == "099720109477"
+ })
+ testEnsureAMI(Equal("ami-ubuntu"), "1.14")
})
It("should fall back to auto resolution for Ubuntu2004", func() {
@@ -317,7 +354,7 @@ var _ = Describe("eksctl API", func() {
return len(input.ImageIds) == 0
})
- testEnsureAMI(Equal("ami-auto"))
+ testEnsureAMI(Equal("ami-auto"), "1.14")
})
})
@@ -470,7 +507,7 @@ var _ = Describe("CheckInstanceAvailability", func() {
},
LocationType: ec2types.LocationTypeAvailabilityZone,
MaxResults: aws.Int32(100),
- }).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
InstanceType: "t2.nano",
@@ -610,7 +647,7 @@ var _ = Describe("CheckInstanceAvailability", func() {
},
LocationType: ec2types.LocationTypeAvailabilityZone,
MaxResults: aws.Int32(100),
- }).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
InstanceType: "t2.nano",
@@ -665,7 +702,7 @@ var _ = Describe("CheckInstanceAvailability", func() {
},
LocationType: ec2types.LocationTypeAvailabilityZone,
MaxResults: aws.Int32(100),
- }).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypeOfferingsOutput{
InstanceTypeOfferings: []ec2types.InstanceTypeOffering{
{
InstanceType: "t2.nano",
diff --git a/pkg/eks/client.go b/pkg/eks/client.go
index fed5822258..1f8ae0252b 100644
--- a/pkg/eks/client.go
+++ b/pkg/eks/client.go
@@ -155,7 +155,7 @@ func (c *KubernetesProvider) WaitForControlPlane(meta *api.ClusterMeta, clientSe
}
// UpdateAuthConfigMap creates or adds a nodegroup IAM role in the auth ConfigMap for the given nodegroup.
-func UpdateAuthConfigMap(ctx context.Context, nodeGroups []*api.NodeGroup, clientSet kubernetes.Interface) error {
+func UpdateAuthConfigMap(nodeGroups []*api.NodeGroup, clientSet kubernetes.Interface) error {
for _, ng := range nodeGroups {
// skip ocean cluster
if ng.SpotOcean != nil && ng.Name == api.SpotOceanClusterNodeGroupName {
@@ -166,13 +166,6 @@ func UpdateAuthConfigMap(ctx context.Context, nodeGroups []*api.NodeGroup, clien
if err := authconfigmap.AddNodeGroup(clientSet, ng); err != nil {
return err
}
-
- // wait for nodes to join
- if ng.SpotOcean == nil {
- if err := WaitForNodes(ctx, clientSet, ng); err != nil {
- return err
- }
- }
}
return nil
}
diff --git a/pkg/eks/eks_test.go b/pkg/eks/eks_test.go
index 02a4d4d6b1..173a740325 100644
--- a/pkg/eks/eks_test.go
+++ b/pkg/eks/eks_test.go
@@ -110,7 +110,7 @@ var _ = Describe("EKS API wrapper", func() {
}
}
return matches == len(expectedStatusFilter)
- })).Return(&cfn.ListStacksOutput{}, nil)
+ }), mock.Anything).Return(&cfn.ListStacksOutput{}, nil)
})
JustBeforeEach(func() {
diff --git a/pkg/eks/nodegroup_service_test.go b/pkg/eks/nodegroup_service_test.go
index eaed9b0b2c..33d08e72d8 100644
--- a/pkg/eks/nodegroup_service_test.go
+++ b/pkg/eks/nodegroup_service_test.go
@@ -326,13 +326,13 @@ func mockOutpostInstanceTypes(provider *mockprovider.MockProvider) {
instanceTypes[i] = it.InstanceType
}
- provider.MockOutposts().On("GetOutpostInstanceTypes", mock.Anything, mock.Anything).Return(&awsoutposts.GetOutpostInstanceTypesOutput{
+ provider.MockOutposts().On("GetOutpostInstanceTypes", mock.Anything, mock.Anything, mock.Anything).Return(&awsoutposts.GetOutpostInstanceTypesOutput{
InstanceTypes: instanceTypeItems,
}, nil)
provider.MockEC2().On("DescribeInstanceTypes", mock.Anything, &ec2.DescribeInstanceTypesInput{
InstanceTypes: instanceTypes,
- }).Return(&ec2.DescribeInstanceTypesOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypesOutput{
InstanceTypes: instanceTypeInfoList,
}, nil)
}
diff --git a/pkg/eks/retryer_v2.go b/pkg/eks/retryer_v2.go
index b491bb1103..30bf2dd950 100644
--- a/pkg/eks/retryer_v2.go
+++ b/pkg/eks/retryer_v2.go
@@ -1,15 +1,14 @@
package eks
import (
+ "errors"
"net/http"
"github.com/aws/aws-sdk-go-v2/aws"
-
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/smithy-go"
- "github.com/pkg/errors"
)
const (
@@ -39,7 +38,10 @@ func (r *RetryerV2) IsErrorRetryable(err error) bool {
}
var oe *smithy.OperationError
- return errors.As(err, &oe) && oe.Err != nil && isErrorRetryable(oe.Err)
+ if !errors.As(err, &oe) {
+ return true
+ }
+ return oe.Err != nil && isErrorRetryable(oe.Err)
}
func isErrorRetryable(err error) bool {
diff --git a/pkg/eks/services_v2.go b/pkg/eks/services_v2.go
index 26154f6c1f..573a75417f 100644
--- a/pkg/eks/services_v2.go
+++ b/pkg/eks/services_v2.go
@@ -5,6 +5,7 @@ import (
"sync"
"github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/ratelimit"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/service/cloudformation"
"github.com/aws/aws-sdk-go-v2/service/ec2"
@@ -85,6 +86,7 @@ func (s *ServicesV2) CloudFormation() awsapi.CloudFormation {
o.StandardOptions = []func(*retry.StandardOptions){
func(so *retry.StandardOptions) {
so.MaxAttempts = maxRetries
+ so.RateLimiter = ratelimit.None
},
}
})
diff --git a/pkg/eks/tasks.go b/pkg/eks/tasks.go
index 3d9cc5eb4e..c17e28672e 100644
--- a/pkg/eks/tasks.go
+++ b/pkg/eks/tasks.go
@@ -28,14 +28,13 @@ import (
"github.com/weaveworks/eksctl/pkg/actions/irsa"
"github.com/weaveworks/eksctl/pkg/addons"
+ api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/fargate"
iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
+ "github.com/weaveworks/eksctl/pkg/kubernetes"
instanceutils "github.com/weaveworks/eksctl/pkg/utils/instance"
"github.com/weaveworks/eksctl/pkg/utils/tasks"
-
- api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
- "github.com/weaveworks/eksctl/pkg/kubernetes"
)
type clusterConfigTask struct {
@@ -279,10 +278,11 @@ func (t *restartDaemonsetTask) Do(errCh chan error) error {
}
// CreateExtraClusterConfigTasks returns all tasks for updating cluster configuration
-func (c *ClusterProvider) CreateExtraClusterConfigTasks(ctx context.Context, cfg *api.ClusterConfig) *tasks.TaskTree {
+func (c *ClusterProvider) CreateExtraClusterConfigTasks(ctx context.Context, cfg *api.ClusterConfig, preNodeGroupAddons *tasks.TaskTree, updateVPCCNITask *tasks.GenericTask) *tasks.TaskTree {
newTasks := &tasks.TaskTree{
Parallel: false,
IsSubTask: true,
+ Tasks: []tasks.Task{preNodeGroupAddons},
}
newTasks.Append(&tasks.GenericTask{
@@ -302,6 +302,13 @@ func (c *ClusterProvider) CreateExtraClusterConfigTasks(ctx context.Context, cfg
},
})
+ if api.IsEnabled(cfg.IAM.WithOIDC) {
+ c.appendCreateTasksForIAMServiceAccounts(ctx, cfg, newTasks)
+ if updateVPCCNITask != nil {
+ newTasks.Append(updateVPCCNITask)
+ }
+ }
+
if cfg.HasClusterCloudWatchLogging() {
if logRetentionDays := cfg.CloudWatch.ClusterLogging.LogRetentionInDays; logRetentionDays != 0 {
newTasks.Append(&clusterConfigTask{
@@ -334,10 +341,6 @@ func (c *ClusterProvider) CreateExtraClusterConfigTasks(ctx context.Context, cfg
})
}
- if api.IsEnabled(cfg.IAM.WithOIDC) {
- c.appendCreateTasksForIAMServiceAccounts(ctx, cfg, newTasks)
- }
-
if len(cfg.IdentityProviders) > 0 {
newTasks.Append(identityproviders.NewAssociateProvidersTask(ctx, *cfg.Metadata, cfg.IdentityProviders, c.AWSProvider.EKS()))
}
@@ -525,16 +528,10 @@ func (c *ClusterProvider) appendCreateTasksForIAMServiceAccounts(ctx context.Con
// given a clientSet getter and OpenIDConnectManager reference we can build out
// the list of tasks for each of the service accounts that need to be created
newTasks := c.NewStackManager(cfg).NewTasksToCreateIAMServiceAccounts(
- api.IAMServiceAccountsWithImplicitServiceAccounts(cfg),
+ cfg.IAM.ServiceAccounts,
oidcPlaceholder,
clientSet,
)
newTasks.IsSubTask = true
tasks.Append(newTasks)
- tasks.Append(&restartDaemonsetTask{
- namespace: "kube-system",
- name: "aws-node",
- clusterProvider: c,
- spec: cfg,
- })
}
diff --git a/pkg/nodebootstrap/al2023.go b/pkg/nodebootstrap/al2023.go
index be587be681..1943f92823 100644
--- a/pkg/nodebootstrap/al2023.go
+++ b/pkg/nodebootstrap/al2023.go
@@ -49,6 +49,7 @@ func newAL2023Bootstrapper(cfg *api.ClusterConfig, np api.NodePool, clusterDNS s
cfg: cfg,
nodePool: np,
clusterDNS: clusterDNS,
+ scripts: []string{assets.AL2023XTablesLock},
}
}
diff --git a/pkg/nodebootstrap/al2023_test.go b/pkg/nodebootstrap/al2023_test.go
index 5b62bffe9e..56568adaaa 100644
--- a/pkg/nodebootstrap/al2023_test.go
+++ b/pkg/nodebootstrap/al2023_test.go
@@ -48,13 +48,13 @@ var _ = DescribeTable("Unmanaged AL2023", func(e al2023Entry) {
Expect(actual).To(Equal(e.expectedUserData))
},
Entry("default", al2023Entry{
- expectedUserData: wrapMIMEParts(nodeConfig),
+ expectedUserData: wrapMIMEParts(xTablesLock + nodeConfig),
}),
Entry("efa enabled", al2023Entry{
overrideNodegroupSettings: func(np api.NodePool) {
np.BaseNodeGroup().EFAEnabled = aws.Bool(true)
},
- expectedUserData: wrapMIMEParts(efaScript + nodeConfig),
+ expectedUserData: wrapMIMEParts(xTablesLock + efaScript + nodeConfig),
}),
)
@@ -83,26 +83,26 @@ var _ = DescribeTable("Managed AL2023", func(e al2023Entry) {
Expect(actual).To(Equal(e.expectedUserData))
},
Entry("native AMI", al2023Entry{
- expectedUserData: "",
+ expectedUserData: wrapMIMEParts(xTablesLock),
}),
Entry("native AMI && EFA enabled", al2023Entry{
overrideNodegroupSettings: func(np api.NodePool) {
np.BaseNodeGroup().EFAEnabled = aws.Bool(true)
},
- expectedUserData: wrapMIMEParts(efaCloudhook),
+ expectedUserData: wrapMIMEParts(xTablesLock + efaCloudhook),
}),
Entry("custom AMI", al2023Entry{
overrideNodegroupSettings: func(np api.NodePool) {
np.BaseNodeGroup().AMI = "ami-xxxx"
},
- expectedUserData: wrapMIMEParts(managedNodeConfig),
+ expectedUserData: wrapMIMEParts(xTablesLock + managedNodeConfig),
}),
Entry("custom AMI && EFA enabled", al2023Entry{
overrideNodegroupSettings: func(np api.NodePool) {
np.BaseNodeGroup().AMI = "ami-xxxx"
np.BaseNodeGroup().EFAEnabled = aws.Bool(true)
},
- expectedUserData: wrapMIMEParts(efaCloudhook + managedNodeConfig),
+ expectedUserData: wrapMIMEParts(xTablesLock + efaCloudhook + managedNodeConfig),
}),
)
@@ -274,6 +274,13 @@ Content-Type: multipart/mixed; boundary=//
`
}
+ xTablesLock = fmt.Sprintf(`--//
+Content-Type: text/x-shellscript
+Content-Type: charset="us-ascii"
+
+%s
+`, assets.AL2023XTablesLock)
+
efaCloudhook = fmt.Sprintf(`--//
Content-Type: text/cloud-boothook
Content-Type: charset="us-ascii"
diff --git a/pkg/nodebootstrap/assets/assets.go b/pkg/nodebootstrap/assets/assets.go
index c77c5bc450..94fdc480b2 100644
--- a/pkg/nodebootstrap/assets/assets.go
+++ b/pkg/nodebootstrap/assets/assets.go
@@ -20,6 +20,11 @@ var BootstrapHelperSh string
//go:embed scripts/bootstrap.ubuntu.sh
var BootstrapUbuntuSh string
+// AL2023XTablesLock holds the contents for creating a lock file for AL2023 AMIs.
+//
+//go:embed scripts/al2023-xtables.lock.sh
+var AL2023XTablesLock string
+
// EfaAl2Sh holds the efa.al2.sh contents
//
//go:embed scripts/efa.al2.sh
diff --git a/pkg/nodebootstrap/assets/scripts/al2023-xtables.lock.sh b/pkg/nodebootstrap/assets/scripts/al2023-xtables.lock.sh
new file mode 100644
index 0000000000..5fa346b947
--- /dev/null
+++ b/pkg/nodebootstrap/assets/scripts/al2023-xtables.lock.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+touch /run/xtables.lock
diff --git a/pkg/nodebootstrap/assets/scripts/efa.al2.sh b/pkg/nodebootstrap/assets/scripts/efa.al2.sh
index 8179c983af..f99ae6b962 100644
--- a/pkg/nodebootstrap/assets/scripts/efa.al2.sh
+++ b/pkg/nodebootstrap/assets/scripts/efa.al2.sh
@@ -7,6 +7,7 @@ set -o nounset
yum install -y wget
wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer.tar.gz
tar -xf /tmp/aws-efa-installer.tar.gz -C /tmp
+rm -rf /tmp/aws-efa-installer.tar.gz
cd /tmp/aws-efa-installer
./efa_installer.sh -y -g
/opt/amazon/efa/bin/fi_info -p efa
diff --git a/pkg/nodebootstrap/assets/scripts/efa.al2023.sh b/pkg/nodebootstrap/assets/scripts/efa.al2023.sh
index 3aef0ce36f..b73f630813 100644
--- a/pkg/nodebootstrap/assets/scripts/efa.al2023.sh
+++ b/pkg/nodebootstrap/assets/scripts/efa.al2023.sh
@@ -7,6 +7,7 @@ set -o nounset
dnf install -y wget
wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer.tar.gz
tar -xf /tmp/aws-efa-installer.tar.gz -C /tmp
+rm -rf /tmp/aws-efa-installer.tar.gz
cd /tmp/aws-efa-installer
./efa_installer.sh -y -g
/opt/amazon/efa/bin/fi_info -p efa
diff --git a/pkg/nodebootstrap/assets/scripts/efa.managed.al2023.boothook b/pkg/nodebootstrap/assets/scripts/efa.managed.al2023.boothook
index 5d2a081688..d8440a4520 100644
--- a/pkg/nodebootstrap/assets/scripts/efa.managed.al2023.boothook
+++ b/pkg/nodebootstrap/assets/scripts/efa.managed.al2023.boothook
@@ -2,6 +2,7 @@ cloud-init-per once dnf_wget dnf install -y wget
cloud-init-per once wget_efa wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer-latest.tar.gz
cloud-init-per once tar_efa tar -xf /tmp/aws-efa-installer-latest.tar.gz -C /tmp
+cloud-init-per once rm_efa_gz rm -rf /tmp/aws-efa-installer-latest.tar.gz
pushd /tmp/aws-efa-installer
cloud-init-per once install_efa ./efa_installer.sh -y -g
pop /tmp/aws-efa-installer
diff --git a/pkg/nodebootstrap/assets/scripts/efa.managed.boothook b/pkg/nodebootstrap/assets/scripts/efa.managed.boothook
index 32e191cd24..d2863d42c6 100644
--- a/pkg/nodebootstrap/assets/scripts/efa.managed.boothook
+++ b/pkg/nodebootstrap/assets/scripts/efa.managed.boothook
@@ -2,6 +2,7 @@ cloud-init-per once yum_wget yum install -y wget
cloud-init-per once wget_efa wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer-latest.tar.gz
cloud-init-per once tar_efa tar -xf /tmp/aws-efa-installer-latest.tar.gz -C /tmp
+cloud-init-per once rm_efa_gz rm -rf /tmp/aws-efa-installer-latest.tar.gz
pushd /tmp/aws-efa-installer
cloud-init-per once install_efa ./efa_installer.sh -y -g
pop /tmp/aws-efa-installer
diff --git a/pkg/nodebootstrap/managed_al2_test.go b/pkg/nodebootstrap/managed_al2_test.go
index d463eea253..6b9c08dcd4 100644
--- a/pkg/nodebootstrap/managed_al2_test.go
+++ b/pkg/nodebootstrap/managed_al2_test.go
@@ -111,6 +111,7 @@ cloud-init-per once yum_wget yum install -y wget
cloud-init-per once wget_efa wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer-latest.tar.gz
cloud-init-per once tar_efa tar -xf /tmp/aws-efa-installer-latest.tar.gz -C /tmp
+cloud-init-per once rm_efa_gz rm -rf /tmp/aws-efa-installer-latest.tar.gz
pushd /tmp/aws-efa-installer
cloud-init-per once install_efa ./efa_installer.sh -y -g
pop /tmp/aws-efa-installer
@@ -143,6 +144,7 @@ cloud-init-per once yum_wget yum install -y wget
cloud-init-per once wget_efa wget -q --timeout=20 https://s3-us-west-2.amazonaws.com/aws-efa-installer/aws-efa-installer-latest.tar.gz -O /tmp/aws-efa-installer-latest.tar.gz
cloud-init-per once tar_efa tar -xf /tmp/aws-efa-installer-latest.tar.gz -C /tmp
+cloud-init-per once rm_efa_gz rm -rf /tmp/aws-efa-installer-latest.tar.gz
pushd /tmp/aws-efa-installer
cloud-init-per once install_efa ./efa_installer.sh -y -g
pop /tmp/aws-efa-installer
diff --git a/pkg/outposts/cluster_extender_test.go b/pkg/outposts/cluster_extender_test.go
index 33d416069f..045e64fef5 100644
--- a/pkg/outposts/cluster_extender_test.go
+++ b/pkg/outposts/cluster_extender_test.go
@@ -728,7 +728,7 @@ func mockDescribeSubnets(provider *mockprovider.MockProvider, clusterSubnets *ap
Values: []string{"vpc-1234"},
},
},
- }).Return(&ec2.DescribeSubnetsOutput{
+ }, mock.Anything).Return(&ec2.DescribeSubnetsOutput{
Subnets: allSubnets,
}, nil)
}
diff --git a/pkg/outposts/outposts_test.go b/pkg/outposts/outposts_test.go
index e72cc368d4..090bfc6231 100644
--- a/pkg/outposts/outposts_test.go
+++ b/pkg/outposts/outposts_test.go
@@ -229,13 +229,13 @@ func mockOutpostInstanceTypes(provider *mockprovider.MockProvider) {
}
instanceTypes[i] = it.InstanceType
}
- provider.MockOutposts().On("GetOutpostInstanceTypes", mock.Anything, mock.Anything).Return(&awsoutposts.GetOutpostInstanceTypesOutput{
+ provider.MockOutposts().On("GetOutpostInstanceTypes", mock.Anything, mock.Anything, mock.Anything).Return(&awsoutposts.GetOutpostInstanceTypesOutput{
InstanceTypes: instanceTypeItems,
}, nil)
provider.MockEC2().On("DescribeInstanceTypes", mock.Anything, &ec2.DescribeInstanceTypesInput{
InstanceTypes: instanceTypes,
- }).Return(&ec2.DescribeInstanceTypesOutput{
+ }, mock.Anything).Return(&ec2.DescribeInstanceTypesOutput{
InstanceTypes: instanceTypeInfoList,
}, nil)
}
diff --git a/pkg/printers/testdata/jsontest_2clusters.golden b/pkg/printers/testdata/jsontest_2clusters.golden
index 4c919cf353..7845b1206a 100644
--- a/pkg/printers/testdata/jsontest_2clusters.golden
+++ b/pkg/printers/testdata/jsontest_2clusters.golden
@@ -35,7 +35,9 @@
"RoleArn": null,
"Status": "ACTIVE",
"Tags": null,
- "Version": null
+ "Version": null,
+ "UpgradePolicy": null,
+ "ZonalShiftConfig": null
},
{
"Id": null,
@@ -73,6 +75,8 @@
"RoleArn": null,
"Status": "ACTIVE",
"Tags": null,
- "Version": null
+ "Version": null,
+ "UpgradePolicy": null,
+ "ZonalShiftConfig": null
}
]
diff --git a/pkg/printers/testdata/jsontest_single.golden b/pkg/printers/testdata/jsontest_single.golden
index 1c8e2d653a..8403c144dc 100644
--- a/pkg/printers/testdata/jsontest_single.golden
+++ b/pkg/printers/testdata/jsontest_single.golden
@@ -34,6 +34,8 @@
"RoleArn": null,
"Status": "ACTIVE",
"Tags": null,
- "Version": null
+ "Version": null,
+ "UpgradePolicy": null,
+ "ZonalShiftConfig": null
}
]
diff --git a/pkg/printers/testdata/yamltest_2clusters.golden b/pkg/printers/testdata/yamltest_2clusters.golden
index 60e61d8f3a..17306cded2 100644
--- a/pkg/printers/testdata/yamltest_2clusters.golden
+++ b/pkg/printers/testdata/yamltest_2clusters.golden
@@ -30,6 +30,8 @@
Status: ACTIVE
Tags: null
Version: null
+ UpgradePolicy: null
+ ZonalShiftConfig: null
- Id: null
Arn: arn-87654321
CertificateAuthority: null
@@ -62,3 +64,5 @@
Status: ACTIVE
Tags: null
Version: null
+ UpgradePolicy: null
+ ZonalShiftConfig: null
diff --git a/pkg/printers/testdata/yamltest_single.golden b/pkg/printers/testdata/yamltest_single.golden
index 9c99e5aca2..55026b7e2f 100644
--- a/pkg/printers/testdata/yamltest_single.golden
+++ b/pkg/printers/testdata/yamltest_single.golden
@@ -30,3 +30,5 @@
Status: ACTIVE
Tags: null
Version: null
+ UpgradePolicy: null
+ ZonalShiftConfig: null
diff --git a/pkg/spot/types.go b/pkg/spot/types.go
index 9dbdcff81a..faddef9351 100644
--- a/pkg/spot/types.go
+++ b/pkg/spot/types.go
@@ -155,6 +155,7 @@ type (
ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"`
Headroom *Headroom `json:"headroom,omitempty"` // cluster
Headrooms []*Headroom `json:"headrooms,omitempty"` // virtualnodegroup
+ Down *AutoScalerDown `json:"down,omitempty"`
}
Headroom struct {
@@ -171,6 +172,16 @@ type (
MaxInstanceCount *int `json:"maxInstanceCount,omitempty"`
}
+ AutoScalerDown struct {
+ EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+ MaxScaleDownPercentage *float64 `json:"maxScaleDownPercentage,omitempty"`
+ AggressiveScaleDown *AggressiveScaleDown `json:"aggressiveScaleDown,omitempty"`
+ }
+
+ AggressiveScaleDown struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ }
+
Label struct {
Key *string `json:"key,omitempty"`
Value *string `json:"value,omitempty"`
diff --git a/pkg/utils/instance/instance.go b/pkg/utils/instance/instance.go
index 024012bb3a..211c668702 100644
--- a/pkg/utils/instance/instance.go
+++ b/pkg/utils/instance/instance.go
@@ -13,6 +13,7 @@ func IsARMInstanceType(instanceType string) bool {
strings.HasPrefix(instanceType, "t4g") ||
strings.HasPrefix(instanceType, "m6g") ||
strings.HasPrefix(instanceType, "m7g") ||
+ strings.HasPrefix(instanceType, "m8g") ||
strings.HasPrefix(instanceType, "c6g") ||
strings.HasPrefix(instanceType, "c7g") ||
strings.HasPrefix(instanceType, "r6g") ||
@@ -20,6 +21,7 @@ func IsARMInstanceType(instanceType string) bool {
strings.HasPrefix(instanceType, "im4g") ||
strings.HasPrefix(instanceType, "is4g") ||
strings.HasPrefix(instanceType, "g5g") ||
+ strings.HasPrefix(instanceType, "hpc7g") ||
strings.HasPrefix(instanceType, "x2g")
}
diff --git a/pkg/utils/tasks/tasks.go b/pkg/utils/tasks/tasks.go
index 6ccb0befde..a7fc346eb1 100644
--- a/pkg/utils/tasks/tasks.go
+++ b/pkg/utils/tasks/tasks.go
@@ -6,6 +6,7 @@ import (
"sync"
"github.com/kris-nova/logger"
+ "golang.org/x/sync/errgroup"
)
// Task is a common interface for the stack manager tasks.
@@ -50,6 +51,7 @@ type TaskTree struct {
Parallel bool
PlanMode bool
IsSubTask bool
+ Limit int
}
// Append new tasks to the set
@@ -147,7 +149,11 @@ func (t *TaskTree) Do(allErrs chan error) error {
errs := make(chan error)
if t.Parallel {
- go doParallelTasks(errs, t.Tasks)
+ if t.Limit > 0 {
+ go runInErrorGroup(t.Tasks, t.Limit, errs)
+ } else {
+ go doParallelTasks(errs, t.Tasks)
+ }
} else {
go doSequentialTasks(errs, t.Tasks)
}
@@ -173,7 +179,11 @@ func (t *TaskTree) DoAllSync() []error {
errs := make(chan error)
if t.Parallel {
- go doParallelTasks(errs, t.Tasks)
+ if t.Limit > 0 {
+ go runInErrorGroup(t.Tasks, t.Limit, errs)
+ } else {
+ go doParallelTasks(errs, t.Tasks)
+ }
} else {
go doSequentialTasks(errs, t.Tasks)
}
@@ -217,6 +227,24 @@ func doParallelTasks(allErrs chan error, tasks []Task) {
close(allErrs)
}
+func runInErrorGroup(tasks []Task, limit int, errs chan error) {
+ var eg errgroup.Group
+ eg.SetLimit(limit)
+ for _, t := range tasks {
+ t := t
+ eg.Go(func() error {
+ if ok := doSingleTask(errs, t); !ok {
+ logger.Debug("failed task: %s (will continue until other parallel tasks are completed)", t.Describe())
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ logger.Debug("error running tasks: %v", err)
+ }
+ close(errs)
+}
+
func doSequentialTasks(allErrs chan error, tasks []Task) {
for t := range tasks {
if ok := doSingleTask(allErrs, tasks[t]); !ok {
diff --git a/pkg/version/release.go b/pkg/version/release.go
index 40045543f1..0e5faf4cf5 100644
--- a/pkg/version/release.go
+++ b/pkg/version/release.go
@@ -3,7 +3,7 @@ package version
// This file was generated by release_generate.go; DO NOT EDIT.
// Version is the version number in semver format X.Y.Z
-var Version = "0.183.0"
+var Version = "0.194.0"
// PreReleaseID can be empty for releases, "rc.X" for release candidates and "dev" for snapshots
var PreReleaseID = "dev"
diff --git a/pkg/vpc/vpc_test.go b/pkg/vpc/vpc_test.go
index e4090852e4..c4c9ef8985 100644
--- a/pkg/vpc/vpc_test.go
+++ b/pkg/vpc/vpc_test.go
@@ -499,7 +499,7 @@ var _ = Describe("VPC", func() {
},
mockEC2: func(ec2Mock *mocksv2.EC2) {
- ec2Mock.On("DescribeSubnets", Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
+ ec2Mock.On("DescribeSubnets", Anything, Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
if len(input.Filters) > 0 {
return &ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
@@ -590,7 +590,7 @@ var _ = Describe("VPC", func() {
},
mockEC2: func(ec2Mock *mocksv2.EC2) {
- ec2Mock.On("DescribeSubnets", Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
+ ec2Mock.On("DescribeSubnets", Anything, Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
if len(input.Filters) > 0 {
return &ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
@@ -673,7 +673,7 @@ var _ = Describe("VPC", func() {
},
},
mockEC2: func(ec2Mock *mocksv2.EC2) {
- ec2Mock.On("DescribeSubnets", Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
+ ec2Mock.On("DescribeSubnets", Anything, Anything, Anything).Return(func(_ context.Context, input *ec2.DescribeSubnetsInput, _ ...func(options *ec2.Options)) *ec2.DescribeSubnetsOutput {
if len(input.Filters) > 0 {
return &ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
@@ -1218,6 +1218,7 @@ var _ = Describe("VPC", func() {
}, {
Name: strings.Pointer("cidr-block"), Values: []string{"192.168.64.0/18"},
}}},
+ Anything,
).Return(&ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
{
@@ -1235,6 +1236,7 @@ var _ = Describe("VPC", func() {
}, {
Name: strings.Pointer("availability-zone"), Values: []string{"az3"},
}}},
+ Anything,
).Return(&ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
{
@@ -1248,6 +1250,7 @@ var _ = Describe("VPC", func() {
p.MockEC2().On("DescribeSubnets",
Anything,
&ec2.DescribeSubnetsInput{SubnetIds: []string{"private1"}},
+ Anything,
).Return(&ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
{
@@ -1262,6 +1265,7 @@ var _ = Describe("VPC", func() {
p.MockEC2().On("DescribeSubnets",
Anything,
&ec2.DescribeSubnetsInput{SubnetIds: []string{"public1"}},
+ Anything,
).Return(&ec2.DescribeSubnetsOutput{
Subnets: []ec2types.Subnet{
{
diff --git a/userdocs/requirements.txt b/userdocs/requirements.txt
index 8e30421a4d..46363cfdf2 100644
--- a/userdocs/requirements.txt
+++ b/userdocs/requirements.txt
@@ -4,11 +4,11 @@ mkdocs-redirects
mkdocs-minify-plugin
mkdocs-glightbox
pymdown-extensions >= 9.9.1
-jinja2 == 3.1.3
-pillow
+jinja2 == 3.1.4
+pillow
cairosvg
-# Dependencies from material theme
+# Dependencies from material theme
mkdocs-material-extensions>=1.1
pygments>=2.12
-markdown>=3.2
\ No newline at end of file
+markdown>=3.2
diff --git a/userdocs/src/getting-started.md b/userdocs/src/getting-started.md
index a41463088d..5bd302c39c 100644
--- a/userdocs/src/getting-started.md
+++ b/userdocs/src/getting-started.md
@@ -1,6 +1,8 @@
# Getting started
!!! tip "New for 2024"
+ `eksctl` now supports the new Kuala Lumpur region (`ap-southeast-5`)
+
EKS Add-ons now support receiving IAM permissions via [EKS Pod Identity Associations](/usage/pod-identity-associations/#eks-add-ons-support-for-pod-identity-associations)
`eksctl` now supports AMIs based on AmazonLinux2023
@@ -122,7 +124,7 @@ eksctl create cluster --name=cluster-1 --nodes=4
### Supported versions
-EKS supports versions `1.23` (extended), `1.24` (extended), `1.25`, `1.26`, `1.27`, `1.28`, `1.29` and **`1.30`** (default).
+EKS supports versions `1.23` (extended), `1.24` (extended), `1.25`, `1.26`, `1.27`, `1.28`, `1.29`, **`1.30`** (default) and `1.31`.
With `eksctl` you can deploy any of the supported versions by passing `--version`.
```sh
diff --git a/userdocs/src/usage/addon-upgrade.md b/userdocs/src/usage/addon-upgrade.md
index e8899cac41..e7fec55176 100644
--- a/userdocs/src/usage/addon-upgrade.md
+++ b/userdocs/src/usage/addon-upgrade.md
@@ -1,5 +1,12 @@
# Default add-on updates
+!!! warning "New for 2024"
+ eksctl now installs default addons as EKS addons instead of self-managed addons. Read more about its implications in [Cluster creation flexibility for default networking addons](#cluster-creation-flexibility-for-default-networking-addons).
+
+!!! warning "New for 2024"
+ For updating addons, the `eksctl utils update-*` commands cannot be used for clusters created with eksctl v0.184.0 and above.
+ This guide applies only to clusters created with earlier eksctl versions.
+
There are 3 default add-ons that get included in each EKS cluster:
- `kube-proxy`
- `aws-node`
diff --git a/userdocs/src/usage/addons.md b/userdocs/src/usage/addons.md
index 05600fa74b..236d6dd884 100644
--- a/userdocs/src/usage/addons.md
+++ b/userdocs/src/usage/addons.md
@@ -6,6 +6,12 @@ CNI plugin through the EKS API
## Creating addons (and providing IAM permissions via IRSA)
+!!! tip "New for 2024"
+ eksctl now supports creating clusters without any default networking addons: [Cluster creation flexibility for default networking addons](#cluster-creation-flexibility-for-default-networking-addons).
+
+!!! warning "New for 2024"
+ eksctl now installs default addons as EKS addons instead of self-managed addons. Read more about its implications in [Cluster creation flexibility for default networking addons](#cluster-creation-flexibility-for-default-networking-addons).
+
!!! tip "New for 2024"
EKS Add-ons now support receiving IAM permissions, required to connect with AWS services outside of cluster, via [EKS Pod Identity Associations](/usage/pod-identity-associations/#eks-add-ons-support-for-pod-identity-associations)
@@ -87,8 +93,8 @@ addons:
For addon create, the `resolveConflicts` field supports three distinct values:
-- `none` - EKS doesn't change the value. The create might fail.
-- `overwrite` - EKS overwrites any config changes back to EKS default values.
+- `none` - EKS doesn't change the value. The create might fail.
+- `overwrite` - EKS overwrites any config changes back to EKS default values.
- `preserve` - EKS doesn't change the value. The create might fail. (Similarly to `none`, but different from [`preserve` in updating addons](#updating-addons))
## Listing enabled addons
@@ -141,7 +147,7 @@ eksctl utils describe-addon-configuration --name vpc-cni --version v1.12.0-eksbu
This returns a JSON schema of the various options available for this addon.
## Working with configuration values
-`ConfigurationValues` can be provided in the configuration file during the creation or update of addons. Only JSON and YAML formats are supported.
+`ConfigurationValues` can be provided in the configuration file during the creation or update of addons. Only JSON and YAML formats are supported.
For eg.,
@@ -202,10 +208,10 @@ addons:
resolveConflicts: preserve
```
-For addon update, the `resolveConflicts` field accepts three distinct values:
+For addon update, the `resolveConflicts` field accepts three distinct values:
- `none` - EKS doesn't change the value. The update might fail.
-- `overwrite` - EKS overwrites any config changes back to EKS default values.
+- `overwrite` - EKS overwrites any config changes back to EKS default values.
- `preserve` - EKS preserves the value. If you choose this option, we recommend that you test any field and value changes on a non-production cluster before updating the add-on on your production cluster.
## Deleting addons
@@ -216,3 +222,48 @@ eksctl delete addon --cluster eksctl create cluster
Check out latest eksctl features
+ ap-southeast-5
)eksctl
is now fully maintained by AWS. For more details check out
eksctl Support Status Update.
+ eksctl
now supports Cluster creation flexibility for networking add-ons.
+
+ eksctl
now installs default addons as EKS addons instead of self-managed addons. To understand its implications, check out
+ Cluster creation flexibility for networking add-ons.
+