diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index ba45ceac2..319415ca7 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -47,11 +47,14 @@ func Convert_v1alpha2_LinodeMachineSpec_To_v1alpha1_LinodeMachineSpec(in *infras return autoConvert_v1alpha2_LinodeMachineSpec_To_v1alpha1_LinodeMachineSpec(in, out, s) } +func Convert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(in *LinodeMachineSpec, out *infrastructurev1alpha2.LinodeMachineSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(in, out, s) +} + func Convert_v1alpha1_LinodeObjectStorageBucketSpec_To_v1alpha2_LinodeObjectStorageBucketSpec(in *LinodeObjectStorageBucketSpec, out *infrastructurev1alpha2.LinodeObjectStorageBucketSpec, s conversion.Scope) error { // WARNING: in.Cluster requires manual conversion: does not exist in peer-type out.Region = in.Cluster out.CredentialsRef = in.CredentialsRef - out.SecretType = in.SecretType return nil } func Convert_v1alpha1_LinodeObjectStorageBucketStatus_To_v1alpha2_LinodeObjectStorageBucketStatus(in *LinodeObjectStorageBucketStatus, out *infrastructurev1alpha2.LinodeObjectStorageBucketStatus, s conversion.Scope) error { @@ -71,7 +74,7 @@ func Convert_v1alpha2_LinodeObjectStorageBucketSpec_To_v1alpha1_LinodeObjectStor out.Cluster = in.Region out.CredentialsRef = in.CredentialsRef out.KeyGeneration = ptr.To(0) - out.SecretType = in.SecretType + out.SecretType = DefaultSecretTypeObjectStorageBucket return nil } diff --git a/api/v1alpha1/linodemachine_conversion_test.go b/api/v1alpha1/linodemachine_conversion_test.go index 7578ce9fa..91181de6d 100644 --- a/api/v1alpha1/linodemachine_conversion_test.go +++ b/api/v1alpha1/linodemachine_conversion_test.go @@ -40,7 +40,6 @@ func TestLinodeMachineConvertTo(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test-machine"}, Spec: LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), 
Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -53,7 +52,6 @@ func TestLinodeMachineConvertTo(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -83,7 +81,6 @@ func TestLinodeMachineConvertTo(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test-machine"}, Spec: infrav1alpha2.LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -96,7 +93,6 @@ func TestLinodeMachineConvertTo(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(infrav1alpha2.InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -170,7 +166,6 @@ func TestLinodeMachineConvertFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test-machine"}, Spec: infrav1alpha2.LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -183,7 +178,6 @@ func TestLinodeMachineConvertFrom(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(infrav1alpha2.InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -218,7 +212,6 @@ func TestLinodeMachineConvertFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "test-machine"}, Spec: LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -231,7 +224,6 @@ func TestLinodeMachineConvertFrom(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), diff --git 
a/api/v1alpha1/linodemachinetemplate_conversion_test.go b/api/v1alpha1/linodemachinetemplate_conversion_test.go index 20b2afe2c..c64ec1ea2 100644 --- a/api/v1alpha1/linodemachinetemplate_conversion_test.go +++ b/api/v1alpha1/linodemachinetemplate_conversion_test.go @@ -42,7 +42,6 @@ func TestLinodeMachineTemplateConvertTo(t *testing.T) { Template: LinodeMachineTemplateResource{ Spec: LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -55,7 +54,6 @@ func TestLinodeMachineTemplateConvertTo(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -88,7 +86,6 @@ func TestLinodeMachineTemplateConvertTo(t *testing.T) { Template: infrav1alpha2.LinodeMachineTemplateResource{ Spec: infrav1alpha2.LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -101,7 +98,6 @@ func TestLinodeMachineTemplateConvertTo(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(infrav1alpha2.InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -178,7 +174,6 @@ func TestLinodeMachineTemplateConvertFrom(t *testing.T) { Template: infrav1alpha2.LinodeMachineTemplateResource{ Spec: infrav1alpha2.LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -191,7 +186,6 @@ func TestLinodeMachineTemplateConvertFrom(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(infrav1alpha2.InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), @@ -224,7 +218,6 @@ func TestLinodeMachineTemplateConvertFrom(t 
*testing.T) { Template: LinodeMachineTemplateResource{ Spec: LinodeMachineSpec{ ProviderID: ptr.To("linode://1234"), - InstanceID: ptr.To(1234), Region: "us-mia", Type: "g6-standard-2", Group: "", @@ -237,7 +230,6 @@ func TestLinodeMachineTemplateConvertFrom(t *testing.T) { BackupsEnabled: false, PrivateIP: ptr.To(true), Tags: []string{"test instance"}, - FirewallID: 123, OSDisk: ptr.To(InstanceDisk{ DiskID: 0, Size: *resource.NewQuantity(12, resource.DecimalSI), diff --git a/api/v1alpha1/linodeobjectstoragebucket_conversion_test.go b/api/v1alpha1/linodeobjectstoragebucket_conversion_test.go index 83f6276be..78c53e960 100644 --- a/api/v1alpha1/linodeobjectstoragebucket_conversion_test.go +++ b/api/v1alpha1/linodeobjectstoragebucket_conversion_test.go @@ -59,7 +59,6 @@ func TestLinodeObjectStorageBucketConvertTo(t *testing.T) { Namespace: "default", Name: "cred-secret", }, - SecretType: "Opaque", }, Status: infrav1alpha2.LinodeObjectStorageBucketStatus{}, } @@ -95,7 +94,6 @@ func TestLinodeObjectStorageBucketFrom(t *testing.T) { Namespace: "default", Name: "cred-secret", }, - SecretType: "Opaque", }, Status: infrav1alpha2.LinodeObjectStorageBucketStatus{}, } @@ -103,7 +101,7 @@ func TestLinodeObjectStorageBucketFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-bucket", Annotations: map[string]string{ - ConversionDataAnnotation: `{"spec":{"credentialsRef":{"name":"cred-secret","namespace":"default"},"region":"us-mia-1","secretType":"Opaque"},"status":{"ready":false}}`, + ConversionDataAnnotation: `{"spec":{"credentialsRef":{"name":"cred-secret","namespace":"default"},"region":"us-mia-1"},"status":{"ready":false}}`, }, }, Spec: LinodeObjectStorageBucketSpec{ @@ -113,7 +111,7 @@ func TestLinodeObjectStorageBucketFrom(t *testing.T) { Name: "cred-secret", }, KeyGeneration: ptr.To(0), - SecretType: "Opaque", + SecretType: DefaultSecretTypeObjectStorageBucket, }, Status: LinodeObjectStorageBucketStatus{}, } diff --git 
a/api/v1alpha1/linodeobjectstoragebucket_types.go b/api/v1alpha1/linodeobjectstoragebucket_types.go index 16e01f1b2..cfbc42de1 100644 --- a/api/v1alpha1/linodeobjectstoragebucket_types.go +++ b/api/v1alpha1/linodeobjectstoragebucket_types.go @@ -25,7 +25,8 @@ import ( const ( // ObjectStorageBucketFinalizer allows ReconcileLinodeObjectStorageBucket to clean up Linode resources associated // with LinodeObjectStorageBucket before removing it from the apiserver. - ObjectStorageBucketFinalizer = "linodeobjectstoragebucket.infrastructure.cluster.x-k8s.io" + ObjectStorageBucketFinalizer = "linodeobjectstoragebucket.infrastructure.cluster.x-k8s.io" + DefaultSecretTypeObjectStorageBucket = "addons.cluster.x-k8s.io/resource-set" ) // LinodeObjectStorageBucketSpec defines the desired state of LinodeObjectStorageBucket diff --git a/api/v1alpha1/zz_generated.conversion.go b/api/v1alpha1/zz_generated.conversion.go index 0c0672efd..975be8c87 100644 --- a/api/v1alpha1/zz_generated.conversion.go +++ b/api/v1alpha1/zz_generated.conversion.go @@ -170,11 +170,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*LinodeMachineSpec)(nil), (*v1alpha2.LinodeMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(a.(*LinodeMachineSpec), b.(*v1alpha2.LinodeMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*LinodeMachineStatus)(nil), (*v1alpha2.LinodeMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_LinodeMachineStatus_To_v1alpha2_LinodeMachineStatus(a.(*LinodeMachineStatus), b.(*v1alpha2.LinodeMachineStatus), scope) }); err != nil { @@ -305,6 +300,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*LinodeMachineSpec)(nil), (*v1alpha2.LinodeMachineSpec)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(a.(*LinodeMachineSpec), b.(*v1alpha2.LinodeMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*LinodeObjectStorageBucketSpec)(nil), (*v1alpha2.LinodeObjectStorageBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_LinodeObjectStorageBucketSpec_To_v1alpha2_LinodeObjectStorageBucketSpec(a.(*LinodeObjectStorageBucketSpec), b.(*v1alpha2.LinodeObjectStorageBucketSpec), scope) }); err != nil { @@ -765,11 +765,6 @@ func autoConvert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(in *Li return nil } -// Convert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec is an autogenerated conversion function. -func Convert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(in *LinodeMachineSpec, out *v1alpha2.LinodeMachineSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_LinodeMachineSpec_To_v1alpha2_LinodeMachineSpec(in, out, s) -} - func autoConvert_v1alpha2_LinodeMachineSpec_To_v1alpha1_LinodeMachineSpec(in *v1alpha2.LinodeMachineSpec, out *LinodeMachineSpec, s conversion.Scope) error { out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.InstanceID = (*int)(unsafe.Pointer(in.InstanceID)) @@ -792,6 +787,7 @@ func autoConvert_v1alpha2_LinodeMachineSpec_To_v1alpha1_LinodeMachineSpec(in *v1 out.CredentialsRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsRef)) // WARNING: in.Configuration requires manual conversion: does not exist in peer-type // WARNING: in.PlacementGroupRef requires manual conversion: does not exist in peer-type + // WARNING: in.FirewallRef requires manual conversion: does not exist in peer-type return nil } @@ -1014,7 +1010,7 @@ func autoConvert_v1alpha1_LinodeObjectStorageBucketSpec_To_v1alpha2_LinodeObject // WARNING: in.Cluster requires manual conversion: does not exist in peer-type out.CredentialsRef 
= (*v1.SecretReference)(unsafe.Pointer(in.CredentialsRef)) // WARNING: in.KeyGeneration requires manual conversion: does not exist in peer-type - out.SecretType = in.SecretType + // WARNING: in.SecretType requires manual conversion: does not exist in peer-type return nil } @@ -1023,7 +1019,6 @@ func autoConvert_v1alpha2_LinodeObjectStorageBucketSpec_To_v1alpha1_LinodeObject // WARNING: in.ACL requires manual conversion: does not exist in peer-type // WARNING: in.CorsEnabled requires manual conversion: does not exist in peer-type out.CredentialsRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsRef)) - out.SecretType = in.SecretType return nil } diff --git a/api/v1alpha2/linodemachine_types.go b/api/v1alpha2/linodemachine_types.go index b3b52792b..7e27f82a8 100644 --- a/api/v1alpha2/linodemachine_types.go +++ b/api/v1alpha2/linodemachine_types.go @@ -38,6 +38,7 @@ type LinodeMachineSpec struct { ProviderID *string `json:"providerID,omitempty"` // InstanceID is the Linode instance ID for this machine. // +optional + // +kubebuilder:deprecatedversion:warning="ProviderID deprecates InstanceID" InstanceID *int `json:"instanceID,omitempty"` // +kubebuilder:validation:Required @@ -67,6 +68,7 @@ type LinodeMachineSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Tags []string `json:"tags,omitempty"` // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:deprecatedversion:warning="Firewalls should be referenced via FirewallRef" FirewallID int `json:"firewallID,omitempty"` // OSDisk is configuration for the root disk that includes the OS, // if not specified this defaults to whatever space is not taken up by the DataDisks @@ -96,6 +98,10 @@ type LinodeMachineSpec struct { // +optional // PlacementGroupRef is a reference to a placement group object. This makes the linode to be launched in that specific group. 
PlacementGroupRef *corev1.ObjectReference `json:"placementGroupRef,omitempty"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + // FirewallRef is a reference to a firewall object. This makes the linode use the specified firewall. + FirewallRef *corev1.ObjectReference `json:"firewallRef,omitempty"` } // InstanceDisk defines a list of disks to use for an instance @@ -210,7 +216,7 @@ type LinodeMachineStatus struct { // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this LinodeMachine belongs" // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="Linode instance state" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" -// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="Linode instance ID" +// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID" // +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this LinodeMachine" // LinodeMachine is the Schema for the linodemachines API diff --git a/api/v1alpha2/linodeobjectstoragebucket_types.go b/api/v1alpha2/linodeobjectstoragebucket_types.go index 4a3371444..9b74067ea 100644 --- a/api/v1alpha2/linodeobjectstoragebucket_types.go +++ b/api/v1alpha2/linodeobjectstoragebucket_types.go @@ -26,13 +26,10 @@ type ObjectStorageACL string // ObjectStorageACL options represent the access control level of a bucket. const ( - // ObjectStorageBucketFinalizer allows ReconcileLinodeObjectStorageBucket to clean up Linode resources associated - // with LinodeObjectStorageBucket before removing it from the apiserver. 
- ObjectStorageBucketFinalizer = "linodeobjectstoragebucket.infrastructure.cluster.x-k8s.io" - ACLPrivate ObjectStorageACL = "private" - ACLPublicRead ObjectStorageACL = "public-read" - ACLAuthenticatedRead ObjectStorageACL = "authenticated-read" - ACLPublicReadWrite ObjectStorageACL = "public-read-write" + ACLPrivate ObjectStorageACL = "private" + ACLPublicRead ObjectStorageACL = "public-read" + ACLAuthenticatedRead ObjectStorageACL = "authenticated-read" + ACLPublicReadWrite ObjectStorageACL = "public-read-write" ) // LinodeObjectStorageBucketSpec defines the desired state of LinodeObjectStorageBucket @@ -44,7 +41,7 @@ type LinodeObjectStorageBucketSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Region string `json:"region"` - // ACL sets The Access Control Level of the bucket using a canned ACL string + // Acl sets the Access Control Level of the bucket using a canned ACL string // +optional // +kubebuilder:default=private // +kubebuilder:validation:Enum=private;public-read;authenticated-read;public-read-write @@ -53,17 +50,12 @@ type LinodeObjectStorageBucketSpec struct { // corsEnabled enables for all origins in the bucket .If set to false, CORS is disabled for all origins in the bucket // +optional // +kubebuilder:default=true - CorsEnabled *bool `json:"corsEnabled,omitempty"` + CorsEnabled bool `json:"corsEnabled,omitempty"` // CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning the bucket. // If not supplied then the credentials of the controller will be used. // +optional CredentialsRef *corev1.SecretReference `json:"credentialsRef"` - - // SecretType sets the type for the bucket-details secret that will be generated by the controller. 
- // +optional - // +kubebuilder:default=addons.cluster.x-k8s.io/resource-set - SecretType string `json:"secretType,omitempty"` } // LinodeObjectStorageBucketStatus defines the observed state of LinodeObjectStorageBucket diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index cbf30fdb6..c16a8c698 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -612,6 +612,11 @@ func (in *LinodeMachineSpec) DeepCopyInto(out *LinodeMachineSpec) { *out = new(v1.ObjectReference) **out = **in } + if in.FirewallRef != nil { + in, out := &in.FirewallRef, &out.FirewallRef + *out = new(v1.ObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeMachineSpec. @@ -838,11 +843,6 @@ func (in *LinodeObjectStorageBucketList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinodeObjectStorageBucketSpec) DeepCopyInto(out *LinodeObjectStorageBucketSpec) { *out = *in - if in.CorsEnabled != nil { - in, out := &in.CorsEnabled, &out.CorsEnabled - *out = new(bool) - **out = **in - } if in.CredentialsRef != nil { in, out := &in.CredentialsRef, &out.CredentialsRef *out = new(v1.SecretReference) diff --git a/clients/clients.go b/clients/clients.go index 533b8fab0..93ff0d2a9 100644 --- a/clients/clients.go +++ b/clients/clients.go @@ -75,6 +75,8 @@ type LinodeNodeBalancerClient interface { type LinodeObjectStorageClient interface { GetObjectStorageBucket(ctx context.Context, regionID, label string) (*linodego.ObjectStorageBucket, error) CreateObjectStorageBucket(ctx context.Context, opts linodego.ObjectStorageBucketCreateOptions) (*linodego.ObjectStorageBucket, error) + GetObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string) (*linodego.ObjectStorageBucketAccess, error) + UpdateObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string, opts linodego.ObjectStorageBucketUpdateAccessOptions) error GetObjectStorageKey(ctx context.Context, keyID int) (*linodego.ObjectStorageKey, error) CreateObjectStorageKey(ctx context.Context, opts linodego.ObjectStorageKeyCreateOptions) (*linodego.ObjectStorageKey, error) DeleteObjectStorageKey(ctx context.Context, keyID int) error diff --git a/cloud/scope/object_storage_bucket.go b/cloud/scope/object_storage_bucket.go index 5a77928db..4e3dd7f4e 100644 --- a/cloud/scope/object_storage_bucket.go +++ b/cloud/scope/object_storage_bucket.go @@ -8,7 +8,6 @@ import ( "github.com/go-logr/logr" "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" @@ -88,13 +87,3 @@ func (s *ObjectStorageBucketScope) PatchObject(ctx context.Context) error { func (s *ObjectStorageBucketScope) Close(ctx context.Context) error { return s.PatchObject(ctx) } - -// 
AddFinalizer adds a finalizer if not present and immediately patches the -// object to avoid any race conditions. -func (s *ObjectStorageBucketScope) AddFinalizer(ctx context.Context) error { - if controllerutil.AddFinalizer(s.Bucket, infrav1alpha2.ObjectStorageBucketFinalizer) { - return s.Close(ctx) - } - - return nil -} diff --git a/cloud/scope/object_storage_bucket_test.go b/cloud/scope/object_storage_bucket_test.go index e0c0a8064..2d204305a 100644 --- a/cloud/scope/object_storage_bucket_test.go +++ b/cloud/scope/object_storage_bucket_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -135,7 +134,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }, }, { - name: "Error - ValidateClusterScopeParams triggers error because ClusterScopeParams is empty", + name: "Error - ValidateObjectStorageBucketScopeParams triggers error because ObjectStorageBucketScopeParams is empty", args: args{ apiKey: "apikey", params: ObjectStorageBucketScopeParams{}, @@ -159,7 +158,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }, }, { - name: "Error - Using getCredentialDataFromRef(), func returns an error. Unable to create a valid ClusterScope", + name: "Error - Using getCredentialDataFromRef(), func returns an error. Unable to create a valid ObjectStorageBucketScope", args: args{ apiKey: "test-key", params: ObjectStorageBucketScopeParams{ @@ -181,7 +180,7 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }, }, { - name: "Error - createLinodeCluster throws an error for passing empty apiKey. Unable to create a valid ClusterScope", + name: "Error - createObjectStorageBucket throws an error for passing empty apiKey. 
Unable to create a valid ObjectStorageBucketScope", args: args{ apiKey: "", params: ObjectStorageBucketScopeParams{ @@ -193,6 +192,21 @@ func TestNewObjectStorageBucketScope(t *testing.T) { expectedErr: fmt.Errorf("failed to create linode client: token cannot be empty"), expects: func(mock *mock.MockK8sClient) {}, }, + { + name: "Error - kind is not registered", + args: args{ + apiKey: "apikey", + params: ObjectStorageBucketScopeParams{ + Client: nil, + Bucket: &infrav1alpha2.LinodeObjectStorageBucket{}, + Logger: &logr.Logger{}, + }, + }, + expectedErr: fmt.Errorf("no kind is registered for the type v1alpha2.LinodeObjectStorageBucket"), + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Scheme().Return(runtime.NewScheme()) + }, + }, } for _, tt := range tests { testcase := tt @@ -218,73 +232,3 @@ func TestNewObjectStorageBucketScope(t *testing.T) { }) } } - -func TestObjectStorageBucketScopeMethods(t *testing.T) { - t.Parallel() - tests := []struct { - name string - Bucket *infrav1alpha2.LinodeObjectStorageBucket - expects func(mock *mock.MockK8sClient) - }{ - { - name: "Success - finalizer should be added to the Linode Object Storage Bucket object", - Bucket: &infrav1alpha2.LinodeObjectStorageBucket{}, - expects: func(mock *mock.MockK8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { - s := runtime.NewScheme() - infrav1alpha2.AddToScheme(s) - return s - }).Times(2) - mock.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - }, - }, - { - name: "Failure - finalizer should not be added to the Bucket object. 
Function returns nil since it was already present", - Bucket: &infrav1alpha2.LinodeObjectStorageBucket{ - ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1alpha2.ObjectStorageBucketFinalizer}, - }, - }, - expects: func(mock *mock.MockK8sClient) { - mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { - s := runtime.NewScheme() - infrav1alpha2.AddToScheme(s) - return s - }).Times(1) - }, - }, - } - for _, tt := range tests { - testcase := tt - t.Run(testcase.name, func(t *testing.T) { - t.Parallel() - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockK8sClient := mock.NewMockK8sClient(ctrl) - - testcase.expects(mockK8sClient) - - objScope, err := NewObjectStorageBucketScope( - context.Background(), - ClientConfig{Token: "test-key"}, - ObjectStorageBucketScopeParams{ - Client: mockK8sClient, - Bucket: testcase.Bucket, - Logger: &logr.Logger{}, - }) - if err != nil { - t.Errorf("NewObjectStorageBucketScope() error = %v", err) - } - - if err := objScope.AddFinalizer(context.Background()); err != nil { - t.Errorf("ClusterScope.AddFinalizer() error = %v", err) - } - - if objScope.Bucket.Finalizers[0] != infrav1alpha2.ObjectStorageBucketFinalizer { - t.Errorf("Finalizer was not added") - } - }) - } -} diff --git a/cloud/scope/object_storage_key.go b/cloud/scope/object_storage_key.go index 6158852b3..201da7560 100644 --- a/cloud/scope/object_storage_key.go +++ b/cloud/scope/object_storage_key.go @@ -107,6 +107,7 @@ const ( apiVersion: v1 metadata: name: %s + namespace: kube-system stringData: bucket_name: %s bucket_region: %s diff --git a/cloud/services/domains_test.go b/cloud/services/domains_test.go index 199789a80..5edebd9a0 100644 --- a/cloud/services/domains_test.go +++ b/cloud/services/domains_test.go @@ -66,7 +66,7 @@ func TestAddIPToEdgeDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ 
Addresses: []clusterv1.MachineAddress{ @@ -129,7 +129,7 @@ func TestAddIPToEdgeDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -228,7 +228,7 @@ func TestRemoveIPFromEdgeDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -298,7 +298,7 @@ func TestRemoveIPFromEdgeDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -398,7 +398,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -472,7 +472,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -545,7 +545,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -613,7 +613,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -687,7 +687,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: 
infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -754,7 +754,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: nil, @@ -811,7 +811,7 @@ func TestAddIPToDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -916,7 +916,7 @@ func TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -991,7 +991,7 @@ func TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -1066,7 +1066,7 @@ func TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -1113,7 +1113,7 @@ func TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -1174,7 +1174,7 @@ func TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ @@ -1240,7 +1240,7 @@ func 
TestDeleteIPFromDNS(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, Status: infrav1alpha2.LinodeMachineStatus{ Addresses: []clusterv1.MachineAddress{ diff --git a/cloud/services/loadbalancers.go b/cloud/services/loadbalancers.go index ee2cf81f9..a545a3974 100644 --- a/cloud/services/loadbalancers.go +++ b/cloud/services/loadbalancers.go @@ -122,8 +122,13 @@ func AddNodeToNB( return nil } + instanceID, err := util.GetInstanceID(machineScope.LinodeMachine.Spec.ProviderID) + if err != nil { + logger.Error(err, "Failed to parse instance ID from provider ID") + return err + } // Get the private IP that was assigned - addresses, err := machineScope.LinodeClient.GetInstanceIPAddresses(ctx, *machineScope.LinodeMachine.Spec.InstanceID) + addresses, err := machineScope.LinodeClient.GetInstanceIPAddresses(ctx, instanceID) if err != nil { logger.Error(err, "Failed get instance IP addresses") @@ -200,11 +205,16 @@ func DeleteNodeFromNB( return nil } - err := machineScope.LinodeClient.DeleteNodeBalancerNode( + instanceID, err := util.GetInstanceID(machineScope.LinodeMachine.Spec.ProviderID) + if err != nil { + logger.Error(err, "Failed to parse instance ID from provider ID") + return err + } + err = machineScope.LinodeClient.DeleteNodeBalancerNode( ctx, *machineScope.LinodeCluster.Spec.Network.NodeBalancerID, *machineScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID, - *machineScope.LinodeMachine.Spec.InstanceID, + instanceID, ) if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "Failed to update Node Balancer") @@ -217,7 +227,7 @@ func DeleteNodeFromNB( ctx, *machineScope.LinodeCluster.Spec.Network.NodeBalancerID, *portConfig.NodeBalancerConfigID, - *machineScope.LinodeMachine.Spec.InstanceID, + instanceID, ) if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "Failed to update Node Balancer") diff --git 
a/cloud/services/loadbalancers_test.go b/cloud/services/loadbalancers_test.go index cedd17277..28ca3f1b0 100644 --- a/cloud/services/loadbalancers_test.go +++ b/cloud/services/loadbalancers_test.go @@ -450,7 +450,7 @@ func TestAddNodeToNBConditions(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -488,7 +488,7 @@ func TestAddNodeToNBConditions(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -522,7 +522,7 @@ func TestAddNodeToNBConditions(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -631,7 +631,7 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -687,7 +687,7 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, }, @@ -782,7 +782,7 @@ func TestDeleteNodeFromNB(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, LinodeCluster: &infrav1alpha2.LinodeCluster{ @@ -818,7 +818,7 @@ func TestDeleteNodeFromNB(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, LinodeCluster: &infrav1alpha2.LinodeCluster{ @@ -867,7 +867,7 @@ func TestDeleteNodeFromNB(t *testing.T) { UID: "test-uid", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, LinodeCluster: &infrav1alpha2.LinodeCluster{ @@ -910,7 +910,7 @@ func TestDeleteNodeFromNB(t *testing.T) { UID: "test-uid", }, 
Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(123), + ProviderID: ptr.To("linode://123"), }, }, LinodeCluster: &infrav1alpha2.LinodeCluster{ diff --git a/cloud/services/object_storage_buckets.go b/cloud/services/object_storage_buckets.go index cd912dc57..528970da7 100644 --- a/cloud/services/object_storage_buckets.go +++ b/cloud/services/object_storage_buckets.go @@ -11,7 +11,7 @@ import ( "github.com/linode/cluster-api-provider-linode/util" ) -func EnsureObjectStorageBucket(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (*linodego.ObjectStorageBucket, error) { +func EnsureAndUpdateObjectStorageBucket(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (*linodego.ObjectStorageBucket, error) { bucket, err := bScope.LinodeClient.GetObjectStorageBucket( ctx, bScope.Bucket.Spec.Region, @@ -20,24 +20,45 @@ func EnsureObjectStorageBucket(ctx context.Context, bScope *scope.ObjectStorageB if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { return nil, fmt.Errorf("failed to get bucket from region %s: %w", bScope.Bucket.Spec.Region, err) } - if bucket != nil { - bScope.Logger.Info("Bucket exists") + if bucket == nil { + opts := linodego.ObjectStorageBucketCreateOptions{ + Region: bScope.Bucket.Spec.Region, + Label: bScope.Bucket.Name, + ACL: linodego.ObjectStorageACL(bScope.Bucket.Spec.ACL), + CorsEnabled: &bScope.Bucket.Spec.CorsEnabled, + } + + if bucket, err = bScope.LinodeClient.CreateObjectStorageBucket(ctx, opts); err != nil { + return nil, fmt.Errorf("failed to create bucket: %w", err) + } + + bScope.Logger.Info("Created bucket") return bucket, nil } - opts := linodego.ObjectStorageBucketCreateOptions{ - Region: bScope.Bucket.Spec.Region, - Label: bScope.Bucket.Name, - ACL: linodego.ObjectStorageACL(bScope.Bucket.Spec.ACL), - CorsEnabled: bScope.Bucket.Spec.CorsEnabled, + bucketAccess, err := bScope.LinodeClient.GetObjectStorageBucketAccess( + ctx, + bucket.Region, + bucket.Label, + ) + if err != nil { + return nil, 
fmt.Errorf("failed to get bucket access details for %s: %w", bScope.Bucket.Name, err) } - if bucket, err = bScope.LinodeClient.CreateObjectStorageBucket(ctx, opts); err != nil { - return nil, fmt.Errorf("failed to create bucket: %w", err) + if bucketAccess.ACL == linodego.ObjectStorageACL(bScope.Bucket.Spec.ACL) && bucketAccess.CorsEnabled == bScope.Bucket.Spec.CorsEnabled { + return bucket, nil + } + + opts := linodego.ObjectStorageBucketUpdateAccessOptions{ + ACL: linodego.ObjectStorageACL(bScope.Bucket.Spec.ACL), + CorsEnabled: &bScope.Bucket.Spec.CorsEnabled, + } + if err = bScope.LinodeClient.UpdateObjectStorageBucketAccess(ctx, bucket.Region, bucket.Label, opts); err != nil { + return nil, fmt.Errorf("failed to update the bucket access options for %s: %w", bScope.Bucket.Name, err) } - bScope.Logger.Info("Created bucket") + bScope.Logger.Info("Updated Bucket") return bucket, nil } diff --git a/cloud/services/object_storage_buckets_test.go b/cloud/services/object_storage_buckets_test.go index da29152de..601d32340 100644 --- a/cloud/services/object_storage_buckets_test.go +++ b/cloud/services/object_storage_buckets_test.go @@ -33,7 +33,9 @@ func TestEnsureObjectStorageBucket(t *testing.T) { Name: "test-bucket", }, Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{ - Region: "test-region", + Region: "test-region", + ACL: infrav1alpha2.ACLPrivate, + CorsEnabled: true, }, }, }, @@ -44,6 +46,10 @@ func TestEnsureObjectStorageBucket(t *testing.T) { mockClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{ Label: "test-bucket", }, nil) + mockClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucketAccess{ + ACL: linodego.ACLPrivate, + CorsEnabled: true, + }, nil) }, }, { @@ -103,6 +109,78 @@ func TestEnsureObjectStorageBucket(t *testing.T) { }, expectedError: fmt.Errorf("failed to create bucket:"), }, + { + name: "Success - Successfully 
update the OBJ bucket", + bScope: &scope.ObjectStorageBucketScope{ + Bucket: &infrav1alpha2.LinodeObjectStorageBucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{ + Region: "test-region", + ACL: infrav1alpha2.ACLPublicRead, + CorsEnabled: true, + }, + }, + }, + want: &linodego.ObjectStorageBucket{ + Label: "test-bucket", + }, + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{ + Label: "test-bucket", + }, nil) + mockClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucketAccess{ + ACL: linodego.ACLPrivate, + CorsEnabled: true, + }, nil) + mockClient.EXPECT().UpdateObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + }, + { + name: "Error - unable to update the OBJ bucket", + bScope: &scope.ObjectStorageBucketScope{ + Bucket: &infrav1alpha2.LinodeObjectStorageBucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{ + Region: "test-region", + }, + }, + }, + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{ + Label: "test-bucket", + }, nil) + mockClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucketAccess{ + ACL: linodego.ACLPrivate, + CorsEnabled: true, + }, nil) + mockClient.EXPECT().UpdateObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("error in updating object storage bucket")) + }, + expectedError: fmt.Errorf("failed to update the bucket access options"), + }, + { + name: "Error - unable to fetch the OBJ bucket Access", + bScope: 
&scope.ObjectStorageBucketScope{ + Bucket: &infrav1alpha2.LinodeObjectStorageBucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{ + Region: "test-region", + }, + }, + }, + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().GetObjectStorageBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageBucket{ + Label: "test-bucket", + }, nil) + mockClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error in fetching object storage bucket access details")) + }, + expectedError: fmt.Errorf("failed to get bucket access details"), + }, } for _, tt := range tests { testcase := tt @@ -118,7 +196,7 @@ func TestEnsureObjectStorageBucket(t *testing.T) { testcase.expects(mockClient) - got, err := EnsureObjectStorageBucket(context.Background(), testcase.bScope) + got, err := EnsureAndUpdateObjectStorageBucket(context.Background(), testcase.bScope) if testcase.expectedError != nil { assert.ErrorContains(t, err, testcase.expectedError.Error()) } else { diff --git a/cloud/services/stackscripts.go b/cloud/services/stackscripts.go index 138a2b021..7c624c4d2 100644 --- a/cloud/services/stackscripts.go +++ b/cloud/services/stackscripts.go @@ -32,7 +32,7 @@ func EnsureStackscript(ctx context.Context, machineScope *scope.MachineScope) (i if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { return 0, fmt.Errorf("failed to get stackscript with label %s: %w", stackscriptName, err) } - if stackscripts != nil { + if len(stackscripts) != 0 { return stackscripts[0].ID, nil } stackscriptCreateOptions := linodego.StackscriptCreateOptions{ diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml index be1ce5d5c..fc38c501d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml +++ 
b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml @@ -399,9 +399,9 @@ spec: jsonPath: .status.ready name: Ready type: string - - description: Linode instance ID + - description: Provider ID jsonPath: .spec.providerID - name: InstanceID + name: ProviderID type: string - description: Machine object which owns with this LinodeMachine jsonPath: .metadata.ownerReferences[?(@.kind=="Machine")].name @@ -536,6 +536,54 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + firewallRef: + description: FirewallRef is a reference to a firewall object. This + makes the linode use the specified firewall. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf group: type: string x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml index 964a0208d..9b127c45b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml @@ -403,6 +403,54 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + firewallRef: + description: FirewallRef is a reference to a firewall object. + This makes the linode use the specified firewall. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. 
+ TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf group: type: string x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml index ac9a20bd4..5eadb8d28 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml @@ -224,7 +224,7 @@ spec: properties: acl: default: private - description: ACL sets The Access Control Level of the bucket using + description: Acl sets the Access Control Level of the bucket using a canned ACL string enum: - private @@ -259,11 +259,6 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf - secretType: - default: addons.cluster.x-k8s.io/resource-set - description: SecretType sets 
the type for the bucket-details secret - that will be generated by the controller. - type: string required: - region type: object diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go index 339f7bee9..bf2080b10 100644 --- a/controller/linodecluster_controller.go +++ b/controller/linodecluster_controller.go @@ -306,6 +306,7 @@ func (r *LinodeClusterReconciler) SetupWithManager(mgr ctrl.Manager, options crc err := ctrl.NewControllerManagedBy(mgr). For(&infrav1alpha2.LinodeCluster{}). WithOptions(options). + // we care about reconciling on metadata updates for LinodeClusters because the OwnerRef for the Cluster is needed WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue)). Watches( &clusterv1.Cluster{}, diff --git a/controller/linodemachine_controller.go b/controller/linodemachine_controller.go index ab571ecf9..939858a94 100644 --- a/controller/linodemachine_controller.go +++ b/controller/linodemachine_controller.go @@ -286,9 +286,18 @@ func (r *LinodeMachineReconciler) reconcileCreate( } tags := []string{machineScope.LinodeCluster.Name} + var instanceID *int + if machineScope.LinodeMachine.Spec.ProviderID != nil { + instID, err := util.GetInstanceID(machineScope.LinodeMachine.Spec.ProviderID) + if err != nil { + logger.Error(err, "Failed to parse instance ID from provider ID") + return ctrl.Result{}, err + } + instanceID = util.Pointer(instID) + } listFilter := util.Filter{ - ID: machineScope.LinodeMachine.Spec.InstanceID, + ID: instanceID, Label: machineScope.LinodeMachine.Name, Tags: tags, } @@ -342,7 +351,7 @@ func (r *LinodeMachineReconciler) reconcileCreate( } conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightCreated) - machineScope.LinodeMachine.Spec.InstanceID = &linodeInstance.ID + machineScope.LinodeMachine.Spec.ProviderID = util.Pointer(fmt.Sprintf("linode://%d", linodeInstance.ID)) return r.reconcileInstanceCreate(ctx, logger, machineScope, 
linodeInstance) } @@ -445,8 +454,6 @@ func (r *LinodeMachineReconciler) reconcileInstanceCreate( conditions.MarkTrue(machineScope.LinodeMachine, ConditionPreflightLoadBalancing) } - machineScope.LinodeMachine.Spec.ProviderID = util.Pointer(fmt.Sprintf("linode://%d", linodeInstance.ID)) - // Set the instance state to signal preflight process is done machineScope.LinodeMachine.Status.InstanceState = util.Pointer(linodego.InstanceOffline) @@ -667,11 +674,13 @@ func (r *LinodeMachineReconciler) reconcileUpdate( res = ctrl.Result{} - if machineScope.LinodeMachine.Spec.InstanceID == nil { - return res, nil, errors.New("missing instance ID") + instanceID, err := util.GetInstanceID(machineScope.LinodeMachine.Spec.ProviderID) + if err != nil { + logger.Error(err, "Failed to parse instance ID from provider ID") + return ctrl.Result{}, nil, err } - if linodeInstance, err = machineScope.LinodeClient.GetInstance(ctx, *machineScope.LinodeMachine.Spec.InstanceID); err != nil { + if linodeInstance, err = machineScope.LinodeClient.GetInstance(ctx, instanceID); err != nil { if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "Failed to get Linode machine instance") @@ -681,7 +690,6 @@ func (r *LinodeMachineReconciler) reconcileUpdate( // Create new machine machineScope.LinodeMachine.Spec.ProviderID = nil - machineScope.LinodeMachine.Spec.InstanceID = nil machineScope.LinodeMachine.Status.InstanceState = nil machineScope.LinodeMachine.Status.Conditions = nil @@ -726,7 +734,7 @@ func (r *LinodeMachineReconciler) reconcileDelete( ) (ctrl.Result, error) { logger.Info("deleting machine") - if machineScope.LinodeMachine.Spec.InstanceID == nil { + if machineScope.LinodeMachine.Spec.ProviderID == nil { logger.Info("Machine ID is missing, nothing to do") if err := machineScope.RemoveCredentialsRefFinalizer(ctx); err != nil { @@ -747,7 +755,13 @@ func (r *LinodeMachineReconciler) reconcileDelete( return ctrl.Result{}, fmt.Errorf("Failed to remove linodecluster 
finalizer %w", err) } - if err := machineScope.LinodeClient.DeleteInstance(ctx, *machineScope.LinodeMachine.Spec.InstanceID); err != nil { + instanceID, err := util.GetInstanceID(machineScope.LinodeMachine.Spec.ProviderID) + if err != nil { + logger.Error(err, "Failed to parse instance ID from provider ID") + return ctrl.Result{}, err + } + + if err := machineScope.LinodeClient.DeleteInstance(ctx, instanceID); err != nil { if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "Failed to delete Linode instance") @@ -766,7 +780,6 @@ func (r *LinodeMachineReconciler) reconcileDelete( r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeNormal, clusterv1.DeletedReason, "instance has cleaned up") machineScope.LinodeMachine.Spec.ProviderID = nil - machineScope.LinodeMachine.Spec.InstanceID = nil machineScope.LinodeMachine.Status.InstanceState = nil if err := machineScope.RemoveCredentialsRefFinalizer(ctx); err != nil { @@ -809,6 +822,7 @@ func (r *LinodeMachineReconciler) SetupWithManager(mgr ctrl.Manager, options crc handler.EnqueueRequestsFromMapFunc(linodeMachineMapper), builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), ). + // we care about reconciling on metadata updates for LinodeMachines because the OwnerRef for the Machine is needed WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue)). 
Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) if err != nil { diff --git a/controller/linodemachine_controller_helpers.go b/controller/linodemachine_controller_helpers.go index f44a03a45..161589487 100644 --- a/controller/linodemachine_controller_helpers.go +++ b/controller/linodemachine_controller_helpers.go @@ -119,6 +119,15 @@ func (r *LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineSc } } + if machineScope.LinodeMachine.Spec.FirewallRef != nil { + fwID, err := r.getFirewallID(ctx, machineScope, logger) + if err != nil { + logger.Error(err, "Failed to get Firewall config") + return nil, err + } + createConfig.FirewallID = fwID + } + return createConfig, nil } @@ -335,6 +344,31 @@ func (r *LinodeMachineReconciler) getPlacementGroupID(ctx context.Context, machi return *linodePlacementGroup.Spec.PGID, nil } +func (r *LinodeMachineReconciler) getFirewallID(ctx context.Context, machineScope *scope.MachineScope, logger logr.Logger) (int, error) { + name := machineScope.LinodeMachine.Spec.FirewallRef.Name + namespace := machineScope.LinodeMachine.Spec.FirewallRef.Namespace + if namespace == "" { + namespace = machineScope.LinodeMachine.Namespace + } + + logger = logger.WithValues("firewallName", name, "firewallNamespace", namespace) + + linodeFirewall := infrav1alpha2.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + objectKey := client.ObjectKeyFromObject(&linodeFirewall) + err := r.Get(ctx, objectKey, &linodeFirewall) + if err != nil { + logger.Error(err, "Failed to fetch LinodeFirewall") + return -1, err + } + + return *linodeFirewall.Spec.FirewallID, nil +} + func (r *LinodeMachineReconciler) getVPCInterfaceConfig(ctx context.Context, machineScope *scope.MachineScope, logger logr.Logger) (*linodego.InstanceConfigInterfaceCreateOptions, error) { name := machineScope.LinodeCluster.Spec.VPCRef.Name namespace := 
machineScope.LinodeCluster.Spec.VPCRef.Namespace diff --git a/controller/linodemachine_controller_helpers_test.go b/controller/linodemachine_controller_helpers_test.go index 885c673dc..1a5fc9ef0 100644 --- a/controller/linodemachine_controller_helpers_test.go +++ b/controller/linodemachine_controller_helpers_test.go @@ -58,7 +58,6 @@ func TestLinodeMachineSpecToCreateInstanceConfig(t *testing.T) { BackupsEnabled: true, PrivateIP: util.Pointer(true), Tags: []string{"tag"}, - FirewallID: 1, } createConfig := linodeMachineSpecToInstanceCreateConfig(machineSpec) diff --git a/controller/linodemachine_controller_test.go b/controller/linodemachine_controller_test.go index c00e315d0..9a7041e44 100644 --- a/controller/linodemachine_controller_test.go +++ b/controller/linodemachine_controller_test.go @@ -41,6 +41,7 @@ import ( infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" "github.com/linode/cluster-api-provider-linode/cloud/scope" "github.com/linode/cluster-api-provider-linode/mock" + "github.com/linode/cluster-api-provider-linode/util" rutil "github.com/linode/cluster-api-provider-linode/util/reconciler" . 
"github.com/linode/cluster-api-provider-linode/mock/mocktest" @@ -112,7 +113,7 @@ var _ = Describe("create", Label("machine", "create"), func() { UID: "12345", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(0), + ProviderID: ptr.To("linode://123"), Type: "g6-nanode-1", Image: rutil.DefaultMachineControllerLinodeImage, DiskEncryption: string(linodego.InstanceDiskEncryptionEnabled), @@ -213,7 +214,6 @@ var _ = Describe("create", Label("machine", "create"), func() { Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightReady)).To(BeTrue()) Expect(*linodeMachine.Status.InstanceState).To(Equal(linodego.InstanceOffline)) - Expect(*linodeMachine.Spec.InstanceID).To(Equal(123)) Expect(*linodeMachine.Spec.ProviderID).To(Equal("linode://123")) Expect(linodeMachine.Status.Addresses).To(Equal([]clusterv1.MachineAddress{ {Type: clusterv1.MachineExternalIP, Address: "172.0.0.2"}, @@ -464,7 +464,6 @@ var _ = Describe("create", Label("machine", "create"), func() { Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightBootTriggered)).To(BeTrue()) Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightReady)).To(BeTrue()) - Expect(*linodeMachine.Spec.InstanceID).To(Equal(123)) Expect(*linodeMachine.Spec.ProviderID).To(Equal("linode://123")) Expect(linodeMachine.Status.Addresses).To(Equal([]clusterv1.MachineAddress{ {Type: clusterv1.MachineExternalIP, Address: "172.0.0.2"}, @@ -652,7 +651,6 @@ var _ = Describe("create", Label("machine", "create"), func() { Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightReady)).To(BeTrue()) Expect(*linodeMachine.Status.InstanceState).To(Equal(linodego.InstanceOffline)) - Expect(*linodeMachine.Spec.InstanceID).To(Equal(123)) Expect(*linodeMachine.Spec.ProviderID).To(Equal("linode://123")) Expect(linodeMachine.Status.Addresses).To(Equal([]clusterv1.MachineAddress{ {Type: clusterv1.MachineExternalIP, Address: "172.0.0.2"}, @@ -727,7 +725,7 @@ var _ = Describe("createDNS", Label("machine", "createDNS"), 
func() { UID: "12345", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(0), + ProviderID: ptr.To("linode://0"), Type: "g6-nanode-1", Image: rutil.DefaultMachineControllerLinodeImage, }, @@ -828,7 +826,6 @@ var _ = Describe("createDNS", Label("machine", "createDNS"), func() { Expect(rutil.ConditionTrue(&linodeMachine, ConditionPreflightReady)).To(BeTrue()) Expect(*linodeMachine.Status.InstanceState).To(Equal(linodego.InstanceOffline)) - Expect(*linodeMachine.Spec.InstanceID).To(Equal(123)) Expect(*linodeMachine.Spec.ProviderID).To(Equal("linode://123")) Expect(linodeMachine.Status.Addresses).To(Equal([]clusterv1.MachineAddress{ {Type: clusterv1.MachineExternalIP, Address: "172.0.0.2"}, @@ -859,7 +856,7 @@ var _ = Describe("machine-lifecycle", Ordered, Label("machine", "machine-lifecyc linodeMachine := &infrav1alpha2.LinodeMachine{ ObjectMeta: metadata, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(0), + ProviderID: ptr.To("linode://0"), Type: "g6-nanode-1", Image: rutil.DefaultMachineControllerLinodeImage, Configuration: &infrav1alpha2.InstanceConfiguration{Kernel: "test"}, @@ -950,23 +947,30 @@ var _ = Describe("machine-lifecycle", Ordered, Label("machine", "machine-lifecyc OneOf( Path( Call("machine is not created because there was an error creating instance", func(ctx context.Context, mck Mock) { - listInst := mck.LinodeClient.EXPECT(). - ListInstances(ctx, gomock.Any()). - Return([]linodego.Instance{}, nil) - getRegion := mck.LinodeClient.EXPECT(). - GetRegion(ctx, gomock.Any()). - After(listInst). - Return(&linodego.Region{Capabilities: []string{"Metadata"}}, nil) - getImage := mck.LinodeClient.EXPECT(). - GetImage(ctx, gomock.Any()). - After(getRegion). - Return(&linodego.Image{Capabilities: []string{"cloud-init"}}, nil) - mck.LinodeClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()). - After(getImage). 
- Return(nil, errors.New("failed to ensure instance")) }), OneOf( + Path(Result("create error", func(ctx context.Context, mck Mock) { + linodeMachine.Spec.ProviderID = util.Pointer("linode://foo") + _, err := reconciler.reconcile(ctx, mck.Logger(), mScope) + Expect(err).To(HaveOccurred()) + Expect(mck.Logs()).To(ContainSubstring("Failed to parse instance ID from provider ID")) + })), Path(Result("create requeues", func(ctx context.Context, mck Mock) { + linodeMachine.Spec.ProviderID = util.Pointer("linode://123") + listInst := mck.LinodeClient.EXPECT(). + ListInstances(ctx, gomock.Any()). + Return([]linodego.Instance{}, nil) + getRegion := mck.LinodeClient.EXPECT(). + GetRegion(ctx, gomock.Any()). + After(listInst). + Return(&linodego.Region{Capabilities: []string{"Metadata"}}, nil) + getImage := mck.LinodeClient.EXPECT(). + GetImage(ctx, gomock.Any()). + After(getRegion). + Return(&linodego.Image{Capabilities: []string{"cloud-init"}}, nil) + mck.LinodeClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()). + After(getImage). + Return(nil, errors.New("failed to ensure instance")) res, err := reconciler.reconcile(ctx, mck.Logger(), mScope) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(rutil.DefaultMachineControllerWaitForRunningDelay)) @@ -975,6 +979,20 @@ var _ = Describe("machine-lifecycle", Ordered, Label("machine", "machine-lifecyc Path(Result("create machine error - timeout error", func(ctx context.Context, mck Mock) { tempTimeout := reconciler.ReconcileTimeout reconciler.ReconcileTimeout = time.Nanosecond + listInst := mck.LinodeClient.EXPECT(). + ListInstances(ctx, gomock.Any()). + Return([]linodego.Instance{}, nil) + getRegion := mck.LinodeClient.EXPECT(). + GetRegion(ctx, gomock.Any()). + After(listInst). + Return(&linodego.Region{Capabilities: []string{"Metadata"}}, nil) + getImage := mck.LinodeClient.EXPECT(). + GetImage(ctx, gomock.Any()). + After(getRegion). 
+ Return(&linodego.Image{Capabilities: []string{"cloud-init"}}, nil) + mck.LinodeClient.EXPECT().CreateInstance(gomock.Any(), gomock.Any()). + After(getImage). + Return(nil, errors.New("failed to ensure instance")) _, err := reconciler.reconcile(ctx, mck.Logger(), mScope) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to ensure instance")) @@ -1147,7 +1165,6 @@ var _ = Describe("machine-lifecycle", Ordered, Label("machine", "machine-lifecyc Expect(rutil.ConditionTrue(linodeMachine, ConditionPreflightReady)).To(BeTrue()) Expect(*linodeMachine.Status.InstanceState).To(Equal(linodego.InstanceOffline)) - Expect(*linodeMachine.Spec.InstanceID).To(Equal(123)) Expect(*linodeMachine.Spec.ProviderID).To(Equal("linode://123")) Expect(linodeMachine.Status.Addresses).To(Equal([]clusterv1.MachineAddress{ {Type: clusterv1.MachineExternalIP, Address: "172.0.0.2"}, @@ -1185,11 +1202,11 @@ var _ = Describe("machine-delete", Ordered, Label("machine", "machine-delete"), Network: infrav1alpha2.NetworkSpec{}, }, } - instanceID := 12345 + providerID := "linode://12345" linodeMachine := &infrav1alpha2.LinodeMachine{ ObjectMeta: metadata, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: &instanceID, + ProviderID: &providerID, }, } machine := &clusterv1.Machine{ @@ -1235,11 +1252,20 @@ var _ = Describe("machine-delete", Ordered, Label("machine", "machine-delete"), OneOf( Path( Call("machine is not deleted because there was an error deleting instance", func(ctx context.Context, mck Mock) { - mck.LinodeClient.EXPECT().DeleteInstance(gomock.Any(), gomock.Any()). 
- Return(errors.New("failed to delete instance")) }), OneOf( + Path(Result("delete error", func(ctx context.Context, mck Mock) { + tmpProviderID := linodeMachine.Spec.ProviderID + linodeMachine.Spec.ProviderID = util.Pointer("linode://foo") + _, err := reconciler.reconcileDelete(ctx, mck.Logger(), mScope) + Expect(err).To(HaveOccurred()) + Expect(mck.Logs()).To(ContainSubstring("Failed to parse instance ID from provider ID")) + linodeMachine.Spec.ProviderID = tmpProviderID + + })), Path(Result("delete requeues", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteInstance(gomock.Any(), gomock.Any()). + Return(errors.New("failed to delete instance")) res, err := reconciler.reconcileDelete(ctx, mck.Logger(), mScope) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(rutil.DefaultMachineControllerRetryDelay)) @@ -1248,6 +1274,8 @@ var _ = Describe("machine-delete", Ordered, Label("machine", "machine-delete"), Path(Result("create machine error - timeout error", func(ctx context.Context, mck Mock) { tempTimeout := reconciler.ReconcileTimeout reconciler.ReconcileTimeout = time.Nanosecond + mck.LinodeClient.EXPECT().DeleteInstance(gomock.Any(), gomock.Any()). 
+ Return(errors.New("failed to delete instance")) _, err := reconciler.reconcileDelete(ctx, mck.Logger(), mScope) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to delete instance")) @@ -1275,6 +1303,7 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup") var reconciler *LinodeMachineReconciler var lpgReconciler *LinodePlacementGroupReconciler var linodePlacementGroup infrav1alpha2.LinodePlacementGroup + var linodeFirewall infrav1alpha2.LinodeFirewall var mockCtrl *gomock.Controller var testLogs *bytes.Buffer @@ -1343,6 +1372,22 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup") } Expect(k8sClient.Create(ctx, &linodePlacementGroup)).To(Succeed()) + linodeFirewall = infrav1alpha2.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fw", + Namespace: defaultNamespace, + UID: "5123123", + }, + Spec: infrav1alpha2.LinodeFirewallSpec{ + FirewallID: ptr.To(2), + Enabled: true, + }, + Status: infrav1alpha2.LinodeFirewallStatus{ + Ready: true, + }, + } + Expect(k8sClient.Create(ctx, &linodeFirewall)).To(Succeed()) + linodeMachine = infrav1alpha2.LinodeMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "mock", @@ -1350,13 +1395,17 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup") UID: "12345", }, Spec: infrav1alpha2.LinodeMachineSpec{ - InstanceID: ptr.To(0), + ProviderID: ptr.To("linode://0"), Type: "g6-nanode-1", Image: rutil.DefaultMachineControllerLinodeImage, PlacementGroupRef: &corev1.ObjectReference{ Namespace: defaultNamespace, Name: "test-pg", }, + FirewallRef: &corev1.ObjectReference{ + Namespace: defaultNamespace, + Name: "test-fw", + }, }, } @@ -1388,7 +1437,7 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup") } }) - It("creates a instance in a PlacementGroup", func(ctx SpecContext) { + It("creates a instance in a PlacementGroup with a firewall", func(ctx SpecContext) { mockLinodeClient := 
mock.NewMockLinodeClient(mockCtrl) getRegion := mockLinodeClient.EXPECT(). GetRegion(ctx, gomock.Any()). @@ -1431,6 +1480,6 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup") Expect(err).NotTo(HaveOccurred()) Expect(createOpts).NotTo(BeNil()) Expect(createOpts.PlacementGroup.ID).To(Equal(1)) + Expect(createOpts.FirewallID).To(Equal(2)) }) - }) diff --git a/controller/linodeobjectstoragebucket_controller.go b/controller/linodeobjectstoragebucket_controller.go index 1e3274d60..40ea8efc6 100644 --- a/controller/linodeobjectstoragebucket_controller.go +++ b/controller/linodeobjectstoragebucket_controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "errors" "fmt" "time" @@ -36,7 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" crcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -117,15 +115,6 @@ func (r *LinodeObjectStorageBucketReconciler) reconcile(ctx context.Context, bSc reterr = err } }() - - if !bScope.Bucket.DeletionTimestamp.IsZero() { - return res, r.reconcileDelete(bScope) - } - - if err := bScope.AddFinalizer(ctx); err != nil { - return res, err - } - if err := r.reconcileApply(ctx, bScope); err != nil { return res, err } @@ -145,9 +134,9 @@ func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context bScope.Bucket.Status.Ready = false bScope.Bucket.Status.FailureMessage = nil - bucket, err := services.EnsureObjectStorageBucket(ctx, bScope) + bucket, err := services.EnsureAndUpdateObjectStorageBucket(ctx, bScope) if err != nil { - bScope.Logger.Error(err, "Failed to ensure bucket exists") + bScope.Logger.Error(err, "Failed to ensure bucket or update bucket") r.setFailure(bScope, err) return err @@ -163,24 +152,6 @@ func (r *LinodeObjectStorageBucketReconciler) 
reconcileApply(ctx context.Context return nil } -func (r *LinodeObjectStorageBucketReconciler) reconcileDelete(bScope *scope.ObjectStorageBucketScope) error { - bScope.Logger.Info("Reconciling delete") - - if !controllerutil.RemoveFinalizer(bScope.Bucket, infrav1alpha2.ObjectStorageBucketFinalizer) { - err := errors.New("failed to remove finalizer from bucket; unable to delete") - bScope.Logger.Error(err, "controllerutil.RemoveFinalizer") - r.setFailure(bScope, err) - - return err - } - // TODO: remove this check and removal later - if controllerutil.ContainsFinalizer(bScope.Bucket, infrav1alpha2.GroupVersion.String()) { - controllerutil.RemoveFinalizer(bScope.Bucket, infrav1alpha2.GroupVersion.String()) - } - - return nil -} - // SetupWithManager sets up the controller with the Manager. // //nolint:dupl // This follows the pattern used for the LinodeObjectStorageBucket controller. diff --git a/controller/linodeobjectstoragebucket_controller_test.go b/controller/linodeobjectstoragebucket_controller_test.go index 5e05035ab..fef9464f0 100644 --- a/controller/linodeobjectstoragebucket_controller_test.go +++ b/controller/linodeobjectstoragebucket_controller_test.go @@ -25,9 +25,7 @@ import ( "go.uber.org/mock/gomock" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" @@ -145,7 +143,7 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }), ), Path( - Call("bucket is retrieved on update", func(ctx context.Context, mck Mock) { + Call("bucket access options are not retrieved on update", func(ctx context.Context, mck Mock) { mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Region, gomock.Any()). 
Return(&linodego.ObjectStorageBucket{ Label: "bucket", @@ -153,6 +151,54 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { Hostname: "hostname", Created: util.Pointer(time.Now()), }, nil) + mck.LinodeClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, errors.New("bucket access options fetch error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("failed to get bucket access details")) + }), + ), + Path( + Call("bucket access options are not successfully updated", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Region, gomock.Any()). + Return(&linodego.ObjectStorageBucket{ + Label: "bucket", + Region: obj.Spec.Region, + Hostname: "hostname", + Created: util.Pointer(time.Now()), + }, nil) + mck.LinodeClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()). + Return(&linodego.ObjectStorageBucketAccess{ + ACL: linodego.ACLPrivate, + CorsEnabled: true, + }, nil) + mck.LinodeClient.EXPECT().UpdateObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(errors.New("bucket access options update error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + bScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &bScope) + Expect(err.Error()).To(ContainSubstring("failed to update the bucket access options")) + }), + ), + Path( + Call("bucket is retrieved and updated", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), obj.Spec.Region, gomock.Any()). 
+ Return(&linodego.ObjectStorageBucket{ + Label: "bucket", + Region: obj.Spec.Region, + Hostname: "hostname", + Created: util.Pointer(time.Now()), + }, nil) + mck.LinodeClient.EXPECT().GetObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any()). + Return(&linodego.ObjectStorageBucketAccess{ + ACL: linodego.ACLPrivate, + CorsEnabled: true, + }, nil) + mck.LinodeClient.EXPECT().UpdateObjectStorageBucketAccess(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil) }), Result("success", func(ctx context.Context, mck Mock) { bScope.LinodeClient = mck.LinodeClient @@ -161,20 +207,14 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() { }), ), ), - Once("resource is deleted", func(ctx context.Context, _ Mock) { - // nb: client.Delete does not set DeletionTimestamp on the object, so re-fetch from the apiserver. - objectKey := client.ObjectKeyFromObject(&obj) + Call("resource is deleted", func(ctx context.Context, _ Mock) { Expect(k8sClient.Delete(ctx, &obj)).To(Succeed()) - Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed()) }), - Result("finalizer is removed", func(ctx context.Context, mck Mock) { + Result("success", func(ctx context.Context, mck Mock) { objectKey := client.ObjectKeyFromObject(&obj) k8sClient.Get(ctx, objectKey, &obj) bScope.LinodeClient = mck.LinodeClient - _, err := reconciler.reconcile(ctx, &bScope) - Expect(err).NotTo(HaveOccurred()) Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &obj))).To(BeTrue()) - Expect(mck.Logs()).To(ContainSubstring("Reconciling delete")) }), ) }) @@ -248,29 +288,5 @@ var _ = Describe("errors", Label("bucket", "errors"), func() { Expect(err.Error()).To(ContainSubstring("failed to create object storage bucket scope")) Expect(mck.Logs()).To(ContainSubstring("Failed to create object storage bucket scope")) }), - Call("scheme with no infrav1alpha2", func(ctx context.Context, mck Mock) { - prev := mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme) - 
mck.K8sClient.EXPECT().Scheme().After(prev).Return(runtime.NewScheme()).Times(2) - }), - Result("error", func(ctx context.Context, mck Mock) { - bScope.Client = mck.K8sClient - - patchHelper, err := patch.NewHelper(bScope.Bucket, mck.K8sClient) - Expect(err).NotTo(HaveOccurred()) - bScope.PatchHelper = patchHelper - - _, err = reconciler.reconcile(ctx, &bScope) - Expect(err.Error()).To(ContainSubstring("no kind is registered")) - }), - Once("finalizer is missing", func(ctx context.Context, _ Mock) { - bScope.Bucket.ObjectMeta.Finalizers = []string{} - }), - Result("error", func(ctx context.Context, mck Mock) { - bScope.LinodeClient = mck.LinodeClient - bScope.Client = mck.K8sClient - err := reconciler.reconcileDelete(&bScope) - Expect(err.Error()).To(ContainSubstring("failed to remove finalizer from bucket")) - Expect(mck.Events()).To(ContainSubstring("failed to remove finalizer from bucket")) - }), ) }) diff --git a/controller/linodeplacementgroup_controller.go b/controller/linodeplacementgroup_controller.go index eedd63c39..376564493 100644 --- a/controller/linodeplacementgroup_controller.go +++ b/controller/linodeplacementgroup_controller.go @@ -305,20 +305,24 @@ func (r *LinodePlacementGroupReconciler) SetupWithManager(mgr ctrl.Manager, opti err = ctrl.NewControllerManagedBy(mgr). For(&infrav1alpha2.LinodePlacementGroup{}). WithOptions(options). - WithEventFilter( - predicate.And( - // Filter for objects with a specific WatchLabel. - predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), - // Do not reconcile the Delete events generated by the - // controller itself. 
- predicate.Funcs{ - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - }, - )).Watches( - &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(linodePlacementGroupMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), - ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) + WithEventFilter(predicate.And( + predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicate.GenerationChangedPredicate{}, + predicate.Funcs{UpdateFunc: func(e event.UpdateEvent) bool { + oldObject, okOld := e.ObjectOld.(*infrav1alpha2.LinodePlacementGroup) + newObject, okNew := e.ObjectNew.(*infrav1alpha2.LinodePlacementGroup) + if okOld && okNew && oldObject.Spec.PGID == nil && newObject.Spec.PGID != nil { + // We just created the PG, don't enqueue and update + return false + } + return true + }}, + )). + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(linodePlacementGroupMapper), + builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) if err != nil { return fmt.Errorf("failed to build controller: %w", err) } diff --git a/controller/linodevpc_controller.go b/controller/linodevpc_controller.go index 401b10bf1..7c4deb82d 100644 --- a/controller/linodevpc_controller.go +++ b/controller/linodevpc_controller.go @@ -335,20 +335,24 @@ func (r *LinodeVPCReconciler) SetupWithManager(mgr ctrl.Manager, options crcontr err = ctrl.NewControllerManagedBy(mgr). For(&infrav1alpha2.LinodeVPC{}). WithOptions(options). - WithEventFilter( - predicate.And( - // Filter for objects with a specific WatchLabel. - predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), - // Do not reconcile the Delete events generated by the - // controller itself. 
- predicate.Funcs{ - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - }, - )).Watches( - &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(linodeVPCMapper), - builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), - ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) + WithEventFilter(predicate.And( + predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicate.GenerationChangedPredicate{}, + predicate.Funcs{UpdateFunc: func(e event.UpdateEvent) bool { + oldObject, okOld := e.ObjectOld.(*infrav1alpha2.LinodeVPC) + newObject, okNew := e.ObjectNew.(*infrav1alpha2.LinodeVPC) + if okOld && okNew && oldObject.Spec.VPCID == nil && newObject.Spec.VPCID != nil { + // We just created the VPC, don't enqueue and update + return false + } + return true + }}, + )). + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(linodeVPCMapper), + builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) if err != nil { return fmt.Errorf("failed to build controller: %w", err) } diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2126b8be2..983c7119f 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -9,6 +9,7 @@ - [Default (kubeadm)](./topics/flavors/default.md) - [Dual-stack](./topics/flavors/dual-stack.md) - [Etcd-disk](./topics/flavors/etcd-disk.md) + - [Etcd Backup](./topics/flavors/etcd-backup-restore.md) - [ClusterClass kubeadm](./topics/flavors/clusterclass-kubeadm.md) - [Cluster Autoscaler](./topics/flavors/cluster-autoscaler.md) - [Cilium BGP LB (kubeadm)](./topics/flavors/cilium-bgp-lb.md) @@ -17,6 +18,7 @@ - [vpcless](./topics/flavors/vpcless.md) - [konnectivity (kubeadm)](./topics/flavors/konnectivity.md) - [DNS based apiserver Load 
Balancing](./topics/flavors/dns-loadbalancing.md) + - [Flatcar](./topics/flavors/flatcar.md) - [Etcd](./topics/etcd.md) - [Backups](./topics/backups.md) - [Multi-Tenancy](./topics/multi-tenancy.md) diff --git a/docs/src/topics/backups.md b/docs/src/topics/backups.md index 2660dcee1..33bb8d911 100644 --- a/docs/src/topics/backups.md +++ b/docs/src/topics/backups.md @@ -55,7 +55,7 @@ CAPL will also create `read_write` and `read_only` access keys for the bucket an apiVersion: v1 kind: Secret metadata: - name: -bucket-details + name: -obj-key namespace: ownerReferences: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 @@ -67,13 +67,11 @@ data: bucket_name: bucket_region: bucket_endpoint: - access_key_rw: - secret_key_rw: - access_key_ro: - secret_key_ro: + access_key: + secret_key: ``` -The bucket-details secret is owned and managed by CAPL during the life of the `LinodeObjectStorageBucket`. +The -obj-key secret is owned and managed by CAPL during the life of the `LinodeObjectStorageBucket`. 
### Access Keys Rotation diff --git a/e2e/capl-cluster-flavors/k3s-capl-cluster/assert-child-cluster-resources.yaml b/e2e/capl-cluster-flavors/k3s-capl-cluster/assert-child-cluster-resources.yaml index 6a94846f0..a630665e9 100644 --- a/e2e/capl-cluster-flavors/k3s-capl-cluster/assert-child-cluster-resources.yaml +++ b/e2e/capl-cluster-flavors/k3s-capl-cluster/assert-child-cluster-resources.yaml @@ -35,7 +35,7 @@ status: unavailableReplicas: 0 availableReplicas: 1 --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KThreesControlPlane metadata: labels: diff --git a/e2e/capl-cluster-flavors/k3s-capl-cluster/chainsaw-test.yaml b/e2e/capl-cluster-flavors/k3s-capl-cluster/chainsaw-test.yaml index 1c971de72..26e746d34 100755 --- a/e2e/capl-cluster-flavors/k3s-capl-cluster/chainsaw-test.yaml +++ b/e2e/capl-cluster-flavors/k3s-capl-cluster/chainsaw-test.yaml @@ -59,7 +59,7 @@ spec: apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment - describe: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KThreesControlPlane - name: Check if the linodes are created try: diff --git a/go.mod b/go.mod index 4905c6170..b9371bf26 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 - github.com/linode/linodego v1.38.0 + github.com/linode/linodego v1.39.0 github.com/onsi/ginkgo/v2 v2.20.0 github.com/onsi/gomega v1.34.1 github.com/stretchr/testify v1.9.0 @@ -93,7 +93,7 @@ require ( go.uber.org/ratelimit v0.2.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sys v0.23.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect diff --git a/go.sum b/go.sum index 3467d1b2c..d8c574707 100644 --- a/go.sum +++ 
b/go.sum @@ -123,8 +123,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY= -github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= +github.com/linode/linodego v1.39.0 h1:gRsj2PXX+HTO3eYQaXEuQGsLeeLFDSBDontC5JL3Nn8= +github.com/linode/linodego v1.39.0/go.mod h1:da8KzAQKSm5obwa06yXk5CZSDFMP9Wb08GA/O+aR9W0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= @@ -294,8 +294,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/mock/client.go b/mock/client.go index 
f0bb9d523..3e20c26e5 100644 --- a/mock/client.go +++ b/mock/client.go @@ -530,6 +530,21 @@ func (mr *MockLinodeClientMockRecorder) GetObjectStorageBucket(ctx, regionID, la return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageBucket", reflect.TypeOf((*MockLinodeClient)(nil).GetObjectStorageBucket), ctx, regionID, label) } +// GetObjectStorageBucketAccess mocks base method. +func (m *MockLinodeClient) GetObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string) (*linodego.ObjectStorageBucketAccess, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectStorageBucketAccess", ctx, clusterOrRegionID, label) + ret0, _ := ret[0].(*linodego.ObjectStorageBucketAccess) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectStorageBucketAccess indicates an expected call of GetObjectStorageBucketAccess. +func (mr *MockLinodeClientMockRecorder) GetObjectStorageBucketAccess(ctx, clusterOrRegionID, label any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageBucketAccess", reflect.TypeOf((*MockLinodeClient)(nil).GetObjectStorageBucketAccess), ctx, clusterOrRegionID, label) +} + // GetObjectStorageKey mocks base method. func (m *MockLinodeClient) GetObjectStorageKey(ctx context.Context, keyID int) (*linodego.ObjectStorageKey, error) { m.ctrl.T.Helper() @@ -799,6 +814,20 @@ func (mr *MockLinodeClientMockRecorder) UpdateInstanceConfig(ctx, linodeID, conf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInstanceConfig", reflect.TypeOf((*MockLinodeClient)(nil).UpdateInstanceConfig), ctx, linodeID, configID, opts) } +// UpdateObjectStorageBucketAccess mocks base method. 
+func (m *MockLinodeClient) UpdateObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string, opts linodego.ObjectStorageBucketUpdateAccessOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateObjectStorageBucketAccess", ctx, clusterOrRegionID, label, opts) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateObjectStorageBucketAccess indicates an expected call of UpdateObjectStorageBucketAccess. +func (mr *MockLinodeClientMockRecorder) UpdateObjectStorageBucketAccess(ctx, clusterOrRegionID, label, opts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateObjectStorageBucketAccess", reflect.TypeOf((*MockLinodeClient)(nil).UpdateObjectStorageBucketAccess), ctx, clusterOrRegionID, label, opts) +} + // UpdatePlacementGroup mocks base method. func (m *MockLinodeClient) UpdatePlacementGroup(ctx context.Context, id int, options linodego.PlacementGroupUpdateOptions) (*linodego.PlacementGroup, error) { m.ctrl.T.Helper() @@ -1554,6 +1583,21 @@ func (mr *MockLinodeObjectStorageClientMockRecorder) GetObjectStorageBucket(ctx, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageBucket", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).GetObjectStorageBucket), ctx, regionID, label) } +// GetObjectStorageBucketAccess mocks base method. +func (m *MockLinodeObjectStorageClient) GetObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string) (*linodego.ObjectStorageBucketAccess, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectStorageBucketAccess", ctx, clusterOrRegionID, label) + ret0, _ := ret[0].(*linodego.ObjectStorageBucketAccess) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectStorageBucketAccess indicates an expected call of GetObjectStorageBucketAccess. 
+func (mr *MockLinodeObjectStorageClientMockRecorder) GetObjectStorageBucketAccess(ctx, clusterOrRegionID, label any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageBucketAccess", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).GetObjectStorageBucketAccess), ctx, clusterOrRegionID, label) +} + // GetObjectStorageKey mocks base method. func (m *MockLinodeObjectStorageClient) GetObjectStorageKey(ctx context.Context, keyID int) (*linodego.ObjectStorageKey, error) { m.ctrl.T.Helper() @@ -1569,6 +1613,20 @@ func (mr *MockLinodeObjectStorageClientMockRecorder) GetObjectStorageKey(ctx, ke return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorageKey", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).GetObjectStorageKey), ctx, keyID) } +// UpdateObjectStorageBucketAccess mocks base method. +func (m *MockLinodeObjectStorageClient) UpdateObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID, label string, opts linodego.ObjectStorageBucketUpdateAccessOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateObjectStorageBucketAccess", ctx, clusterOrRegionID, label, opts) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateObjectStorageBucketAccess indicates an expected call of UpdateObjectStorageBucketAccess. +func (mr *MockLinodeObjectStorageClientMockRecorder) UpdateObjectStorageBucketAccess(ctx, clusterOrRegionID, label, opts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateObjectStorageBucketAccess", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).UpdateObjectStorageBucketAccess), ctx, clusterOrRegionID, label, opts) +} + // MockLinodeDNSClient is a mock of LinodeDNSClient interface. 
type MockLinodeDNSClient struct { ctrl *gomock.Controller diff --git a/observability/wrappers/linodeclient/linodeclient.gen.go b/observability/wrappers/linodeclient/linodeclient.gen.go index 8b9bb72ba..05a4ed485 100644 --- a/observability/wrappers/linodeclient/linodeclient.gen.go +++ b/observability/wrappers/linodeclient/linodeclient.gen.go @@ -865,6 +865,32 @@ func (_d LinodeClientWithTracing) GetObjectStorageBucket(ctx context.Context, re return _d.LinodeClient.GetObjectStorageBucket(ctx, regionID, label) } +// GetObjectStorageBucketAccess implements clients.LinodeClient +func (_d LinodeClientWithTracing) GetObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID string, label string) (op1 *linodego.ObjectStorageBucketAccess, err error) { + ctx, _span := tracing.Start(ctx, "clients.LinodeClient.GetObjectStorageBucketAccess") + defer func() { + if _d._spanDecorator != nil { + _d._spanDecorator(_span, map[string]interface{}{ + "ctx": ctx, + "clusterOrRegionID": clusterOrRegionID, + "label": label}, map[string]interface{}{ + "op1": op1, + "err": err}) + } + + if err != nil { + _span.RecordError(err) + _span.SetAttributes( + attribute.String("event", "error"), + attribute.String("message", err.Error()), + ) + } + + _span.End() + }() + return _d.LinodeClient.GetObjectStorageBucketAccess(ctx, clusterOrRegionID, label) +} + // GetObjectStorageKey implements clients.LinodeClient func (_d LinodeClientWithTracing) GetObjectStorageKey(ctx context.Context, keyID int) (op1 *linodego.ObjectStorageKey, err error) { ctx, _span := tracing.Start(ctx, "clients.LinodeClient.GetObjectStorageKey") @@ -1325,6 +1351,32 @@ func (_d LinodeClientWithTracing) UpdateInstanceConfig(ctx context.Context, lino return _d.LinodeClient.UpdateInstanceConfig(ctx, linodeID, configID, opts) } +// UpdateObjectStorageBucketAccess implements clients.LinodeClient +func (_d LinodeClientWithTracing) UpdateObjectStorageBucketAccess(ctx context.Context, clusterOrRegionID string, label string, opts 
linodego.ObjectStorageBucketUpdateAccessOptions) (err error) { + ctx, _span := tracing.Start(ctx, "clients.LinodeClient.UpdateObjectStorageBucketAccess") + defer func() { + if _d._spanDecorator != nil { + _d._spanDecorator(_span, map[string]interface{}{ + "ctx": ctx, + "clusterOrRegionID": clusterOrRegionID, + "label": label, + "opts": opts}, map[string]interface{}{ + "err": err}) + } + + if err != nil { + _span.RecordError(err) + _span.SetAttributes( + attribute.String("event", "error"), + attribute.String("message", err.Error()), + ) + } + + _span.End() + }() + return _d.LinodeClient.UpdateObjectStorageBucketAccess(ctx, clusterOrRegionID, label, opts) +} + // UpdatePlacementGroup implements clients.LinodeClient func (_d LinodeClientWithTracing) UpdatePlacementGroup(ctx context.Context, id int, options linodego.PlacementGroupUpdateOptions) (pp1 *linodego.PlacementGroup, err error) { ctx, _span := tracing.Start(ctx, "clients.LinodeClient.UpdatePlacementGroup") diff --git a/scripts/pre-kubeadminit.sh b/scripts/pre-kubeadminit.sh index 0c9aa2693..434f6abb4 100644 --- a/scripts/pre-kubeadminit.sh +++ b/scripts/pre-kubeadminit.sh @@ -1,6 +1,8 @@ #!/bin/bash set -euo pipefail - +CONTAINERD_VERSION='1.7.20' +CNI_PLUGIN_VERSIONS='1.5.1' +# setup containerd config mkdir -p -m 755 /etc/containerd cat > /etc/containerd/config.toml << EOF version = 2 @@ -38,6 +40,38 @@ modprobe overlay modprobe br_netfilter sysctl --system +# containerd service +cat > /usr/lib/systemd/system/containerd.service << EOF +[Unit] +Description=containerd container runtime +Documentation=https://containerd.io +After=network.target local-fs.target + +[Service] +ExecStartPre=-/sbin/modprobe overlay +ExecStart=/usr/local/bin/containerd + +Type=notify +Delegate=yes +KillMode=process +Restart=always +RestartSec=5 + +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. 
+LimitNPROC=infinity +LimitCORE=infinity + +# Comment TasksMax if your systemd version does not support it. +# Only systemd 226 and above support this version. +TasksMax=infinity +OOMScoreAdjust=-999 + +[Install] +WantedBy=multi-user.target +EOF + +# kubelet service cat > /usr/lib/systemd/system/kubelet.service << EOF [Unit] Description=kubelet: The Kubernetes Node Agent @@ -72,15 +106,39 @@ EOF sed -i '/swap/d' /etc/fstab swapoff -a - +# check for required tools and only install missing tools +REQUIRED_TOOLS=(runc socat conntrack ethtool iptables) +INSTALL_TOOLS=() +for tool in ${REQUIRED_TOOLS[*]}; do + echo "checking for ${tool}" + if [ ! -x "$(command -v ${tool})" ]; then + echo "${tool} is missing" + INSTALL_TOOLS+=(${tool}) + fi +done export DEBIAN_FRONTEND=noninteractive apt-get update -y -apt-get install -y containerd socat conntrack +apt-get install -y ${INSTALL_TOOLS[*]} + +# install containerd +curl -L "https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz" | tar -C /usr/local -xz + +# install cni plugins +mkdir -p /opt/cni/bin +curl -L "https://github.com/containernetworking/plugins/releases/download/v${CNI_PLUGIN_VERSIONS}/cni-plugins-linux-amd64-v${CNI_PLUGIN_VERSIONS}.tgz" | tar -C /opt/cni/bin -xz +chown -R root:root /opt/cni PATCH_VERSION=${1#[v]} VERSION=${PATCH_VERSION%.*} + +# install crictl curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/v${VERSION}.0/crictl-v${VERSION}.0-linux-amd64.tar.gz" | tar -C /usr/local/bin -xz + +# install kubeadm,kubelet,kubectl cd /usr/local/bin curl -L --remote-name-all https://dl.k8s.io/release/$1/bin/linux/amd64/{kubeadm,kubelet} curl -LO "https://dl.k8s.io/release/v${VERSION}.0/bin/linux/amd64/kubectl" chmod +x {kubeadm,kubelet,kubectl} +# reload systemd to pick up containerd & kubelet settings +systemctl daemon-reload +systemctl enable --now containerd kubelet diff --git 
a/templates/addons/etcd-backup-restore/etcd-backup-restore.yaml b/templates/addons/etcd-backup-restore/etcd-backup-restore.yaml index 67ef342a5..c47d572c9 100644 --- a/templates/addons/etcd-backup-restore/etcd-backup-restore.yaml +++ b/templates/addons/etcd-backup-restore/etcd-backup-restore.yaml @@ -62,12 +62,12 @@ data: valueFrom: secretKeyRef: name: ${CLUSTER_NAME}-etcd-backup-obj-key - key: "access_key_rw" + key: "access_key" - name: "AWS_SECRET_ACCESS_KEY" valueFrom: secretKeyRef: name: ${CLUSTER_NAME}-etcd-backup-obj-key - key: "secret_key_rw" + key: "secret_key" - name: "AWS_SSE_CUSTOMER_KEY" valueFrom: secretKeyRef: @@ -94,7 +94,7 @@ data: - --cacert=${CERTPATH}/etcd/${CACERTFILE} - --cert=${CERTPATH}/etcd/${CERTFILE} - --key=${CERTPATH}/etcd/${KEYFILE} - image: ${ETCDBR_IMAGE:-europe-docker.pkg.dev/gardener-project/releases/gardener/etcdbrctl:v0.28.0} + image: ${ETCDBR_IMAGE:-europe-docker.pkg.dev/gardener-project/releases/gardener/etcdbrctl:v0.29.0} securityContext: allowPrivilegeEscalation: false runAsUser: 0 diff --git a/templates/flavors/k3s/default/k3sConfigTemplate.yaml b/templates/flavors/k3s/default/k3sConfigTemplate.yaml index d17fe247f..e8ed97ad6 100644 --- a/templates/flavors/k3s/default/k3sConfigTemplate.yaml +++ b/templates/flavors/k3s/default/k3sConfigTemplate.yaml @@ -1,5 +1,5 @@ --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KThreesConfigTemplate metadata: name: ${CLUSTER_NAME}-md-0 diff --git a/templates/flavors/k3s/default/k3sControlPlane.yaml b/templates/flavors/k3s/default/k3sControlPlane.yaml index 7a9c45857..b40372f11 100644 --- a/templates/flavors/k3s/default/k3sControlPlane.yaml +++ b/templates/flavors/k3s/default/k3sControlPlane.yaml @@ -1,13 +1,14 @@ --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KThreesControlPlane metadata: name: ${CLUSTER_NAME}-control-plane spec: - infrastructureTemplate: - apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha2 - kind: LinodeMachineTemplate - name: ${CLUSTER_NAME}-control-plane + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + kind: LinodeMachineTemplate + name: ${CLUSTER_NAME}-control-plane kthreesConfigSpec: files: - path: /etc/rancher/k3s/config.yaml.d/capi-config.yaml diff --git a/templates/flavors/k3s/dual-stack/kustomization.yaml b/templates/flavors/k3s/dual-stack/kustomization.yaml index f35388750..e1e69db0e 100644 --- a/templates/flavors/k3s/dual-stack/kustomization.yaml +++ b/templates/flavors/k3s/dual-stack/kustomization.yaml @@ -25,10 +25,10 @@ patches: - fd03::/108 - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KThreesControlPlane metadata: name: ${CLUSTER_NAME}-control-plane @@ -41,7 +41,7 @@ patches: serviceCidr: "10.96.0.0/12,fd03::/108" - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace @@ -81,7 +81,7 @@ patches: enabled: true - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace @@ -95,7 +95,7 @@ patches: - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname - target: group: bootstrap.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesConfigTemplate patch: |- - op: replace diff --git a/templates/flavors/k3s/full-vpcless/kustomization.yaml b/templates/flavors/k3s/full-vpcless/kustomization.yaml index c6955104d..c7610c58a 100644 --- a/templates/flavors/k3s/full-vpcless/kustomization.yaml +++ b/templates/flavors/k3s/full-vpcless/kustomization.yaml @@ -57,10 +57,10 @@ patches: - fd03::/108 - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: 
KThreesControlPlane patch: |- - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KThreesControlPlane metadata: name: ${CLUSTER_NAME}-control-plane @@ -73,7 +73,7 @@ patches: serviceCidr: "10.96.0.0/12,fd03::/108" - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace @@ -137,7 +137,7 @@ patches: etcd-backup: "true" - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace @@ -151,7 +151,7 @@ patches: - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname - target: group: bootstrap.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesConfigTemplate patch: |- - op: replace diff --git a/templates/flavors/k3s/vpcless/kustomization.yaml b/templates/flavors/k3s/vpcless/kustomization.yaml index f1ef8e4cb..e3b7d9162 100644 --- a/templates/flavors/k3s/vpcless/kustomization.yaml +++ b/templates/flavors/k3s/vpcless/kustomization.yaml @@ -34,7 +34,7 @@ patches: path: /spec/vpcRef - target: group: bootstrap.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesConfigTemplate patch: |- - op: replace @@ -48,7 +48,7 @@ patches: - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace @@ -62,7 +62,7 @@ patches: - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname - target: group: controlplane.cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: KThreesControlPlane patch: |- - op: replace diff --git a/templates/flavors/kubeadm/default/kubeadmConfigTemplate.yaml b/templates/flavors/kubeadm/default/kubeadmConfigTemplate.yaml index 41dfeebc7..43e19dbcb 100644 --- 
a/templates/flavors/kubeadm/default/kubeadmConfigTemplate.yaml +++ b/templates/flavors/kubeadm/default/kubeadmConfigTemplate.yaml @@ -7,7 +7,7 @@ spec: template: spec: preKubeadmCommands: - - curl -fsSL https://raw.githubusercontent.com/linode/cluster-api-provider-linode/1981a4934753c10bfe9042c0b24ed4d02392ee0e/scripts/pre-kubeadminit.sh | bash -s ${KUBERNETES_VERSION} + - curl -fsSL https://github.com/linode/cluster-api-provider-linode/raw/dd76b1f979696ef22ce093d420cdbd0051a1d725/scripts/pre-kubeadminit.sh | bash -s ${KUBERNETES_VERSION} - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname joinConfiguration: nodeRegistration: diff --git a/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml b/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml index 9de72b19d..c9689068c 100644 --- a/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml +++ b/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml @@ -12,7 +12,7 @@ spec: name: ${CLUSTER_NAME}-control-plane kubeadmConfigSpec: preKubeadmCommands: - - curl -fsSL https://raw.githubusercontent.com/linode/cluster-api-provider-linode/1981a4934753c10bfe9042c0b24ed4d02392ee0e/scripts/pre-kubeadminit.sh | bash -s ${KUBERNETES_VERSION} + - curl -fsSL https://github.com/linode/cluster-api-provider-linode/raw/dd76b1f979696ef22ce093d420cdbd0051a1d725/scripts/pre-kubeadminit.sh | bash -s ${KUBERNETES_VERSION} - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname clusterConfiguration: etcd: diff --git a/util/helpers.go b/util/helpers.go index 76cf480f6..cc81cb0cf 100644 --- a/util/helpers.go +++ b/util/helpers.go @@ -5,6 +5,8 @@ import ( "io" "net/http" "os" + "strconv" + "strings" "github.com/linode/linodego" ) @@ -46,3 +48,16 @@ func IsRetryableError(err error) bool { http.StatusServiceUnavailable, linodego.ErrorFromError) || errors.Is(err, http.ErrHandlerTimeout) || errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, io.ErrUnexpectedEOF) } + 
+// GetInstanceID determines the instance ID from the ProviderID +func GetInstanceID(providerID *string) (int, error) { + if providerID == nil { + err := errors.New("nil ProviderID") + return -1, err + } + instanceID, err := strconv.Atoi(strings.TrimPrefix(*providerID, "linode://")) + if err != nil { + return -1, err + } + return instanceID, nil +} diff --git a/util/helpers_test.go b/util/helpers_test.go index de084ffc8..7cc58fa85 100644 --- a/util/helpers_test.go +++ b/util/helpers_test.go @@ -89,3 +89,41 @@ func TestIsRetryableError(t *testing.T) { }) } } + +func TestGetInstanceID(t *testing.T) { + t.Parallel() + tests := []struct { + name string + providerID *string + wantErr bool + wantID int + }{{ + name: "nil", + providerID: nil, + wantErr: true, + wantID: -1, + }, { + name: "invalid provider ID", + providerID: Pointer("linode://foobar"), + wantErr: true, + wantID: -1, + }, { + name: "valid", + providerID: Pointer("linode://12345"), + wantErr: false, + wantID: 12345, + }} + for _, tt := range tests { + testcase := tt + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + gotID, err := GetInstanceID(testcase.providerID) + if testcase.wantErr && err == nil { + t.Errorf("wanted %v, got %v", testcase.wantErr, err) + } + if gotID != testcase.wantID { + t.Errorf("wanted %v, got %v", testcase.wantID, gotID) + } + }) + } +} diff --git a/util/reconciler/defaults.go b/util/reconciler/defaults.go index c3079c624..cb087474b 100644 --- a/util/reconciler/defaults.go +++ b/util/reconciler/defaults.go @@ -31,7 +31,7 @@ const ( // DefaultMachineControllerLinodeImage default image. DefaultMachineControllerLinodeImage = "linode/ubuntu22.04" // DefaultMachineControllerWaitForRunningDelay is the default requeue delay if instance is not running. - DefaultMachineControllerWaitForRunningDelay = 5 * time.Second + DefaultMachineControllerWaitForRunningDelay = 15 * time.Second // DefaultMachineControllerWaitForPreflightTimeout is the default timeout during the preflight phase. 
DefaultMachineControllerWaitForPreflightTimeout = 5 * time.Minute // DefaultMachineControllerWaitForRunningTimeout is the default timeout if instance is not running.