diff --git a/PROJECT b/PROJECT index 6b1308ab3..6724d41c0 100644 --- a/PROJECT +++ b/PROJECT @@ -153,4 +153,16 @@ resources: conversion: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: LinodeObjectStorageKey + path: github.com/linode/cluster-api-provider-linode/api/v1alpha2 + version: v1alpha2 + webhooks: + validation: true + webhookVersion: v1 version: "3" diff --git a/api/v1alpha2/linodeobjectstoragekey_types.go b/api/v1alpha2/linodeobjectstoragekey_types.go new file mode 100644 index 000000000..83473120d --- /dev/null +++ b/api/v1alpha2/linodeobjectstoragekey_types.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 Akamai Technologies, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ( + // ObjectStorageKeyFinalizer allows ReconcileLinodeObjectStorageKey to clean up Linode resources associated + // with LinodeObjectStorageKey before removing it from the apiserver. + ObjectStorageKeyFinalizer = "linodeobjectstoragekey.infrastructure.cluster.x-k8s.io" +) + +type BucketAccessRef struct { + BucketName string `json:"bucketName"` + Permissions string `json:"permissions"` + Region string `json:"region"` +} + +// LinodeObjectStorageKeySpec defines the desired state of LinodeObjectStorageKey +type LinodeObjectStorageKeySpec struct { + // BucketAccess is the list of object storage bucket labels which can be accessed using the key + // +kubebuilder:validation:MinItems=1 + BucketAccess []BucketAccessRef `json:"bucketAccess"` + + // CredentialsRef is a reference to a Secret that contains the credentials to use for generating access keys. + // If not supplied then the credentials of the controller will be used. + // +optional + CredentialsRef *corev1.SecretReference `json:"credentialsRef"` + + // KeyGeneration may be modified to trigger a rotation of the access key. + // +kubebuilder:default=0 + KeyGeneration int `json:"keyGeneration"` + + // SecretType instructs the controller what type of secret to generate containing access key details. + // +kubebuilder:validation:Enum=Opaque;addons.cluster.x-k8s.io/resource-set + // +kubebuilder:default=Opaque + // +optional + SecretType corev1.SecretType `json:"secretType,omitempty"` +} + +// LinodeObjectStorageKeyStatus defines the observed state of LinodeObjectStorageKey +type LinodeObjectStorageKeyStatus struct { + // Ready denotes that the key has been provisioned. + // +optional + // +kubebuilder:default=false + Ready bool `json:"ready"` + + // FailureMessage will be set in the event that there is a terminal problem + // reconciling the Object Storage Key and will contain a verbose string + // suitable for logging and human consumption. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // Conditions specify the service state of the LinodeObjectStorageKey. 
+ // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` + + // CreationTime specifies the creation timestamp for the secret. + // +optional + CreationTime *metav1.Time `json:"creationTime,omitempty"` + + // LastKeyGeneration tracks the last known value of .spec.keyGeneration. + // +optional + LastKeyGeneration *int `json:"lastKeyGeneration,omitempty"` + + // SecretName specifies the name of the Secret containing access key data. + // +optional + SecretName *string `json:"secretName,omitempty"` + + // AccessKeyRef stores the ID for Object Storage key provisioned. + // +optional + AccessKeyRef *int `json:"accessKeyRef,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=linodeobjectstoragekeys,scope=Namespaced,categories=cluster-api,shortName=lobjkey +// +kubebuilder:subresource:status +// +kubebuilder:metadata:labels="clusterctl.cluster.x-k8s.io/move-hierarchy=true" +// +kubebuilder:printcolumn:name="ID",type="string",JSONPath=".status.accessKeyRef",description="The ID assigned to the access key" +// +kubebuilder:printcolumn:name="Secret",type="string",JSONPath=".status.secretName",description="The name of the Secret containing access key data" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Whether the access key is synced in the Linode API" + +// LinodeObjectStorageKey is the Schema for the linodeobjectstoragekeys API +type LinodeObjectStorageKey struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LinodeObjectStorageKeySpec `json:"spec,omitempty"` + Status LinodeObjectStorageKeyStatus `json:"status,omitempty"` +} + +func (b *LinodeObjectStorageKey) GetConditions() clusterv1.Conditions { + return b.Status.Conditions +} + +func (b *LinodeObjectStorageKey) SetConditions(conditions clusterv1.Conditions) { + b.Status.Conditions = conditions +} + +// +kubebuilder:object:root=true + +// LinodeObjectStorageKeyList contains a list of LinodeObjectStorageKey +type LinodeObjectStorageKeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinodeObjectStorageKey `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LinodeObjectStorageKey{}, &LinodeObjectStorageKeyList{}) +} diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index fa9e821bc..5b2c01ed7 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -28,6 +28,21 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketAccessRef) DeepCopyInto(out *BucketAccessRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketAccessRef. +func (in *BucketAccessRef) DeepCopy() *BucketAccessRef { + if in == nil { + return nil + } + out := new(BucketAccessRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceConfigInterfaceCreateOptions) DeepCopyInto(out *InstanceConfigInterfaceCreateOptions) { *out = *in @@ -746,6 +761,136 @@ func (in *LinodeObjectStorageBucketStatus) DeepCopy() *LinodeObjectStorageBucket return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinodeObjectStorageKey) DeepCopyInto(out *LinodeObjectStorageKey) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeObjectStorageKey. +func (in *LinodeObjectStorageKey) DeepCopy() *LinodeObjectStorageKey { + if in == nil { + return nil + } + out := new(LinodeObjectStorageKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinodeObjectStorageKey) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeObjectStorageKeyList) DeepCopyInto(out *LinodeObjectStorageKeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinodeObjectStorageKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeObjectStorageKeyList. +func (in *LinodeObjectStorageKeyList) DeepCopy() *LinodeObjectStorageKeyList { + if in == nil { + return nil + } + out := new(LinodeObjectStorageKeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinodeObjectStorageKeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeObjectStorageKeySpec) DeepCopyInto(out *LinodeObjectStorageKeySpec) { + *out = *in + if in.BucketAccess != nil { + in, out := &in.BucketAccess, &out.BucketAccess + *out = make([]BucketAccessRef, len(*in)) + copy(*out, *in) + } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeObjectStorageKeySpec. +func (in *LinodeObjectStorageKeySpec) DeepCopy() *LinodeObjectStorageKeySpec { + if in == nil { + return nil + } + out := new(LinodeObjectStorageKeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinodeObjectStorageKeyStatus) DeepCopyInto(out *LinodeObjectStorageKeyStatus) { + *out = *in + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + if in.LastKeyGeneration != nil { + in, out := &in.LastKeyGeneration, &out.LastKeyGeneration + *out = new(int) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeObjectStorageKeyStatus. +func (in *LinodeObjectStorageKeyStatus) DeepCopy() *LinodeObjectStorageKeyStatus { + if in == nil { + return nil + } + out := new(LinodeObjectStorageKeyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinodePlacementGroup) DeepCopyInto(out *LinodePlacementGroup) { *out = *in diff --git a/cloud/scope/object_storage_bucket.go b/cloud/scope/object_storage_bucket.go index 3ace48183..a444772c0 100644 --- a/cloud/scope/object_storage_bucket.go +++ b/cloud/scope/object_storage_bucket.go @@ -65,6 +65,7 @@ func validateObjectStorageBucketScopeParams(params ObjectStorageBucketScopeParam return nil } +//nolint:dupl // TODO: Remove fields related to key provisioning from the bucket resource. func NewObjectStorageBucketScope(ctx context.Context, apiKey string, params ObjectStorageBucketScopeParams) (*ObjectStorageBucketScope, error) { if err := validateObjectStorageBucketScopeParams(params); err != nil { return nil, err diff --git a/cloud/scope/object_storage_bucket_test.go b/cloud/scope/object_storage_bucket_test.go index e2ecb4e46..3c908ff3e 100644 --- a/cloud/scope/object_storage_bucket_test.go +++ b/cloud/scope/object_storage_bucket_test.go @@ -296,7 +296,7 @@ func TestObjectStorageBucketScopeMethods(t *testing.T) { } } -func TestGenerateKeySecret(t *testing.T) { +func TestGenerateKeySecretBucket(t *testing.T) { t.Parallel() tests := []struct { name string diff --git a/cloud/scope/object_storage_key.go b/cloud/scope/object_storage_key.go new file mode 100644 index 000000000..64ee44414 --- /dev/null +++ b/cloud/scope/object_storage_key.go @@ -0,0 +1,225 @@ +package scope + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + "github.com/linode/linodego" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusteraddonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" + + . 
"github.com/linode/cluster-api-provider-linode/clients" +) + +type ObjectStorageKeyScopeParams struct { + Client K8sClient + Key *infrav1alpha2.LinodeObjectStorageKey + Logger *logr.Logger +} + +type ObjectStorageKeyScope struct { + Client K8sClient + Key *infrav1alpha2.LinodeObjectStorageKey + Logger logr.Logger + LinodeClient LinodeClient + PatchHelper *patch.Helper +} + +func validateObjectStorageKeyScopeParams(params ObjectStorageKeyScopeParams) error { + if params.Key == nil { + return errors.New("object storage key is required when creating an ObjectStorageKeyScope") + } + if params.Logger == nil { + return errors.New("logger is required when creating an ObjectStorageKeyScope") + } + + return nil +} + +//nolint:dupl // Temporary duplicate until key provisioning is removed from the bucket resource. +func NewObjectStorageKeyScope(ctx context.Context, apiKey string, params ObjectStorageKeyScopeParams) (*ObjectStorageKeyScope, error) { + if err := validateObjectStorageKeyScopeParams(params); err != nil { + return nil, err + } + + // Override the controller credentials with ones from the Cluster's Secret reference (if supplied). + if params.Key.Spec.CredentialsRef != nil { + // TODO: This key is hard-coded (for now) to match the externally-managed `manager-credentials` Secret. + apiToken, err := getCredentialDataFromRef(ctx, params.Client, *params.Key.Spec.CredentialsRef, params.Key.GetNamespace(), "apiToken") + if err != nil { + return nil, fmt.Errorf("credentials from secret ref: %w", err) + } + apiKey = string(apiToken) + } + linodeClient, err := CreateLinodeClient(apiKey, clientTimeout) + if err != nil { + return nil, fmt.Errorf("failed to create linode client: %w", err) + } + + patchHelper, err := patch.NewHelper(params.Key, params.Client) + if err != nil { + return nil, fmt.Errorf("failed to init patch helper: %w", err) + } + + return &ObjectStorageKeyScope{ + Client: params.Client, + Key: params.Key, + Logger: *params.Logger, + LinodeClient: linodeClient, + PatchHelper: patchHelper, + }, nil +} + +// PatchObject persists the object storage key configuration and status. +func (s *ObjectStorageKeyScope) PatchObject(ctx context.Context) error { + return s.PatchHelper.Patch(ctx, s.Key) +} + +// Close closes the current scope persisting the object storage key configuration and status. +func (s *ObjectStorageKeyScope) Close(ctx context.Context) error { + return s.PatchObject(ctx) +} + +// AddFinalizer adds a finalizer if not present and immediately patches the +// object to avoid any race conditions. +func (s *ObjectStorageKeyScope) AddFinalizer(ctx context.Context) error { + if controllerutil.AddFinalizer(s.Key, infrav1alpha2.ObjectStorageKeyFinalizer) { + return s.Close(ctx) + } + + return nil +} + +const ( + accessKeySecretNameTemplate = "%s-obj-key" + + ClusterResourceSetSecretFilename = "etcd-backup.yaml" + BucketKeySecret = `kind: Secret +apiVersion: v1 +metadata: + name: %s +stringData: + bucket_name: %s + bucket_region: %s + bucket_endpoint: %s + access_key: %s + secret_key: %s` +) + +var secretTypeExpectedKey = map[corev1.SecretType]string{ + corev1.SecretTypeOpaque: "access_key", + clusteraddonsv1.ClusterResourceSetSecretType: ClusterResourceSetSecretFilename, +} + +// GenerateKeySecret returns a secret suitable for submission to the Kubernetes API. +// The secret is expected to contain keys for accessing the bucket, as well as owner and controller references. 
+func (s *ObjectStorageKeyScope) GenerateKeySecret(ctx context.Context, key *linodego.ObjectStorageKey) (*corev1.Secret, error) { + if key == nil { + return nil, errors.New("expected non-nil object storage key") + } + + var secretStringData map[string]string + + secretName := fmt.Sprintf(accessKeySecretNameTemplate, s.Key.Name) + + // If the desired secret is of ClusterResourceSet type, wrap the key data in a complete Secret manifest that the ClusterResourceSet can apply. + // Bucket details are retrieved from the first referenced LinodeObjectStorageBucket in the access key. + if s.Key.Spec.SecretType == clusteraddonsv1.ClusterResourceSetSecretType { + // This should never run since the CRD has a validation marker to ensure bucketAccess has at least one item. + if len(s.Key.Spec.BucketAccess) == 0 { + return nil, fmt.Errorf("unable to generate %s; spec.bucketAccess must not be empty", clusteraddonsv1.ClusterResourceSetSecretType) + } + + bucketRef := s.Key.Spec.BucketAccess[0] + bucket, err := s.LinodeClient.GetObjectStorageBucket(ctx, bucketRef.Region, bucketRef.BucketName) + if err != nil { + return nil, fmt.Errorf("unable to generate %s; failed to get bucket: %w", clusteraddonsv1.ClusterResourceSetSecretType, err) + } + + secretStringData = map[string]string{ + ClusterResourceSetSecretFilename: fmt.Sprintf( + BucketKeySecret, + secretName, + bucket.Label, + bucket.Region, + bucket.Hostname, + key.AccessKey, + key.SecretKey, + ), + } + } else { + secretStringData = map[string]string{ + "access_key": key.AccessKey, + "secret_key": key.SecretKey, + } + } + + secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: s.Key.Namespace, + }, + Type: s.Key.Spec.SecretType, + StringData: secretStringData, + } + + scheme := s.Client.Scheme() + if err := controllerutil.SetOwnerReference(s.Key, &secret, scheme); err != nil { + return nil, fmt.Errorf("could not set owner ref on access key secret %s: %w", secretName, err) + } + if err := controllerutil.SetControllerReference(s.Key, &secret, scheme); err != nil { + return nil, fmt.Errorf("could not set controller ref on access key secret %s: %w", secretName, err) + } + + return &secret, nil +} + +func (s *ObjectStorageKeyScope) ShouldInitKey() bool { + return s.Key.Status.LastKeyGeneration == nil +} + +func (s *ObjectStorageKeyScope) ShouldRotateKey() bool { + return s.Key.Status.LastKeyGeneration != nil && + s.Key.Spec.KeyGeneration != *s.Key.Status.LastKeyGeneration +} + +func (s *ObjectStorageKeyScope) ShouldReconcileKeySecret(ctx context.Context) (bool, error) { + if s.Key.Status.SecretName == nil { + return false, nil + } + + secret := &corev1.Secret{} + key := client.ObjectKey{Namespace: s.Key.Namespace, Name: *s.Key.Status.SecretName} + err := s.Client.Get(ctx, key, secret) + if apierrors.IsNotFound(err) { + return true, nil + } + if err != nil { + return false, err + } + + // Identify an expected key in secret.Data for the desired secret type. + // If it is missing, we must recreate the secret since the secret.type field is immutable.
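+ // The stale Secret is deleted here, and returning true tells the reconciler to regenerate it with the desired type.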
+ expectedKey, ok := secretTypeExpectedKey[s.Key.Spec.SecretType] + if !ok { + return false, errors.New("unsupported secret type configured in LinodeObjectStorageKey") + } + if _, ok := secret.Data[expectedKey]; !ok { + if err := s.Client.Delete(ctx, secret); err != nil { + return false, err + } + + return true, nil + } + + return false, nil +} diff --git a/cloud/scope/object_storage_key_test.go b/cloud/scope/object_storage_key_test.go new file mode 100644 index 000000000..b384eb4ff --- /dev/null +++ b/cloud/scope/object_storage_key_test.go @@ -0,0 +1,747 @@ +package scope + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/go-logr/logr" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + clusteraddonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" + "github.com/linode/cluster-api-provider-linode/mock" + + . "github.com/linode/cluster-api-provider-linode/clients" +) + +func TestValidateObjectStorageKeyScopeParams(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + params ObjectStorageKeyScopeParams + expectedErr error + }{ + { + name: "valid", + params: ObjectStorageKeyScopeParams{ + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + Logger: &logr.Logger{}, + }, + expectedErr: nil, + }, + { + name: "nil logger", + params: ObjectStorageKeyScopeParams{ + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + Logger: nil, + }, + expectedErr: fmt.Errorf("logger is required when creating an ObjectStorageKeyScope"), + }, + + { + name: "nil key", + params: ObjectStorageKeyScopeParams{ + Key: nil, + Logger: &logr.Logger{}, + }, + expectedErr: fmt.Errorf("object storage key is required when creating an ObjectStorageKeyScope"), + }, + } + for _, tt := range tests { + testcase := tt + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + err := validateObjectStorageKeyScopeParams(testcase.params) + if err != nil { + assert.EqualError(t, err, testcase.expectedErr.Error()) + } + }) + } +} + +func TestNewObjectStorageKeyScope(t *testing.T) { + t.Parallel() + + type args struct { + apiKey string + params ObjectStorageKeyScopeParams + } + tests := []struct { + name string + args args + expectedErr error + expects func(k8s *mock.MockK8sClient) + clientBuildFunc func(apiKey string) (LinodeClient, error) + }{ + { + name: "success", + args: args{ + apiKey: "apikey", + params: ObjectStorageKeyScopeParams{ + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + Logger: &logr.Logger{}, + }, + }, + expectedErr: nil, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }) + }, + }, + { + name: "with credentials from secret", + args: args{ + apiKey: "apikey", + params: ObjectStorageKeyScopeParams{ + Client: nil, + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + CredentialsRef: &corev1.SecretReference{ + Name: "example", + Namespace: "test", + }, + }, + }, + Logger: &logr.Logger{}, + }, + }, + expectedErr: nil, + expects: func(k8s *mock.MockK8sClient) 
{ + k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }) + k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, name types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + cred := corev1.Secret{ + Data: map[string][]byte{ + "apiToken": []byte("example"), + }, + } + *obj = cred + return nil + }) + }, + }, + { + name: "empty params", + args: args{ + apiKey: "apikey", + params: ObjectStorageKeyScopeParams{}, + }, + expectedErr: fmt.Errorf("object storage key is required"), + expects: func(k8s *mock.MockK8sClient) {}, + }, + { + name: "patch newHelper fail", + args: args{ + apiKey: "apikey", + params: ObjectStorageKeyScopeParams{ + Client: nil, + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + Logger: &logr.Logger{}, + }, + }, + expectedErr: fmt.Errorf("failed to init patch helper:"), + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Scheme().Return(runtime.NewScheme()) + }, + }, + { + name: "credentials from ref fail", + args: args{ + apiKey: "apikey", + params: ObjectStorageKeyScopeParams{ + Client: nil, + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + CredentialsRef: &corev1.SecretReference{ + Name: "example", + Namespace: "test", + }, + }, + }, + Logger: &logr.Logger{}, + }, + }, + expectedErr: fmt.Errorf("credentials from secret ref: get credentials secret test/example: failed to get secret"), + expects: func(mock *mock.MockK8sClient) { + mock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("failed to get secret")) + }, + }, + { + name: "empty apiKey", + args: args{ + apiKey: "", + params: ObjectStorageKeyScopeParams{ + Client: nil, + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + Logger: &logr.Logger{}, + }, + }, + expectedErr: fmt.Errorf("failed to create linode client: missing Linode API key"), + expects: func(mock *mock.MockK8sClient) {}, + }, + } + for _, tt := range tests { + testcase := tt + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockK8sClient := mock.NewMockK8sClient(ctrl) + + testcase.expects(mockK8sClient) + + testcase.args.params.Client = mockK8sClient + + got, err := NewObjectStorageKeyScope(context.Background(), testcase.args.apiKey, testcase.args.params) + + if testcase.expectedErr != nil { + assert.ErrorContains(t, err, testcase.expectedErr.Error()) + } else { + assert.NotEmpty(t, got) + } + }) + } +} + +func TestObjectStorageKeyAddFinalizer(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + Key *infrav1alpha2.LinodeObjectStorageKey + expects func(mock *mock.MockK8sClient) + }{ + { + name: "success", + Key: &infrav1alpha2.LinodeObjectStorageKey{}, + expects: func(mock *mock.MockK8sClient) { + mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }).Times(2) + mock.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + }, + { + name: "finalizer already present", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{infrav1alpha2.ObjectStorageKeyFinalizer}, + }, + }, + expects: func(mock *mock.MockK8sClient) { + mock.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }).Times(1) + }, + }, + } + for _, tt := range tests { + testcase := tt +
t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockK8sClient := mock.NewMockK8sClient(ctrl) + + testcase.expects(mockK8sClient) + + keyScope, err := NewObjectStorageKeyScope( + context.Background(), + "test-key", + ObjectStorageKeyScopeParams{ + Client: mockK8sClient, + Key: testcase.Key, + Logger: &logr.Logger{}, + }) + if err != nil { + t.Errorf("NewObjectStorageKeyScope() error = %v", err) + } + + if err := keyScope.AddFinalizer(context.Background()); err != nil { + t.Errorf("keyScope.AddFinalizer() error = %v", err) + } + + if keyScope.Key.Finalizers[0] != infrav1alpha2.ObjectStorageKeyFinalizer { + t.Errorf("Finalizer was not added") + } + }) + } +} + +func TestGenerateKeySecret(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + Key *infrav1alpha2.LinodeObjectStorageKey + key *linodego.ObjectStorageKey + expectedErr error + expectK8s func(*mock.MockK8sClient) + expectLinode func(*mock.MockLinodeClient) + }{ + { + name: "opaque secret", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "test-namespace", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("test-bucket-obj-key"), + }, + }, + key: &linodego.ObjectStorageKey{ + ID: 1, + Label: "read_write", + SecretKey: "read_write_key", + AccessKey: "read_write_access_key", + Limited: false, + BucketAccess: &[]linodego.ObjectStorageKeyBucketAccess{ + { + BucketName: "bucket", + Region: "test-bucket", + Permissions: "read_write", + }, + }, + }, + expectK8s: func(mck *mock.MockK8sClient) { + mck.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }).Times(1) + }, + expectedErr: nil, + }, + { + name: "cluster resource-set", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "test-namespace", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + BucketAccess: []infrav1alpha2.BucketAccessRef{ + { + BucketName: "bucket", + Region: "test-bucket", + Permissions: "read_write", + }, + }, + SecretType: clusteraddonsv1.ClusterResourceSetSecretType, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("test-bucket-obj-key"), + }, + }, + key: &linodego.ObjectStorageKey{ + ID: 1, + Label: "read_write", + SecretKey: "read_write_key", + AccessKey: "read_write_access_key", + Limited: false, + BucketAccess: &[]linodego.ObjectStorageKeyBucketAccess{ + { + BucketName: "bucket", + Region: "test-bucket", + Permissions: "read_write", + }, + }, + }, + expectK8s: func(mck *mock.MockK8sClient) { + mck.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme { + s := runtime.NewScheme() + infrav1alpha2.AddToScheme(s) + return s + }).Times(1) + }, + expectLinode: func(mck *mock.MockLinodeClient) { + mck.EXPECT().GetObjectStorageBucket(gomock.Any(), "test-bucket", "bucket").Return(&linodego.ObjectStorageBucket{ + Label: "bucket", + Region: "us-ord", + Hostname: "hostname", + }, nil) + }, + expectedErr: nil, + }, + { + name: "cluster resource-set with empty buckets", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "test-namespace", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + SecretType: clusteraddonsv1.ClusterResourceSetSecretType, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("test-bucket-obj-key"), + +
}, + key: &linodego.ObjectStorageKey{ + ID: 1, + Label: "read_write", + SecretKey: "read_write_key", + AccessKey: "read_write_access_key", + Limited: false, + BucketAccess: &[]linodego.ObjectStorageKeyBucketAccess{ + { + BucketName: "bucket", + Region: "test-bucket", + Permissions: "read_write", + }, + }, + }, + expectedErr: errors.New("unable to generate addons.cluster.x-k8s.io/resource-set; spec.bucketAccess must not be empty"), + }, + { + name: "missing key", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "test-namespace", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("test-bucket-obj-key"), + }, + }, + expectedErr: errors.New("expected non-nil object storage key"), + }, + { + name: "client scheme does not have infrav1alpha2", + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "test-namespace", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("test-bucket-obj-key"), + }, + }, + key: &linodego.ObjectStorageKey{ + ID: 1, + Label: "read_write", + SecretKey: "read_write_key", + AccessKey: "read_write_access_key", + Limited: false, + BucketAccess: &[]linodego.ObjectStorageKeyBucketAccess{ + { + BucketName: "bucket", + Region: "test-bucket", + Permissions: "read_write", + }, + }, + }, + expectK8s: func(mock *mock.MockK8sClient) { + mock.EXPECT().Scheme().Return(runtime.NewScheme()) + }, + expectedErr: fmt.Errorf("could not set owner ref on access key secret"), + }, + } + for _, tt := range tests { + testcase := tt + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockK8sClient := mock.NewMockK8sClient(ctrl) + if testcase.expectK8s != nil { + testcase.expectK8s(mockK8sClient) + } + + mockLinodeClient := mock.NewMockLinodeClient(ctrl) + if testcase.expectLinode != nil { + testcase.expectLinode(mockLinodeClient) + } + + keyScope := &ObjectStorageKeyScope{ + Client: mockK8sClient, + LinodeClient: mockLinodeClient, + Key: testcase.Key, + } + + secret, err := keyScope.GenerateKeySecret(context.Background(), testcase.key) + if testcase.expectedErr != nil { + require.ErrorContains(t, err, testcase.expectedErr.Error()) + return + } else if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "LinodeObjectStorageKey", secret.OwnerReferences[0].Kind) + assert.True(t, *secret.OwnerReferences[0].Controller) + }) + } +} + +func TestShouldInitKey(t *testing.T) { + t.Parallel() + + assert.True(t, (&ObjectStorageKeyScope{ + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + LastKeyGeneration: nil, + }, + }, + }).ShouldInitKey()) +} + +func TestShouldRotateKey(t *testing.T) { + t.Parallel() + + assert.False(t, (&ObjectStorageKeyScope{ + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + LastKeyGeneration: nil, + }, + }, + }).ShouldRotateKey()) + + assert.False(t, (&ObjectStorageKeyScope{ + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + KeyGeneration: 0, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + LastKeyGeneration: ptr.To(0), + }, + }, + }).ShouldRotateKey()) + + assert.True(t, (&ObjectStorageKeyScope{ + Key: &infrav1alpha2.LinodeObjectStorageKey{ + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + KeyGeneration: 1, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + 
LastKeyGeneration: ptr.To(0), + }, + }, + }).ShouldRotateKey()) +} + +func TestShouldReconcileKeySecret(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + key *infrav1alpha2.LinodeObjectStorageKey + expects func(k8s *mock.MockK8sClient) + want bool + expectedErr error + }{ + { + name: "status has no secret name", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: nil, + }, + }, + want: false, + }, + { + name: "secret has expected key", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + SecretType: corev1.SecretTypeOpaque, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{ + Data: map[string][]byte{ + "access_key": {}, + }, + } + return nil + }).AnyTimes() + }, + want: false, + }, + { + name: "secret is missing expected key", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + SecretType: corev1.SecretTypeOpaque, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{ + Data: map[string][]byte{ + "not_access_key": {}, + }, + } + return nil + }).AnyTimes() + k8s.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + want: true, + }, + { + name: "secret is missing", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT(). + Get(gomock.Any(), client.ObjectKey{Namespace: "ns", Name: "secret"}, gomock.Any()). + Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "secret")) + }, + want: true, + }, + { + name: "non-404 api error", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT(). + Get(gomock.Any(), client.ObjectKey{Namespace: "ns", Name: "secret"}, gomock.Any()). + Return(errors.New("unexpected error")) + }, + want: false, + expectedErr: errors.New("unexpected error"), + }, + { + name: "unsupported secret type", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + SecretType: "unsupported secret type", + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{ + Data: map[string][]byte{ + "not_access_key": {}, + }, + } + return nil + }).AnyTimes() + }, + want: false, + expectedErr: errors.New("unsupported secret type configured in LinodeObjectStorageKey"), + }, + { + name: "failed delete", + key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + }, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + SecretType: corev1.SecretTypeOpaque, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + SecretName: ptr.To("secret"), + }, + }, + expects: func(k8s *mock.MockK8sClient) { + k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + *obj = corev1.Secret{ + Data: map[string][]byte{ + "not_access_key": {}, + }, + } + return nil + }).AnyTimes() + k8s.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("failed delete")) + }, + want: false, + expectedErr: errors.New("failed delete"), + }, + } + for _, tt := range tests { + testcase := tt + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + var mockClient *mock.MockK8sClient + if testcase.expects != nil { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient = mock.NewMockK8sClient(ctrl) + testcase.expects(mockClient) + } + + keyScope := &ObjectStorageKeyScope{ + Client: mockClient, + Key: testcase.key, + } + + restore, err := keyScope.ShouldReconcileKeySecret(context.TODO()) + if testcase.expectedErr != nil { + require.ErrorContains(t, err, testcase.expectedErr.Error()) + } + + assert.Equal(t, testcase.want, restore) + }) + } +} diff --git a/cloud/services/object_storage_buckets.go b/cloud/services/object_storage_buckets.go index cb118ef82..dd86344d3 100644 --- a/cloud/services/object_storage_buckets.go +++ b/cloud/services/object_storage_buckets.go @@ -54,7 +54,7 @@ func RotateObjectStorageKeys(ctx context.Context, bScope *scope.ObjectStorageBuc {"read_only", "ro"}, } { keyLabel := fmt.Sprintf("%s-%s", bScope.Bucket.Name, permission.suffix) - key, err := createObjectStorageKey(ctx, bScope, keyLabel, permission.name) + key, err := createObjectStorageKeyForBucket(ctx, bScope, keyLabel, permission.name) if err != nil { return newKeys, err } @@ -72,7 +72,7 @@ func RotateObjectStorageKeys(ctx context.Context, bScope *scope.ObjectStorageBuc return newKeys, nil } -func createObjectStorageKey(ctx context.Context, bScope *scope.ObjectStorageBucketScope, label, permission string) (*linodego.ObjectStorageKey, error) { +func createObjectStorageKeyForBucket(ctx context.Context, bScope *scope.ObjectStorageBucketScope, label, permission string) (*linodego.ObjectStorageKey, error) { opts := linodego.ObjectStorageKeyCreateOptions{ Label: label, BucketAccess: &[]linodego.ObjectStorageKeyBucketAccess{ diff --git a/cloud/services/object_storage_keys.go b/cloud/services/object_storage_keys.go new file mode 100644 index 000000000..0dba8bedc --- /dev/null +++ b/cloud/services/object_storage_keys.go @@ -0,0 +1,75 @@ +package services + +import ( + "context" + "fmt" + "net/http" + + "github.com/linode/linodego" + + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/util" +) + +func RotateObjectStorageKey(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) (*linodego.ObjectStorageKey, error) { + key, err := 
createObjectStorageKey(ctx, keyScope) + if err != nil { + return nil, err + } + + // If key revocation is necessary and fails, just log the error since the new key has been created + if !keyScope.ShouldInitKey() && keyScope.ShouldRotateKey() { + if err := RevokeObjectStorageKey(ctx, keyScope); err != nil { + keyScope.Logger.Error(err, "Failed to revoke access key; key must be manually revoked") + } + } + + return key, nil +} + +func createObjectStorageKey(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) (*linodego.ObjectStorageKey, error) { + bucketAccess := make([]linodego.ObjectStorageKeyBucketAccess, len(keyScope.Key.Spec.BucketAccess)) + for idx, bucket := range keyScope.Key.Spec.BucketAccess { + bucketAccess[idx] = linodego.ObjectStorageKeyBucketAccess{ + Region: bucket.Region, + BucketName: bucket.BucketName, + Permissions: bucket.Permissions, + } + } + opts := linodego.ObjectStorageKeyCreateOptions{ + Label: keyScope.Key.Name, + BucketAccess: &bucketAccess, + } + + key, err := keyScope.LinodeClient.CreateObjectStorageKey(ctx, opts) + if err != nil { + keyScope.Logger.Error(err, "Failed to create access key", "label", opts.Label) + + return nil, fmt.Errorf("failed to create access key: %w", err) + } + + keyScope.Logger.Info("Created access key", "id", key.ID) + + return key, nil +} + +func RevokeObjectStorageKey(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) error { + err := keyScope.LinodeClient.DeleteObjectStorageKey(ctx, *keyScope.Key.Status.AccessKeyRef) + if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { + keyScope.Logger.Error(err, "Failed to revoke access key", "id", *keyScope.Key.Status.AccessKeyRef) + return fmt.Errorf("failed to revoke access key: %w", err) + } + + keyScope.Logger.Info("Revoked access key", "id", *keyScope.Key.Status.AccessKeyRef) + + return nil +} + +func GetObjectStorageKey(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) (*linodego.ObjectStorageKey, error) { + key, err := keyScope.LinodeClient.GetObjectStorageKey(ctx, *keyScope.Key.Status.AccessKeyRef) + if err != nil { + return nil, err + } + + return key, nil +} diff --git a/cloud/services/object_storage_keys_test.go b/cloud/services/object_storage_keys_test.go new file mode 100644 index 000000000..957b06cca --- /dev/null +++ b/cloud/services/object_storage_keys_test.go @@ -0,0 +1,160 @@ +package services + +import ( + "bytes" + "context" + "errors" + "testing" + + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/mock" + + . 
"github.com/linode/cluster-api-provider-linode/mock/mocktest" +) + +func TestRotateObjectStorageKey(t *testing.T) { + t.Parallel() + + NewSuite(t, mock.MockLinodeClient{}).Run( + OneOf( + Path(Call("create key", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().CreateObjectStorageKey(ctx, gomock.Any()).Return(&linodego.ObjectStorageKey{ID: 1, Label: "key"}, nil) + })), + Path( + Call("create key fail", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().CreateObjectStorageKey(ctx, gomock.Any()).Return(nil, errors.New("unable to create")) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := RotateObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{Name: "key"}, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + BucketAccess: []infrav1alpha2.BucketAccessRef{ + { + BucketName: "mybucket", + Region: "us-ord", + Permissions: "read_write", + }, + }, + }, + }, + }) + assert.ErrorContains(t, err, "unable to create") + }), + ), + ), + OneOf( + Path(Result("rotate not needed", func(ctx context.Context, mck Mock) { + key, err := RotateObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{Name: "key"}, + }, + }) + require.NoError(t, err) + assert.Equal(t, 1, key.ID) + assert.Equal(t, "key", key.Label) + })), + Path( + Call("delete old key", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(ctx, 0).Return(nil) + }), + Result("success", func(ctx context.Context, mck Mock) { + key, err := RotateObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{Name: "key"}, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + KeyGeneration: 1, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + LastKeyGeneration: ptr.To(0), + AccessKeyRef: ptr.To(0), + }, + }, + }) + require.NoError(t, err) + assert.Equal(t, 1, key.ID) + }), + ), + Path( + Call("delete old key fail", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(ctx, 0).Return(errors.New("fail")) + }), + Result("error logged", func(ctx context.Context, mck Mock) { + logs := &bytes.Buffer{} + + key, err := RotateObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Logger: zap.New(zap.WriteTo(logs)), + Key: &infrav1alpha2.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{Name: "key"}, + Spec: infrav1alpha2.LinodeObjectStorageKeySpec{ + KeyGeneration: 1, + }, + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + LastKeyGeneration: ptr.To(0), + AccessKeyRef: ptr.To(0), + }, + }, + }) + require.NoError(t, err) + assert.Equal(t, 1, key.ID) + + assert.Contains(t, logs.String(), "Failed to revoke access key; key must be manually revoked") + }), + ), + ), + ) +} + +func TestGetObjectStorageKey(t *testing.T) { + t.Parallel() + + key := infrav1alpha2.LinodeObjectStorageKey{ + Status: infrav1alpha2.LinodeObjectStorageKeyStatus{ + AccessKeyRef: ptr.To(0), + }, + } + + NewSuite(t, mock.MockLinodeClient{}).Run( + OneOf( + Path( + Call("get key", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(ctx, gomock.Any()).Return(&linodego.ObjectStorageKey{ID: 0, Label: "key"}, nil) + }), + Result("success", func(ctx context.Context, mck Mock) { + 
resp, err := GetObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Key: &key, + }) + require.NoError(t, err) + assert.Equal(t, 0, resp.ID) + assert.Equal(t, "key", resp.Label) + }), + ), + Path( + Call("get key fail", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(ctx, gomock.Any()).Return(nil, errors.New("fail")) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := GetObjectStorageKey(ctx, &scope.ObjectStorageKeyScope{ + LinodeClient: mck.LinodeClient, + Key: &key, + }) + assert.ErrorContains(t, err, "fail") + }), + ), + ), + ) +} diff --git a/cmd/main.go b/cmd/main.go index 5828ed28d..ded7b3e81 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -84,6 +84,7 @@ func main() { machineWatchFilter string clusterWatchFilter string objectStorageBucketWatchFilter string + objectStorageKeyWatchFilter string metricsAddr string enableLeaderElection bool probeAddr string @@ -215,9 +216,22 @@ func main() { os.Exit(1) } + if err = (&controller.LinodeObjectStorageKeyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Logger: ctrl.Log.WithName("LinodeObjectStorageKeyReconciler"), + Recorder: mgr.GetEventRecorderFor("LinodeObjectStorageKeyReconciler"), + WatchFilterValue: objectStorageKeyWatchFilter, + LinodeApiKey: linodeToken, + }).SetupWithManager(mgr, crcontroller.Options{MaxConcurrentReconciles: linodeObjectStorageBucketConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LinodeObjectStorageKey") + os.Exit(1) + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { setupWebhooks(mgr) } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml new file mode 100644 index 000000000..b7851fcbd --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: linodeobjectstoragekeys.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: LinodeObjectStorageKey + listKind: LinodeObjectStorageKeyList + plural: linodeobjectstoragekeys + shortNames: + - lobjkey + singular: linodeobjectstoragekey + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The ID assigned to the access key + jsonPath: .status.accessKeyRef + name: ID + type: string + - description: The name of the Secret containing access key data + jsonPath: .status.secretName + name: Secret + type: string + - description: Whether the access key is synced in the Linode API + jsonPath: .status.ready + name: Ready + type: string + name: v1alpha2 + schema: + openAPIV3Schema: + description: LinodeObjectStorageKey is the Schema for the linodeobjectstoragekeys + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinodeObjectStorageKeySpec defines the desired state of LinodeObjectStorageKey + properties: + bucketAccess: + description: BucketAccess is the list of object storage bucket labels + which can be accessed using the key + items: + properties: + bucketName: + type: string + permissions: + type: string + region: + type: string + required: + - bucketName + - permissions + - region + type: object + minItems: 1 + type: array + credentialsRef: + description: |- + CredentialsRef is a reference to a Secret that contains the credentials to use for generating access keys. + If not supplied then the credentials of the controller will be used. + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + keyGeneration: + default: 0 + description: KeyGeneration may be modified to trigger a rotation of + the access key. + type: integer + secretType: + default: Opaque + description: SecretType instructs the controller what type of secret + to generate containing access key details. + enum: + - Opaque + - addons.cluster.x-k8s.io/resource-set + type: string + required: + - bucketAccess + - keyGeneration + type: object + status: + description: LinodeObjectStorageKeyStatus defines the observed state of + LinodeObjectStorageKey + properties: + accessKeyRef: + description: AccessKeyRef stores the ID for Object Storage key provisioned. + type: integer + conditions: + description: Conditions specify the service state of the LinodeObjectStorageKey. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. 
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + creationTime: + description: CreationTime specifies the creation timestamp for the + secret. + format: date-time + type: string + failureMessage: + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Object Storage Key and will contain a verbose string + suitable for logging and human consumption. + type: string + lastKeyGeneration: + description: LastKeyGeneration tracks the last known value of .spec.keyGeneration. + type: integer + ready: + default: false + description: Ready denotes that the key has been provisioned. + type: boolean + secretName: + description: SecretName specifies the name of the Secret containing + access key data. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c1fd8a61e..2581e62d4 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -16,6 +16,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml - bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml - bases/infrastructure.cluster.x-k8s.io_linodeplacementgroups.yaml +- bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragekeys.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/default/manager_image_patch.yaml-e b/config/default/manager_image_patch.yaml-e new file mode 100644 index 000000000..c2ad88b8d --- /dev/null +++ b/config/default/manager_image_patch.yaml-e @@ -0,0 +1,12 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: docker.io/linode/cluster-api-provider-linode:main + name: manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index d822a1f4c..1b2b6dae3 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -20,6 +20,8 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. +- linodeobjectstoragekey_editor_role.yaml +- linodeobjectstoragekey_viewer_role.yaml - linodeobjectstoragebucket_editor_role.yaml - linodeobjectstoragebucket_viewer_role.yaml - linodemachinetemplate_editor_role.yaml @@ -30,4 +32,3 @@ resources: - linodevpc_viewer_role.yaml - linodemachine_editor_role.yaml - linodemachine_viewer_role.yaml - diff --git a/config/rbac/linodeobjectstoragekey_editor_role.yaml b/config/rbac/linodeobjectstoragekey_editor_role.yaml new file mode 100644 index 000000000..0747893e9 --- /dev/null +++ b/config/rbac/linodeobjectstoragekey_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit linodeobjectstoragekeys. 
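+# As noted in config/rbac/kustomization.yaml, these editor/viewer roles are not used by the project itself; they are installed as helpers for cluster admins.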
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: cluster-api-provider-linode
+    app.kubernetes.io/managed-by: kustomize
+  name: linodeobjectstoragekey-editor-role
+rules:
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys/status
+  verbs:
+  - get
diff --git a/config/rbac/linodeobjectstoragekey_viewer_role.yaml b/config/rbac/linodeobjectstoragekey_viewer_role.yaml new file mode 100644 index 000000000..504702afe --- /dev/null +++ b/config/rbac/linodeobjectstoragekey_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view linodeobjectstoragekeys.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: cluster-api-provider-linode
+    app.kubernetes.io/managed-by: kustomize
+  name: linodeobjectstoragekey-viewer-role
+rules:
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys/status
+  verbs:
+  - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8832438e6..6c237485d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -21,6 +21,7 @@ rules:
   - secrets
   verbs:
   - create
+  - delete
   - get
   - list
   - patch
@@ -120,6 +121,32 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - linodeobjectstoragekeys/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - infrastructure.cluster.x-k8s.io
   resources:
diff --git a/config/samples/infrastructure_v1alpha2_linodeobjectstoragekey.yaml b/config/samples/infrastructure_v1alpha2_linodeobjectstoragekey.yaml new file mode 100644 index 000000000..cdc726a4a --- /dev/null +++ b/config/samples/infrastructure_v1alpha2_linodeobjectstoragekey.yaml @@ -0,0 +1,12 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageKey
+metadata:
+  labels:
+    app.kubernetes.io/name: cluster-api-provider-linode
+    app.kubernetes.io/managed-by: kustomize
+  name: linodeobjectstoragekey-sample
+spec:
+  bucketAccess:
+  - bucketName: linodeobjectstoragebucket-sample
+    permissions: read_only
+    region: us-ord
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index db283218e..da20039ff 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -7,6 +7,7 @@ resources:
 - infrastructure_v1alpha1_linodevpc.yaml
 - infrastructure_v1alpha1_linodeobjectstoragebucket.yaml
 - infrastructure_v1alpha2_linodecluster.yaml
+- infrastructure_v1alpha2_linodeobjectstoragekey.yaml
 - infrastructure_v1alpha2_linodemachine.yaml
 - infrastructure_v1alpha2_linodevpc.yaml
 - infrastructure_v1alpha2_linodeobjectstoragebucket.yaml
diff --git a/controller/linodeobjectstoragebucket_controller.go b/controller/linodeobjectstoragebucket_controller.go index 15ab1eb31..68d79be36 100644 --- a/controller/linodeobjectstoragebucket_controller.go +++ b/controller/linodeobjectstoragebucket_controller.go @@ -109,6 +109,7 @@ func (r *LinodeObjectStorageBucketReconciler) Reconcile(ctx context.Context, req
 	return r.reconcile(ctx, bScope)
 }
+//nolint:dupl // This follows the pattern used for the LinodeObjectStorageKey controller.
 func (r *LinodeObjectStorageBucketReconciler) reconcile(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (res ctrl.Result, reterr error) {
 	// Always close the scope when exiting this function so we can persist any LinodeObjectStorageBucket changes.
 	defer func() {
@@ -260,6 +261,8 @@ func (r *LinodeObjectStorageBucketReconciler) reconcileDelete(ctx context.Contex
 }
 
 // SetupWithManager sets up the controller with the Manager.
+//
+//nolint:dupl // This follows the pattern used for the LinodeObjectStorageKey controller.
 func (r *LinodeObjectStorageBucketReconciler) SetupWithManager(mgr ctrl.Manager, options crcontroller.Options) error {
 	linodeObjectStorageBucketMapper, err := kutil.ClusterToTypedObjectsMapper(
 		r.TracedClient(),
diff --git a/controller/linodeobjectstoragekey_controller.go b/controller/linodeobjectstoragekey_controller.go new file mode 100644 index 000000000..12afd5ce4 --- /dev/null +++ b/controller/linodeobjectstoragekey_controller.go @@ -0,0 +1,297 @@ +/*
+Copyright 2023 Akamai Technologies, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/linode/linodego"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/tools/record"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	kutil "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	crcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
+	"github.com/linode/cluster-api-provider-linode/cloud/scope"
+	"github.com/linode/cluster-api-provider-linode/cloud/services"
+	wrappedruntimeclient "github.com/linode/cluster-api-provider-linode/observability/wrappers/runtimeclient"
+	wrappedruntimereconciler "github.com/linode/cluster-api-provider-linode/observability/wrappers/runtimereconciler"
+	"github.com/linode/cluster-api-provider-linode/util"
+	"github.com/linode/cluster-api-provider-linode/util/reconciler"
+)
+
+// LinodeObjectStorageKeyReconciler reconciles a LinodeObjectStorageKey object
+type LinodeObjectStorageKeyReconciler struct {
+	client.Client
+	Logger           logr.Logger
+	Recorder         record.EventRecorder
+	LinodeApiKey     string
+	WatchFilterValue string
+	Scheme           *runtime.Scheme
+	ReconcileTimeout time.Duration
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragekeys,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragekeys/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragekeys/finalizers,verbs=update
+
+// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
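+//
+// For LinodeObjectStorageKey this means provisioning (and, when keyGeneration is bumped,
+// rotating) an access key through the Linode API, mirroring it into a Kubernetes Secret,
+// and revoking the key when the resource is deleted.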
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile +func (r *LinodeObjectStorageKeyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) + defer cancel() + + logger := r.Logger.WithValues("name", req.NamespacedName.String()) + + tracedClient := r.TracedClient() + + objectStorageKey := &infrav1alpha2.LinodeObjectStorageKey{} + if err := tracedClient.Get(ctx, req.NamespacedName, objectStorageKey); err != nil { + if err = client.IgnoreNotFound(err); err != nil { + logger.Error(err, "Failed to fetch LinodeObjectStorageKey", "name", req.NamespacedName.String()) + } + + return ctrl.Result{}, err + } + + keyScope, err := scope.NewObjectStorageKeyScope( + ctx, + r.LinodeApiKey, + scope.ObjectStorageKeyScopeParams{ + Client: tracedClient, + Key: objectStorageKey, + Logger: &logger, + }, + ) + if err != nil { + logger.Error(err, "Failed to create object storage key scope") + + return ctrl.Result{}, fmt.Errorf("failed to create object storage key scope: %w", err) + } + + return r.reconcile(ctx, keyScope) +} + +//nolint:dupl // This follows the pattern used for the LinodeObjectStorageBucket controller. +func (r *LinodeObjectStorageKeyReconciler) reconcile(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) (res ctrl.Result, reterr error) { + // Always close the scope when exiting this function so we can persist any LinodeObjectStorageKey changes. + defer func() { + // Filter out any IsNotFound message since client.IgnoreNotFound does not handle aggregate errors + if err := keyScope.Close(ctx); utilerrors.FilterOut(err, apierrors.IsNotFound) != nil && reterr == nil { + keyScope.Logger.Error(err, "failed to patch LinodeObjectStorageKey") + reterr = err + } + }() + + if !keyScope.Key.DeletionTimestamp.IsZero() { + return res, r.reconcileDelete(ctx, keyScope) + } + + if err := keyScope.AddFinalizer(ctx); err != nil { + return res, err + } + + if err := r.reconcileApply(ctx, keyScope); err != nil { + return res, err + } + + return res, nil +} + +func (r *LinodeObjectStorageKeyReconciler) setFailure(keyScope *scope.ObjectStorageKeyScope, err error) { + keyScope.Key.Status.FailureMessage = util.Pointer(err.Error()) + r.Recorder.Event(keyScope.Key, corev1.EventTypeWarning, "Failed", err.Error()) + conditions.MarkFalse(keyScope.Key, clusterv1.ReadyCondition, "Failed", clusterv1.ConditionSeverityError, "%s", err.Error()) +} + +func (r *LinodeObjectStorageKeyReconciler) reconcileApply(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) error { + keyScope.Logger.Info("Reconciling apply") + + keyScope.Key.Status.Ready = false + + var keyForSecret *linodego.ObjectStorageKey + + switch { + // If no access key exists or key rotation is requested, make a new key + case keyScope.ShouldInitKey(), keyScope.ShouldRotateKey(): + key, err := services.RotateObjectStorageKey(ctx, keyScope) + if err != nil { + keyScope.Logger.Error(err, "Failed to provision new access key") + r.setFailure(keyScope, err) + + return err + } + + keyScope.Key.Status.AccessKeyRef = &key.ID + keyForSecret = key + + if keyScope.Key.Status.LastKeyGeneration == nil { + keyScope.Key.Status.CreationTime = &metav1.Time{Time: time.Now()} + } + + r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "KeyAssigned", "Object storage key assigned") + + // Ensure the generated secret still exists + case 
keyScope.Key.Status.AccessKeyRef != nil:
+		ok, err := keyScope.ShouldReconcileKeySecret(ctx)
+		if err != nil {
+			keyScope.Logger.Error(err, "Failed check for access key secret")
+			r.setFailure(keyScope, err)
+
+			return err
+		}
+
+		if ok {
+			key, err := services.GetObjectStorageKey(ctx, keyScope)
+			if err != nil {
+				keyScope.Logger.Error(err, "Failed to restore access key for modified/deleted secret")
+				r.setFailure(keyScope, err)
+
+				return err
+			}
+
+			keyForSecret = key
+
+			r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "KeyRetrieved", "Object storage key retrieved")
+		}
+	}
+
+	if keyForSecret != nil {
+		secret, err := keyScope.GenerateKeySecret(ctx, keyForSecret)
+		if err != nil {
+			keyScope.Logger.Error(err, "Failed to generate key secret")
+			r.setFailure(keyScope, err)
+
+			return err
+		}
+
+		emptySecret := &corev1.Secret{ObjectMeta: secret.ObjectMeta}
+		operation, err := controllerutil.CreateOrUpdate(ctx, keyScope.Client, emptySecret, func() error {
+			emptySecret.Type = keyScope.Key.Spec.SecretType
+			emptySecret.StringData = secret.StringData
+			emptySecret.Data = nil
+
+			return nil
+		})
+		if err != nil {
+			keyScope.Logger.Error(err, "Failed to apply key secret")
+			r.setFailure(keyScope, err)
+
+			return err
+		}
+
+		keyScope.Key.Status.SecretName = util.Pointer(secret.Name)
+
+		keyScope.Logger.Info(fmt.Sprintf("Secret %s was %s with access key", secret.Name, operation))
+		r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "KeyStored", "Object storage key stored in secret")
+	}
+
+	keyScope.Key.Status.LastKeyGeneration = &keyScope.Key.Spec.KeyGeneration
+	keyScope.Key.Status.Ready = true
+
+	conditions.MarkTrue(keyScope.Key, clusterv1.ReadyCondition)
+	r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "Synced", "Object storage key synced")
+
+	return nil
+}
+
+func (r *LinodeObjectStorageKeyReconciler) reconcileDelete(ctx context.Context, keyScope *scope.ObjectStorageKeyScope) error {
+	keyScope.Logger.Info("Reconciling delete")
+
+	if err := services.RevokeObjectStorageKey(ctx, keyScope); err != nil {
+		keyScope.Logger.Error(err, "failed to revoke access key; key must be manually revoked")
+		r.setFailure(keyScope, err)
+
+		return err
+	}
+
+	r.Recorder.Event(keyScope.Key, corev1.EventTypeNormal, "KeyRevoked", "Object storage key revoked")
+
+	if !controllerutil.RemoveFinalizer(keyScope.Key, infrav1alpha2.ObjectStorageKeyFinalizer) {
+		err := errors.New("failed to remove finalizer from key; unable to delete")
+		keyScope.Logger.Error(err, "controllerutil.RemoveFinalizer")
+		r.setFailure(keyScope, err)
+
+		return err
+	}
+	// TODO: remove this check and removal later
+	if controllerutil.ContainsFinalizer(keyScope.Key, infrav1alpha2.GroupVersion.String()) {
+		controllerutil.RemoveFinalizer(keyScope.Key, infrav1alpha2.GroupVersion.String())
+	}
+
+	return nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+//
+//nolint:dupl // This follows the pattern used for the LinodeObjectStorageBucket controller.
+func (r *LinodeObjectStorageKeyReconciler) SetupWithManager(mgr ctrl.Manager, options crcontroller.Options) error {
+	linodeObjectStorageKeyMapper, err := kutil.ClusterToTypedObjectsMapper(r.TracedClient(), &infrav1alpha2.LinodeObjectStorageKeyList{}, mgr.GetScheme())
+	if err != nil {
+		return fmt.Errorf("failed to create mapper for LinodeObjectStorageKeys: %w", err)
+	}
+
+	err = ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1alpha2.LinodeObjectStorageKey{}).
+		WithOptions(options).
+		Owns(&corev1.Secret{}).
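+		// Owning the generated Secret requeues this key whenever that Secret is modified or
+		// deleted, which drives the restore path in reconcileApply (ShouldReconcileKeySecret).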
+ WithEventFilter(predicate.And( + predicates.ResourceHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue), + predicate.GenerationChangedPredicate{}, + )). + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(linodeObjectStorageKeyMapper), + builder.WithPredicates(predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger())), + ).Complete(wrappedruntimereconciler.NewRuntimeReconcilerWithTracing(r, wrappedruntimereconciler.DefaultDecorator())) + if err != nil { + return fmt.Errorf("failed to build controller: %w", err) + } + + return nil +} + +func (r *LinodeObjectStorageKeyReconciler) TracedClient() client.Client { + return wrappedruntimeclient.NewRuntimeClientWithTracing(r.Client, wrappedruntimeclient.DefaultDecorator()) +} diff --git a/controller/linodeobjectstoragekey_controller_test.go b/controller/linodeobjectstoragekey_controller_test.go new file mode 100644 index 000000000..0d40f4cad --- /dev/null +++ b/controller/linodeobjectstoragekey_controller_test.go @@ -0,0 +1,537 @@ +/* +Copyright 2023 Akamai Technologies, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + + "github.com/linode/linodego" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusteraddonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/mock" + + . "github.com/linode/cluster-api-provider-linode/mock/mocktest" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("lifecycle", Ordered, Label("key", "lifecycle"), func() { + suite := NewControllerSuite(GinkgoT(), mock.MockLinodeClient{}) + + key := infrav1.LinodeObjectStorageKey{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lifecycle", + Namespace: "default", + }, + Spec: infrav1.LinodeObjectStorageKeySpec{ + BucketAccess: []infrav1.BucketAccessRef{ + { + BucketName: "mybucket", + Permissions: "read_only", + Region: "us-ord", + }, + }, + }, + } + + keyScope := scope.ObjectStorageKeyScope{ + Key: &key, + } + + reconciler := LinodeObjectStorageKeyReconciler{} + + BeforeAll(func(ctx SpecContext) { + keyScope.Client = k8sClient + Expect(k8sClient.Create(ctx, &key)).To(Succeed()) + }) + + suite.BeforeEach(func(ctx context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() + keyScope.Logger = mck.Logger() + + objectKey := client.ObjectKeyFromObject(&key) + Expect(k8sClient.Get(ctx, objectKey, &key)).To(Succeed()) + + // Create patch helper with latest state of resource. + // This is only needed when relying on envtest's k8sClient. + patchHelper, err := patch.NewHelper(&key, k8sClient) + Expect(err).NotTo(HaveOccurred()) + keyScope.PatchHelper = patchHelper + }) + + suite.Run( + OneOf( + Path( + Call("key is not created", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("create key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("create key error")) + }), + ), + Path( + Call("key is created", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().CreateObjectStorageKey(ctx, gomock.Any()). 
+ Return(&linodego.ObjectStorageKey{ + ID: 1, + AccessKey: "access-key-1", + SecretKey: "secret-key-1", + }, nil) + }), + Result("resources are updated", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&key) + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err).NotTo(HaveOccurred()) + + By("status") + Expect(k8sClient.Get(ctx, objectKey, &key)).To(Succeed()) + Expect(key.Status.Ready).To(BeTrue()) + Expect(key.Status.Conditions).To(HaveLen(1)) + Expect(key.Status.Conditions[0].Type).To(Equal(clusterv1.ReadyCondition)) + Expect(key.Status.CreationTime).NotTo(BeNil()) + Expect(*key.Status.LastKeyGeneration).To(Equal(key.Spec.KeyGeneration)) + Expect(*key.Status.LastKeyGeneration).To(Equal(0)) + Expect(*key.Status.AccessKeyRef).To(Equal(1)) + + By("secret") + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *key.Status.SecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(2)) + Expect(string(secret.Data["access_key"])).To(Equal("access-key-1")) + Expect(string(secret.Data["secret_key"])).To(Equal("secret-key-1")) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage key assigned")) + Expect(events).To(ContainSubstring("Object storage key stored in secret")) + Expect(events).To(ContainSubstring("Object storage key synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret %s was created with access key", *key.Status.SecretName)) + }), + ), + ), + Call("keyGeneration is modified", func(ctx context.Context, _ Mock) { + key.Spec.KeyGeneration = 1 + Expect(k8sClient.Update(ctx, &key)).To(Succeed()) + }), + OneOf( + Path( + Call("key is not rotated", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("rotate key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("rotate key error")) + }), + ), + Path( + Call("key is rotated", func(ctx context.Context, mck Mock) { + createCall := mck.LinodeClient.EXPECT().CreateObjectStorageKey(gomock.Any(), gomock.Any()). 
+ Return(&linodego.ObjectStorageKey{ + ID: 2, + AccessKey: "access-key-2", + SecretKey: "secret-key-2", + }, nil) + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 1).After(createCall).Return(nil) + }), + Result("resources are updated", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&key) + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err).NotTo(HaveOccurred()) + + By("status") + Expect(k8sClient.Get(ctx, objectKey, &key)).To(Succeed()) + Expect(*key.Status.LastKeyGeneration).To(Equal(1)) + Expect(*key.Status.AccessKeyRef).To(Equal(2)) + + By("secret") + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *key.Status.SecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(2)) + Expect(string(secret.Data["access_key"])).To(Equal("access-key-2")) + Expect(string(secret.Data["secret_key"])).To(Equal("secret-key-2")) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage key assigned")) + Expect(events).To(ContainSubstring("Object storage key stored in secret")) + Expect(events).To(ContainSubstring("Object storage key synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret %s was updated with access key", *key.Status.SecretName)) + }), + ), + ), + Once("secret is deleted", func(ctx context.Context, _ Mock) { + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *key.Status.SecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(k8sClient.Delete(ctx, &secret)).To(Succeed()) + }), + OneOf( + Path( + Call("(secret is deleted) > key is not retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(gomock.Any(), 2).Return(nil, errors.New("get key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("get key error")) + }), + ), + Path( + Call("(secret is deleted) > key is retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(gomock.Any(), 2). 
+ Return(&linodego.ObjectStorageKey{ + ID: 2, + AccessKey: "access-key-2", + SecretKey: "secret-key-2", + }, nil) + }), + Result("secret is recreated", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err).NotTo(HaveOccurred()) + + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *key.Status.SecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(2)) + Expect(string(secret.Data["access_key"])).To(Equal("access-key-2")) + Expect(string(secret.Data["secret_key"])).To(Equal("secret-key-2")) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage key retrieved")) + Expect(events).To(ContainSubstring("Object storage key stored in secret")) + Expect(events).To(ContainSubstring("Object storage key synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret %s was created with access key", *key.Status.SecretName)) + }), + ), + ), + Once("secretType set to cluster resource set", func(ctx context.Context, _ Mock) { + key.Spec.SecretType = clusteraddonsv1.ClusterResourceSetSecretType + Expect(k8sClient.Update(ctx, &key)).To(Succeed()) + }), + OneOf( + Path( + Call("(secretType set to cluster resource set) > key is not retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(gomock.Any(), 2).Return(nil, errors.New("get key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("get key error")) + }), + ), + Path( + Call("(secretType set to cluster resource set) > key is retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(gomock.Any(), 2). 
+ Return(&linodego.ObjectStorageKey{ + ID: 2, + AccessKey: "access-key-2", + SecretKey: "secret-key-2", + }, nil) + }), + OneOf( + Path( + Call("bucket is not retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), "us-ord", "mybucket").Return(nil, errors.New("get bucket error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("get bucket error")) + }), + ), + Path( + Call("bucket is retrieved", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), "us-ord", "mybucket").Return(&linodego.ObjectStorageBucket{ + Label: "mybucket", + Region: "us-ord", + Hostname: "mybucket.us-ord-1.linodeobjects.com", + }, nil) + }), + Result("secret is recreated as cluster resource set type", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err).NotTo(HaveOccurred()) + + var secret corev1.Secret + secretKey := client.ObjectKey{Namespace: "default", Name: *key.Status.SecretName} + Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveLen(1)) + Expect(string(secret.Data[scope.ClusterResourceSetSecretFilename])).To(Equal(fmt.Sprintf(scope.BucketKeySecret, + *key.Status.SecretName, + "mybucket", + "us-ord", + "mybucket.us-ord-1.linodeobjects.com", + "access-key-2", + "secret-key-2", + ))) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage key retrieved")) + Expect(events).To(ContainSubstring("Object storage key stored in secret")) + Expect(events).To(ContainSubstring("Object storage key synced")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling apply")) + Expect(logOutput).To(ContainSubstring("Secret %s was created with access key", *key.Status.SecretName)) + }), + ), + ), + ), + ), + Once("resource is deleted", func(ctx context.Context, _ Mock) { + // nb: client.Delete does not set DeletionTimestamp on the object, so re-fetch from the apiserver. 
+ objectKey := client.ObjectKeyFromObject(&key) + Expect(k8sClient.Delete(ctx, &key)).To(Succeed()) + Expect(k8sClient.Get(ctx, objectKey, &key)).To(Succeed()) + }), + OneOf( + Path( + Call("(resource is deleted) > key is not revoked", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 2).Return(errors.New("revoke key error")) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("revoke key error")) + }), + ), + Path( + Call("(resource is deleted) > key is revoked", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), 2).Return(nil) + }), + Result("finalizer is removed, resource is not found", func(ctx context.Context, mck Mock) { + objectKey := client.ObjectKeyFromObject(&key) + k8sClient.Get(ctx, objectKey, &key) + keyScope.LinodeClient = mck.LinodeClient + _, err := reconciler.reconcile(ctx, &keyScope) + Expect(err).NotTo(HaveOccurred()) + Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &key))).To(BeTrue()) + + events := mck.Events() + Expect(events).To(ContainSubstring("Object storage key revoked")) + + logOutput := mck.Logs() + Expect(logOutput).To(ContainSubstring("Reconciling delete")) + }), + ), + ), + ) +}) + +var _ = Describe("errors", Label("key", "errors"), func() { + suite := NewControllerSuite( + GinkgoT(), + mock.MockLinodeClient{}, + mock.MockK8sClient{}, + ) + + reconciler := LinodeObjectStorageKeyReconciler{} + keyScope := scope.ObjectStorageKeyScope{} + + suite.BeforeEach(func(_ context.Context, mck Mock) { + reconciler.Recorder = mck.Recorder() + keyScope.Logger = mck.Logger() + + // Reset obj to base state to be modified in each test path. + // We can use a consistent name since these tests are stateless. 
+		keyScope.Key = &infrav1.LinodeObjectStorageKey{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "mock",
+				Namespace: "default",
+			},
+			Spec: infrav1.LinodeObjectStorageKeySpec{
+				BucketAccess: []infrav1.BucketAccessRef{
+					{
+						BucketName:  "mybucket",
+						Permissions: "read_only",
+						Region:      "us-ord",
+					},
+				},
+			},
+		}
+	})
+
+	suite.Run(
+		OneOf(
+			Path(Call("resource can be fetched", func(ctx context.Context, mck Mock) {
+				mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+			})),
+			Path(
+				Call("resource is not found", func(ctx context.Context, mck Mock) {
+					mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, "mock"))
+				}),
+				Result("no error", func(ctx context.Context, mck Mock) {
+					reconciler.Client = mck.K8sClient
+					_, err := reconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(keyScope.Key),
+					})
+					Expect(err).NotTo(HaveOccurred())
+				}),
+			),
+			Path(
+				Call("resource can't be fetched", func(ctx context.Context, mck Mock) {
+					mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("non-404 error"))
+				}),
+				Result("error", func(ctx context.Context, mck Mock) {
+					reconciler.Client = mck.K8sClient
+					reconciler.Logger = keyScope.Logger
+					_, err := reconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(keyScope.Key),
+					})
+					Expect(err.Error()).To(ContainSubstring("non-404 error"))
+					Expect(mck.Logs()).To(ContainSubstring("Failed to fetch LinodeObjectStorageKey"))
+				}),
+			),
+		),
+		Result("scope params is missing args", func(ctx context.Context, mck Mock) {
+			reconciler.Client = mck.K8sClient
+			reconciler.Logger = keyScope.Logger
+			_, err := reconciler.Reconcile(ctx, reconcile.Request{
+				NamespacedName: client.ObjectKeyFromObject(keyScope.Key),
+			})
+			Expect(err.Error()).To(ContainSubstring("failed to create object storage key scope"))
+			Expect(mck.Logs()).To(ContainSubstring("Failed to create object storage key scope"))
+		}),
+		Call("scheme with no infrav1alpha2", func(ctx context.Context, mck Mock) {
+			prev := mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme)
+			mck.K8sClient.EXPECT().Scheme().After(prev).Return(runtime.NewScheme()).Times(2)
+		}),
+		Result("error", func(ctx context.Context, mck Mock) {
+			keyScope.Client = mck.K8sClient
+
+			patchHelper, err := patch.NewHelper(keyScope.Key, mck.K8sClient)
+			Expect(err).NotTo(HaveOccurred())
+			keyScope.PatchHelper = patchHelper
+
+			_, err = reconciler.reconcile(ctx, &keyScope)
+			Expect(err.Error()).To(ContainSubstring("no kind is registered"))
+		}),
+		OneOf(
+			Path(
+				Call("failed check for deleted secret", func(ctx context.Context, mck Mock) {
+					mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("api error"))
+				}),
+				Result("error", func(ctx context.Context, mck Mock) {
+					keyScope.Key.Spec.KeyGeneration = 1
+					keyScope.Key.Status.LastKeyGeneration = ptr.To(keyScope.Key.Spec.KeyGeneration)
+					keyScope.Key.Status.SecretName = ptr.To("mock-obj-key")
+					keyScope.Key.Status.AccessKeyRef = ptr.To(1)
+
+					keyScope.LinodeClient = mck.LinodeClient
+					keyScope.Client = mck.K8sClient
+					err := reconciler.reconcileApply(ctx, &keyScope)
+					Expect(err.Error()).To(ContainSubstring("api error"))
+					Expect(mck.Events()).To(ContainSubstring("api error"))
+					Expect(mck.Logs()).To(ContainSubstring("Failed check for access key secret"))
+				}),
+			),
+			Path(Call("secret deleted", func(ctx context.Context, mck Mock) {
+
mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-obj-key")) + })), + ), + Call("get key", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().GetObjectStorageKey(gomock.Any(), gomock.Any()).Return(&linodego.ObjectStorageKey{ID: 1}, nil) + }), + OneOf( + Path( + Call("secret resource creation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(scheme.Scheme).AnyTimes() + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{Resource: "Secret"}, "mock-obj-key")) + mck.K8sClient.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("secret creation error")) + }), + Result("creation error", func(ctx context.Context, mck Mock) { + keyScope.Key.Spec.KeyGeneration = 1 + keyScope.Key.Status.LastKeyGeneration = ptr.To(keyScope.Key.Spec.KeyGeneration) + keyScope.Key.Status.SecretName = ptr.To("mock-obj-key") + keyScope.Key.Status.AccessKeyRef = ptr.To(1) + + keyScope.LinodeClient = mck.LinodeClient + keyScope.Client = mck.K8sClient + err := reconciler.reconcileApply(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("secret creation error")) + Expect(mck.Events()).To(ContainSubstring("key retrieved")) + Expect(mck.Events()).To(ContainSubstring("secret creation error")) + Expect(mck.Logs()).To(ContainSubstring("Failed to apply key secret")) + }), + ), + Path( + Call("secret generation fails", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Scheme().Return(runtime.NewScheme()) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.Key.Spec.KeyGeneration = 1 + keyScope.Key.Status.LastKeyGeneration = ptr.To(keyScope.Key.Spec.KeyGeneration) + keyScope.Key.Status.SecretName = ptr.To("mock-obj-key") + keyScope.Key.Status.AccessKeyRef = ptr.To(1) + + keyScope.LinodeClient = mck.LinodeClient + keyScope.Client = mck.K8sClient + err := reconciler.reconcileApply(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("no kind is registered")) + Expect(mck.Events()).To(ContainSubstring("key retrieved")) + Expect(mck.Events()).To(ContainSubstring("no kind is registered")) + Expect(mck.Logs()).To(ContainSubstring("Failed to generate key secret")) + }), + ), + ), + Once("finalizer is missing", func(ctx context.Context, _ Mock) { + keyScope.Key.Status.AccessKeyRef = ptr.To(1) + keyScope.Key.ObjectMeta.Finalizers = []string{} + }), + Call("revoke key", func(ctx context.Context, mck Mock) { + mck.LinodeClient.EXPECT().DeleteObjectStorageKey(gomock.Any(), gomock.Any()).Return(nil) + }), + Result("error", func(ctx context.Context, mck Mock) { + keyScope.LinodeClient = mck.LinodeClient + keyScope.Client = mck.K8sClient + err := reconciler.reconcileDelete(ctx, &keyScope) + Expect(err.Error()).To(ContainSubstring("failed to remove finalizer from key")) + Expect(mck.Events()).To(ContainSubstring("failed to remove finalizer from key")) + }), + ) +}) diff --git a/docs/src/developers/testing.md b/docs/src/developers/testing.md index 59ec11e2c..33e08b9b8 100644 --- a/docs/src/developers/testing.md +++ b/docs/src/developers/testing.md @@ -236,6 +236,7 @@ There are other selectors you can use to invoke specfic tests. 
Please look at th | Linode Cluster Controller | `linodecluster` | | Linode Machine Controller | `linodemachine` | | Linode Obj Controller | `linodeobj` | +| Linode Obj Key Controller | `linodeobjkey` | | Linode VPC Controller | `linodevpc` | *Note: For any flavor e2e tests, please set the required env variables* diff --git a/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-capi-resources.yaml b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-capi-resources.yaml new file mode 100644 index 000000000..8a5b8dbab --- /dev/null +++ b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-capi-resources.yaml @@ -0,0 +1,15 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: capi-controller-manager + namespace: capi-system +status: + availableReplicas: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: capl-controller-manager + namespace: capl-system +status: + availableReplicas: 1 diff --git a/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-key-and-secret.yaml b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-key-and-secret.yaml new file mode 100644 index 000000000..6629be44a --- /dev/null +++ b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/assert-key-and-secret.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: LinodeObjectStorageKey +metadata: + name: ($key) +spec: + bucketAccess: + - bucketName: ($key) + permissions: read_only + region: us-sea + keyGeneration: 0 +status: + ready: true + secretName: ($access_key_secret) + lastKeyGeneration: 0 +--- +apiVersion: v1 +kind: Secret +metadata: + name: ($access_key_secret) diff --git a/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/chainsaw-test.yaml b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/chainsaw-test.yaml new file mode 100755 index 000000000..1cc040cab --- /dev/null +++ b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/chainsaw-test.yaml @@ -0,0 +1,106 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: minimal-linodeobjectstoragekey + # Label to trigger the test on every PR + labels: + all: + quick: + linodeobjkey: +spec: + bindings: + # A short identifier for the E2E test run + - name: run + value: (join('-', ['e2e', 'min-obj', env('GIT_REF')])) + - name: key + # Format the key name into a valid Kubernetes object name + # TODO: This is over-truncated to account for the Kubernetes access key Secret + value: (trim((truncate(($run), `52`)), '-')) + - name: access_key_secret + value: (join('-', [($key), 'obj-key'])) + template: true + steps: + - name: Check if CAPI provider resources exist + try: + - assert: + file: assert-capi-resources.yaml + - name: Create bucket + try: + - script: + env: + - name: URI + value: object-storage/buckets + - name: BUCKET_LABEL + value: ($key) + content: | + set -e + + curl -s \ + -X POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"label\":\"$BUCKET_LABEL\",\"region\":\"us-sea\"}" \ + "https://api.linode.com/v4/$URI" + check: + ($error): ~ + - name: Create LinodeObjectStorageKey + try: + - apply: + file: create-linodeobjectstoragekey.yaml + - assert: + file: assert-key-and-secret.yaml + catch: + - describe: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha2 + kind: LinodeObjectStorageKey + - describe: + apiVersion: v1 + kind: Secret + - name: Ensure the access key was created + try: + - script: + env: + - name: URI + value: object-storage/keys + - name: OBJ_KEY + value: ($key) + content: | + set -e + + export KEY_ID=$(kubectl -n $NAMESPACE get lobjkey $OBJ_KEY -ojson | jq '.status.accessKeyRef') + + curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/$URI/$KEY_ID" + check: + ($error): ~ + - name: Delete LinodeObjectStorageKey + try: + - delete: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + kind: LinodeObjectStorageKey + name: ($key) + - name: Check if the LinodeObjectStorageKey and Secret were deleted + try: + - error: + file: check-key-and-secret-deletion.yaml + - name: Delete bucket + try: + - script: + env: + - name: URI + value: object-storage/buckets/us-sea + - name: BUCKET_LABEL + value: ($key) + content: | + set -e + + curl -s \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/$URI/$BUCKET_LABEL" + check: + ($error): ~ \ No newline at end of file diff --git a/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/check-key-and-secret-deletion.yaml b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/check-key-and-secret-deletion.yaml new file mode 100644 index 000000000..85f76cb62 --- /dev/null +++ b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/check-key-and-secret-deletion.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: LinodeObjectStorageKey +metadata: + name: ($key) +--- +apiVersion: v1 +kind: Secret +metadata: + name: ($access_key_secret) diff --git a/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/create-linodeobjectstoragekey.yaml b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/create-linodeobjectstoragekey.yaml new file mode 100644 index 000000000..c6e133b8f --- /dev/null +++ b/e2e/linodeobjectstoragekey-controller/minimal-linodeobjectstoragekey/create-linodeobjectstoragekey.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: LinodeObjectStorageKey +metadata: + name: ($key) +spec: + bucketAccess: + - bucketName: ($key) + permissions: read_only + region: us-sea
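
As a closing illustration (not part of the patch): a minimal smoke-test sketch for exercising the new resource by hand, assuming the CRDs from this diff are installed and the controller is running with a valid Linode API token. The names demo-key and demo-bucket are hypothetical, and the bucket must already exist in us-ord; everything else mirrors what the chainsaw test above automates.

  # Hypothetical names throughout; the bucket must already exist in the region.
  kubectl apply -f - <<'EOF'
  apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
  kind: LinodeObjectStorageKey
  metadata:
    name: demo-key
    namespace: default
  spec:
    bucketAccess:
      - bucketName: demo-bucket
        permissions: read_only
        region: us-ord
    keyGeneration: 0
    secretType: Opaque
  EOF

  # `lobjkey` is the short name registered on the CRD (also used by the e2e script above).
  kubectl get lobjkey demo-key

  # The generated Secret carries access_key and secret_key entries; its name is
  # published in .status.secretName rather than assumed.
  kubectl get secret "$(kubectl get lobjkey demo-key -o jsonpath='{.status.secretName}')" -o yaml

  # Rotation: a keyGeneration ahead of status.lastKeyGeneration makes the controller
  # provision a replacement key, rewrite the Secret, and revoke the old key.
  kubectl patch lobjkey demo-key --type=merge -p '{"spec":{"keyGeneration":1}}'

  # Deleting the resource revokes the access key and garbage-collects the Secret.
  kubectl delete lobjkey demo-key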