diff --git a/api/v1alpha2/linodecluster_types.go b/api/v1alpha2/linodecluster_types.go index 26eeeb18d..daa2f8b79 100644 --- a/api/v1alpha2/linodecluster_types.go +++ b/api/v1alpha2/linodecluster_types.go @@ -52,6 +52,11 @@ type LinodeClusterSpec struct { // NodeBalancerFirewallRef is a reference to a NodeBalancer Firewall object. This makes the linode use the specified NodeBalancer Firewall. NodeBalancerFirewallRef *corev1.ObjectReference `json:"nodeBalancerFirewallRef,omitempty"` + // ObjectStore defines a supporting Object Storage bucket for cluster operations. This is currently used for + // bootstrapping (e.g. Cloud-init). + // +optional + ObjectStore *ObjectStore `json:"objectStore,omitempty"` + // CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not // supplied then the credentials of the controller will be used. // +optional @@ -173,6 +178,21 @@ type LinodeNBPortConfig struct { NodeBalancerConfigID *int `json:"nodeBalancerConfigID,omitempty"` } +// ObjectStore defines a supporting Object Storage bucket for cluster operations. This is currently used for +// bootstrapping (e.g. Cloud-init). +type ObjectStore struct { + // PresignedURLDuration defines the duration for which presigned URLs are valid. + // + // This is used to generate presigned URLs for S3 Bucket objects, which are used by + // control-plane and worker nodes to fetch bootstrap data. + // + // +optional + PresignedURLDuration *metav1.Duration `json:"presignedURLDuration,omitempty"` + + // CredentialsRef is a reference to a Secret that contains the credentials to use for accessing the Cluster Object Store. 
+ CredentialsRef corev1.SecretReference `json:"credentialsRef,omitempty"` +} + // +kubebuilder:object:root=true // LinodeClusterList contains a list of LinodeCluster diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index 21dff695d..f50b2638b 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1alpha2 import ( "github.com/linode/linodego" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/errors" @@ -353,6 +354,11 @@ func (in *LinodeClusterSpec) DeepCopyInto(out *LinodeClusterSpec) { *out = new(v1.ObjectReference) **out = **in } + if in.ObjectStore != nil { + in, out := &in.ObjectStore, &out.ObjectStore + *out = new(ObjectStore) + (*in).DeepCopyInto(*out) + } if in.CredentialsRef != nil { in, out := &in.CredentialsRef, &out.CredentialsRef *out = new(v1.SecretReference) @@ -1486,6 +1492,27 @@ func (in *NetworkSpec) DeepCopy() *NetworkSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStore) DeepCopyInto(out *ObjectStore) { + *out = *in + if in.PresignedURLDuration != nil { + in, out := &in.PresignedURLDuration, &out.PresignedURLDuration + *out = new(metav1.Duration) + **out = **in + } + out.CredentialsRef = in.CredentialsRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStore. +func (in *ObjectStore) DeepCopy() *ObjectStore { + if in == nil { + return nil + } + out := new(ObjectStore) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VPCIPv4) DeepCopyInto(out *VPCIPv4) { *out = *in diff --git a/cloud/scope/common.go b/cloud/scope/common.go index beb1b04e8..24f739afb 100644 --- a/cloud/scope/common.go +++ b/cloud/scope/common.go @@ -16,11 +16,16 @@ import ( "github.com/akamai/AkamaiOPEN-edgegrid-golang/v8/pkg/dns" "github.com/akamai/AkamaiOPEN-edgegrid-golang/v8/pkg/edgegrid" "github.com/akamai/AkamaiOPEN-edgegrid-golang/v8/pkg/session" + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/linode/linodego" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" "github.com/linode/cluster-api-provider-linode/observability/wrappers/linodeclient" "github.com/linode/cluster-api-provider-linode/version" @@ -33,6 +38,9 @@ const ( // MaxBodySize is the max payload size for Akamai edge dns client requests maxBody = 131072 + + // defaultObjectStorageSignedUrlExpiry is the default expiration for Object Storage signed URLs + defaultObjectStorageSignedUrlExpiry = 15 * time.Minute ) type Option struct { @@ -104,6 +112,47 @@ func CreateLinodeClient(config ClientConfig, opts ...Option) (LinodeClient, erro ), nil } +func CreateS3Clients(ctx context.Context, crClient K8sClient, cluster infrav1alpha2.LinodeCluster) (S3Client, S3PresignClient, error) { + var ( + configOpts = []func(*awsconfig.LoadOptions) error{ + awsconfig.WithRegion("auto"), + } + + clientOpts = []func(*s3.Options){} + ) + + // If we have a cluster object store bucket, get its configuration. 
+ if cluster.Spec.ObjectStore != nil { + secret, err := getCredentials(ctx, crClient, cluster.Spec.ObjectStore.CredentialsRef, cluster.GetNamespace()) + if err == nil { + var ( + access_key = string(secret.Data["access_key"]) + secret_key = string(secret.Data["secret_key"]) + s3_endpoint = string(secret.Data["s3_endpoint"]) + ) + + configOpts = append(configOpts, awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(access_key, secret_key, ""))) + clientOpts = append(clientOpts, func(opts *s3.Options) { + opts.BaseEndpoint = aws.String(s3_endpoint) + }) + } + } + + config, err := awsconfig.LoadDefaultConfig(ctx, configOpts...) + if err != nil { + return nil, nil, fmt.Errorf("load s3 config: %w", err) + } + + var ( + s3Client = s3.NewFromConfig(config, clientOpts...) + s3PresignClient = s3.NewPresignClient(s3Client, func(opts *s3.PresignOptions) { + opts.Expires = defaultObjectStorageSignedUrlExpiry + }) + ) + + return s3Client, s3PresignClient, nil +} + func setUpEdgeDNSInterface() (dnsInterface dns.DNS, err error) { edgeRCConfig := edgegrid.Config{ Host: os.Getenv("AKAMAI_HOST"), diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index b4599914e..080ca78fb 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -25,14 +25,16 @@ type MachineScopeParams struct { } type MachineScope struct { - Client K8sClient - PatchHelper *patch.Helper - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - TokenHash string - LinodeClient LinodeClient - LinodeCluster *infrav1alpha2.LinodeCluster - LinodeMachine *infrav1alpha2.LinodeMachine + Client K8sClient + S3Client S3Client + S3PresignClient S3PresignClient + PatchHelper *patch.Helper + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + TokenHash string + LinodeClient LinodeClient + LinodeCluster *infrav1alpha2.LinodeCluster + LinodeMachine *infrav1alpha2.LinodeMachine } func validateMachineScopeParams(params MachineScopeParams) error { @@ -62,20 +64,28 @@ func 
NewMachineScope(ctx context.Context, linodeClientConfig ClientConfig, param if err != nil { return nil, fmt.Errorf("failed to create linode client: %w", err) } + + s3client, s3PresignClient, err := CreateS3Clients(ctx, params.Client, *params.LinodeCluster) + if err != nil { + return nil, fmt.Errorf("create s3 clients: %w", err) + } + helper, err := patch.NewHelper(params.LinodeMachine, params.Client) if err != nil { return nil, fmt.Errorf("failed to init patch helper: %w", err) } return &MachineScope{ - Client: params.Client, - PatchHelper: helper, - Cluster: params.Cluster, - Machine: params.Machine, - TokenHash: GetHash(linodeClientConfig.Token), - LinodeClient: linodeClient, - LinodeCluster: params.LinodeCluster, - LinodeMachine: params.LinodeMachine, + Client: params.Client, + S3Client: s3client, + S3PresignClient: s3PresignClient, + PatchHelper: helper, + Cluster: params.Cluster, + Machine: params.Machine, + TokenHash: GetHash(linodeClientConfig.Token), + LinodeClient: linodeClient, + LinodeCluster: params.LinodeCluster, + LinodeMachine: params.LinodeMachine, }, nil } @@ -102,7 +112,7 @@ func (s *MachineScope) AddFinalizer(ctx context.Context) error { // GetBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. 
func (m *MachineScope) GetBootstrapData(ctx context.Context) ([]byte, error) { if m.Machine.Spec.Bootstrap.DataSecretName == nil { - return []byte{}, fmt.Errorf( + return nil, fmt.Errorf( "bootstrap data secret is nil for LinodeMachine %s/%s", m.LinodeMachine.Namespace, m.LinodeMachine.Name, @@ -112,7 +122,7 @@ func (m *MachineScope) GetBootstrapData(ctx context.Context) ([]byte, error) { secret := &corev1.Secret{} key := types.NamespacedName{Namespace: m.LinodeMachine.Namespace, Name: *m.Machine.Spec.Bootstrap.DataSecretName} if err := m.Client.Get(ctx, key, secret); err != nil { - return []byte{}, fmt.Errorf( + return nil, fmt.Errorf( "failed to retrieve bootstrap data secret for LinodeMachine %s/%s", m.LinodeMachine.Namespace, m.LinodeMachine.Name, @@ -131,6 +141,19 @@ func (m *MachineScope) GetBootstrapData(ctx context.Context) ([]byte, error) { return value, nil } +func (m *MachineScope) GetBucketName(ctx context.Context) (string, error) { + if m.LinodeCluster.Spec.ObjectStore == nil { + return "", errors.New("no cluster object store") + } + + name, err := getCredentialDataFromRef(ctx, m.Client, m.LinodeCluster.Spec.ObjectStore.CredentialsRef, m.LinodeCluster.GetNamespace(), "bucket_name") + if err != nil { + return "", fmt.Errorf("get bucket name: %w", err) + } + + return string(name), nil +} + func (s *MachineScope) AddCredentialsRefFinalizer(ctx context.Context) error { // Only add the finalizer if the machine has an override for the credentials reference if s.LinodeMachine.Spec.CredentialsRef == nil { diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index b8b1b6415..b2f086a81 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -264,6 +264,35 @@ func TestNewMachineScope(t *testing.T) { require.NoError(t, err) assert.NotNil(t, mScope) })), + Path( + Call("cluster object store used", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + }), + Result("success", func(ctx context.Context, mck Mock) { + mScope, err := NewMachineScope(ctx, ClientConfig{Token: "apiToken"}, MachineScopeParams{ + Client: mck.K8sClient, + Cluster: &clusterv1.Cluster{}, + Machine: &clusterv1.Machine{}, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}, + }, + }}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }) + require.NoError(t, err) + assert.NotNil(t, mScope) + })), ), ) } diff --git a/cloud/services/object_storage_objects.go b/cloud/services/object_storage_objects.go new file mode 100644 index 000000000..b6c5e6382 --- /dev/null +++ b/cloud/services/object_storage_objects.go @@ -0,0 +1,139 @@ +package services + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go" + + "github.com/linode/cluster-api-provider-linode/cloud/scope" +) + +func validateObjectScopeParams(mscope *scope.MachineScope) error { + if mscope == nil { + return errors.New("nil machine scope") + } + + if mscope.S3Client == nil { + return errors.New("nil S3 client") + } + + if mscope.LinodeCluster.Spec.ObjectStore == nil { + return errors.New("nil cluster object store") + } + + return nil +} + +func CreateObject(ctx context.Context, mscope *scope.MachineScope, data []byte) (string, error) { + if err := validateObjectScopeParams(mscope); err != nil { + 
return "", err + } + if len(data) == 0 { + return "", errors.New("empty data") + } + + bucket, err := mscope.GetBucketName(ctx) + if err != nil { + return "", err + } + if bucket == "" { + return "", errors.New("missing bucket name") + } + + // Key by UUID for shared buckets. + key := string(mscope.LinodeMachine.ObjectMeta.UID) + + if _, err := mscope.S3Client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + Body: s3manager.ReadSeekCloser(bytes.NewReader(data)), + }); err != nil { + return "", fmt.Errorf("put object: %w", err) + } + + var opts []func(*s3.PresignOptions) + if mscope.LinodeCluster.Spec.ObjectStore.PresignedURLDuration != nil { + opts = append(opts, func(opts *s3.PresignOptions) { + opts.Expires = mscope.LinodeCluster.Spec.ObjectStore.PresignedURLDuration.Duration + }) + } + + req, err := mscope.S3PresignClient.PresignGetObject( + ctx, + &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }, + opts...) + if err != nil { + return "", fmt.Errorf("generate presigned url: %w", err) + } + + return req.URL, nil +} + +func DeleteObject(ctx context.Context, mscope *scope.MachineScope) error { + if err := validateObjectScopeParams(mscope); err != nil { + return err + } + + bucket, err := mscope.GetBucketName(ctx) + if err != nil { + return err + } + if bucket == "" { + return errors.New("missing bucket name") + } + + // Key by UUID for shared buckets. + key := string(mscope.LinodeMachine.ObjectMeta.UID) + + _, err = mscope.S3Client.HeadObject( + ctx, + &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + var ( + ae smithy.APIError + bne *types.NoSuchBucket + kne *types.NoSuchKey + nf *types.NotFound + ) + switch { + // In the case that the IAM policy does not have sufficient permissions to get the object, we will attempt to + // delete it anyway for backwards compatibility reasons. 
+ case errors.As(err, &ae) && ae.ErrorCode() == "Forbidden": + break + // Specified bucket does not exist. + case errors.As(err, &bne): + return nil + // Specified key does not exist. + case errors.As(err, &kne): + return nil + // Object not found. + case errors.As(err, &nf): + return nil + default: + return fmt.Errorf("delete object: %w", err) + } + } + + if _, err = mscope.S3Client.DeleteObject(ctx, + &s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }); err != nil { + return fmt.Errorf("delete object: %w", err) + } + + return nil +} diff --git a/cloud/services/object_storage_objects_test.go b/cloud/services/object_storage_objects_test.go new file mode 100644 index 000000000..c5426e8f2 --- /dev/null +++ b/cloud/services/object_storage_objects_test.go @@ -0,0 +1,415 @@ +package services + +import ( + "context" + "errors" + "testing" + + awssigner "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/mock" + + . 
"github.com/linode/cluster-api-provider-linode/mock/mocktest" +) + +func TestCreateObject(t *testing.T) { + t.Parallel() + + NewSuite(t, mock.MockK8sClient{}, mock.MockS3Client{}, mock.MockS3PresignClient{}).Run( + OneOf( + Path( + Result("nil machine scope", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, nil, []byte("fake")) + assert.ErrorContains(t, err, "nil machine scope") + }), + Result("nil s3 client", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}}, + []byte("fake")) + assert.ErrorContains(t, err, "nil S3 client") + }), + Result("nil cluster object store", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{}, + }, []byte("fake")) + assert.ErrorContains(t, err, "nil cluster object store") + }), + Result("no data", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + }, nil) + assert.ErrorContains(t, err, "empty data") + }), + ), + Path( + Call("fail to get bucket name", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("get credentials ref")) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: 
infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + }, []byte("fake")) + assert.ErrorContains(t, err, "get bucket name") + }), + Call("empty bucket name", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": nil, + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + }, []byte("fake")) + assert.ErrorContains(t, err, "missing bucket name") + }), + ), + ), + OneOf( + Path( + Call("fail to put object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail")) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + 
LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "fake"}, + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }, []byte("fake")) + assert.ErrorContains(t, err, "put object") + }), + ), + Path( + Call("fail to generate presigned url", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.PutObjectOutput{}, nil) + mck.S3PresignClient.EXPECT().PresignGetObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail")) + }), + Result("error", func(ctx context.Context, mck Mock) { + _, err := CreateObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "fake"}, + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }, []byte("fake")) + assert.ErrorContains(t, err, "generate presigned url") + }), + ), + Path( + Call("create object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: 
map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.PutObjectOutput{}, nil) + mck.S3PresignClient.EXPECT().PresignGetObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&awssigner.PresignedHTTPRequest{URL: "https://example.com"}, nil) + }), + Result("success", func(ctx context.Context, mck Mock) { + url, err := CreateObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }, []byte("fake")) + require.NoError(t, err) + assert.Equal(t, "https://example.com", url) + }), + ), + ), + ) +} + +func TestDeleteObject(t *testing.T) { + t.Parallel() + + NewSuite(t, mock.MockK8sClient{}, mock.MockS3Client{}).Run( + OneOf( + Path( + Result("nil machine scope", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, nil) + assert.Error(t, err) + }), + Result("nil s3 client", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}}) + assert.Error(t, err) + }), + Result("nil cluster object store", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + LinodeCluster: &infrav1alpha2.LinodeCluster{}, + S3Client: &mock.MockS3Client{}}) + assert.ErrorContains(t, err, "nil cluster object store") + }), + Path( + Call("empty bucket name", func(ctx context.Context, mck Mock) { + 
mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": nil, + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + }), + Result("error", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + }) + assert.ErrorContains(t, err, "missing bucket name") + }), + ), + ), + Path( + Call("fail to get bucket name", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("get credentials ref")) + }), + Result("error", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + }) + assert.ErrorContains(t, err, "get bucket name") + }), + Call("empty bucket name", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": nil, + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + }), + Result("error", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: 
corev1.SecretReference{Name: "fake"}}}}, + }) + assert.ErrorContains(t, err, "missing bucket name") + }), + ), + ), + OneOf( + Path( + Call("fail to head object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail")) + }), + Result("error", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "fake"}, + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }) + assert.Error(t, err) + }), + ), + Path( + Call("fail to delete object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.HeadObjectOutput{}, nil) + mck.S3Client.EXPECT().DeleteObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail")) + }), + 
Result("error", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "fake"}, + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }) + assert.Error(t, err) + }), + ), + Path( + OneOf( + Path(Call("delete object (no such key)", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &types.NoSuchKey{}) + })), + Path(Call("delete object (no such bucket)", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &types.NoSuchBucket{}) + })), + Path(Call("delete object (not found)", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key 
client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &types.NotFound{}) + })), + Path(Call("delete object", func(ctx context.Context, mck Mock) { + mck.K8sClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key client.ObjectKey, obj *corev1.Secret, opts ...client.GetOption) error { + secret := corev1.Secret{Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "s3_endpoint": []byte("fake"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }} + *obj = secret + return nil + }) + mck.S3Client.EXPECT().HeadObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.HeadObjectOutput{}, nil) + mck.S3Client.EXPECT().DeleteObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.DeleteObjectOutput{}, nil) + })), + ), + Result("success", func(ctx context.Context, mck Mock) { + err := DeleteObject(ctx, &scope.MachineScope{ + Client: mck.K8sClient, + S3Client: mck.S3Client, + S3PresignClient: mck.S3PresignClient, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "fake"}, + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{ + CredentialsRef: corev1.SecretReference{Name: "fake"}}}}, + LinodeMachine: &infrav1alpha2.LinodeMachine{}, + }) + assert.NoError(t, err) + }), + ), + ), + ) +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index f2f885ea7..7d233d4a3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ 
b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -226,6 +226,33 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + objectStore: + description: |- + ObjectStore defines a supporting Object Storage bucket for cluster operations. This is currently used for + bootstrapping (e.g. Cloud-init). + properties: + credentialsRef: + description: CredentialsRef is a reference to a Secret that contains + the credentials to use for accessing the Cluster Object Store. + properties: + name: + description: name is unique within a namespace to reference + a secret resource. + type: string + namespace: + description: namespace defines the space within which the + secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + presignedURLDuration: + description: |- + PresignedURLDuration defines the duration for which presigned URLs are valid. + + This is used to generate presigned URLs for S3 Bucket objects, which are used by + control-plane and worker nodes to fetch bootstrap data. + type: string + type: object region: description: The Linode Region the LinodeCluster lives in. type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml index 22da070d8..be6117f9a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml @@ -220,6 +220,34 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + objectStore: + description: |- + ObjectStore defines a supporting Object Storage bucket for cluster operations. This is currently used for + bootstrapping (e.g. Cloud-init). + properties: + credentialsRef: + description: CredentialsRef is a reference to a Secret + that contains the credentials to use for accessing the + Cluster Object Store. 
+ properties: + name: + description: name is unique within a namespace to + reference a secret resource. + type: string + namespace: + description: namespace defines the space within which + the secret name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + presignedURLDuration: + description: |- + PresignedURLDuration defines the duration for which presigned URLs are valid. + + This is used to generate presigned URLs for S3 Bucket objects, which are used by + control-plane and worker nodes to fetch bootstrap data. + type: string + type: object region: description: The Linode Region the LinodeCluster lives in. type: string diff --git a/go.mod b/go.mod index 43a9d41d4..a9298d8d4 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,11 @@ go 1.23 require ( github.com/akamai/AkamaiOPEN-edgegrid-golang/v8 v8.4.0 github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42 github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0 + github.com/aws/smithy-go v1.22.1 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 @@ -34,14 +38,18 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect - github.com/aws/smithy-go v1.22.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/google/cel-go v0.20.1 // indirect diff --git a/go.sum b/go.sum index a630106a8..b8896fb18 100644 --- a/go.sum +++ b/go.sum @@ -24,10 +24,20 @@ github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42 h1:vEnk9vtjJ62OO2wOhEmgKMZgNcn1w0aF7XCiNXO5rK0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42/go.mod h1:GUOPbPJWRZsdt1OJ355upCrry4d3ZFgdX6rhT7gtkto= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod 
h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 h1:r67ps7oHCYnflpgDy2LZU0MAQtQbYIOqNNnqGO6xQkE= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25/go.mod h1:GrGY+Q4fIokYLtjCVB/aFfCVL6hhGUFl8inD18fDalE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= @@ -40,6 +50,12 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlm github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI= github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0 h1:HrHFR8RoS4l4EvodRMFcJMYQ8o3UhmALn2nbInXaxZA= github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= diff --git a/internal/controller/cloud-init.tmpl b/internal/controller/cloud-init.tmpl new file mode 100644 index 000000000..1161597b3 --- /dev/null +++ b/internal/controller/cloud-init.tmpl @@ -0,0 +1,4 @@ +#include +{{- range . }} +{{ . }} +{{- end }} diff --git a/internal/controller/linodemachine_controller.go b/internal/controller/linodemachine_controller.go index 0de9c9423..da8b6f8bc 100644 --- a/internal/controller/linodemachine_controller.go +++ b/internal/controller/linodemachine_controller.go @@ -513,6 +513,14 @@ func (r *LinodeMachineReconciler) reconcileUpdate(ctx context.Context, logger lo machineScope.LinodeMachine.Status.Ready = true conditions.MarkTrue(machineScope.LinodeMachine, clusterv1.ReadyCondition) } + + // Clean up after instance creation. + if linodeInstance.Status == linodego.InstanceRunning && machineScope.Machine.Status.Phase == "Running" { + if err := deleteBootstrapData(ctx, machineScope); err != nil { + logger.Error(err, "Failed to delete bootstrap data") + } + } + return ctrl.Result{}, nil } @@ -523,6 +531,10 @@ func (r *LinodeMachineReconciler) reconcileDelete( ) (ctrl.Result, error) { logger.Info("deleting machine") + if err := deleteBootstrapData(ctx, machineScope); err != nil { + logger.Error(err, "Failed to delete bootstrap data") + } + if machineScope.LinodeMachine.Spec.ProviderID == nil { logger.Info("Machine ID is missing, nothing to do") diff --git a/internal/controller/linodemachine_controller_helpers.go b/internal/controller/linodemachine_controller_helpers.go index dab7682a6..0184a2480 100644 --- a/internal/controller/linodemachine_controller_helpers.go +++ b/internal/controller/linodemachine_controller_helpers.go @@ -27,6 +27,7 @@ import ( "net/netip" "slices" "strings" + "text/template" "time" "github.com/go-logr/logr" @@ -50,6 +51,8 @@ import ( "github.com/linode/cluster-api-provider-linode/cloud/services"
"github.com/linode/cluster-api-provider-linode/util" "github.com/linode/cluster-api-provider-linode/util/reconciler" + + _ "embed" ) const ( @@ -59,6 +62,9 @@ const ( ) var ( + //go:embed cloud-init.tmpl + cloudConfigTemplate string + errNoPublicIPv4Addrs = errors.New("no public ipv4 addresses set") errNoPublicIPv6Addrs = errors.New("no public IPv6 address set") errNoPublicIPv6SLAACAddrs = errors.New("no public SLAAC address set") @@ -495,36 +501,14 @@ func compressUserData(bootstrapData []byte) ([]byte, error) { } func setUserData(ctx context.Context, machineScope *scope.MachineScope, createConfig *linodego.InstanceCreateOptions, gzipCompressionEnabled bool, logger logr.Logger) error { - bootstrapData, err := machineScope.GetBootstrapData(ctx) + bootstrapData, err := resolveBootstrapData(ctx, machineScope, gzipCompressionEnabled, logger) if err != nil { - logger.Error(err, "Failed to get bootstrap data") - return err } - userData := bootstrapData - //nolint:nestif // this is a temp flag until cloud-init is updated in cloud-init compatible images if machineScope.LinodeMachine.Status.CloudinitMetadataSupport { - if gzipCompressionEnabled { - userData, err = compressUserData(bootstrapData) - if err != nil { - logger.Error(err, "failed to compress bootstrap data") - - return err - } - } - bootstrapSize := len(userData) - if bootstrapSize > maxBootstrapDataBytesCloudInit { - err = errors.New("bootstrap data too large") - logger.Error(err, "decoded bootstrap data exceeds size limit", - "limit", maxBootstrapDataBytesCloudInit, - "size", bootstrapSize, - ) - - return err - } createConfig.Metadata = &linodego.InstanceMetadataOptions{ - UserData: b64.StdEncoding.EncodeToString(userData), + UserData: b64.StdEncoding.EncodeToString(bootstrapData), } } else { logger.Info("using StackScripts for bootstrapping") @@ -535,16 +519,6 @@ func setUserData(ctx context.Context, machineScope *scope.MachineScope, createCo "instancedata": 
b64.StdEncoding.EncodeToString([]byte(instanceData)), "userdata": b64.StdEncoding.EncodeToString(bootstrapData), } - stackscriptSize := len(fmt.Sprint(createConfig.StackScriptData)) - if stackscriptSize > maxBootstrapDataBytesStackscript { - err = errors.New("bootstrap data too large") - logger.Error(err, "decoded bootstrap data exceeds size limit", - "limit", maxBootstrapDataBytesStackscript, - "size", stackscriptSize, - ) - - return err - } createConfig.StackScriptID, err = services.EnsureStackscript(ctx, machineScope) if err != nil { return fmt.Errorf("ensure stackscript: %w", err) @@ -553,6 +527,80 @@ func setUserData(ctx context.Context, machineScope *scope.MachineScope, createCo return nil } +func resolveBootstrapData(ctx context.Context, machineScope *scope.MachineScope, gzipCompressionEnabled bool, logger logr.Logger) ([]byte, error) { + bootstrapdata, err := machineScope.GetBootstrapData(ctx) + if err != nil { + return nil, err + } + + var ( + size = len(bootstrapdata) + compressed []byte + limit int + ) + + // Determine limits for delivery service, e.g. Metadata vs. Stackscript. + if machineScope.LinodeMachine.Status.CloudinitMetadataSupport { + limit = maxBootstrapDataBytesCloudInit + } else { + limit = maxBootstrapDataBytesStackscript + } + + // Determine the delivery mechanism for the bootstrap data based on limits. This informs the formatting of the + // bootstrap data. + switch { + // Best case: Deliver data directly. + case size < limit: + return bootstrapdata, nil + // Compromise case (Metadata): Use compression. + case machineScope.LinodeMachine.Status.CloudinitMetadataSupport && gzipCompressionEnabled: + if compressed, err = compressUserData(bootstrapdata); err != nil { + // Break and use the Cluster Object Store workaround on compression failure. + logger.Info(fmt.Sprintf("Failed to compress bootstrap data: %v. 
Using Cluster Object Store instead.", err)) + break + } + + size = len(compressed) + if len(compressed) < limit { + return compressed, nil + } + } + + // Worst case: Upload to Cluster Object Store. + logger.Info("decoded bootstrap data exceeds size limit", "limit", limit, "size", size) + + if machineScope.LinodeCluster.Spec.ObjectStore == nil { + return nil, errors.New("must enable cluster object store feature to bootstrap linodemachine") + } + + logger.Info("Uploading bootstrap data to the Cluster Object Store") + + // Upload the original bootstrap data. + url, err := services.CreateObject(ctx, machineScope, bootstrapdata) + if err != nil { + return nil, fmt.Errorf("upload bootstrap data: %w", err) + } + + // Format a "pointer" cloud-config. + tmpl, err := template.New(string(machineScope.LinodeMachine.UID)).Parse(cloudConfigTemplate) + if err != nil { + return nil, fmt.Errorf("parse cloud-config template: %w", err) + } + var config bytes.Buffer + if err := tmpl.Execute(&config, []string{url}); err != nil { + return nil, fmt.Errorf("execute cloud-config template: %w", err) + } + + return config.Bytes(), err +} + +// NOTE: Prefer to keep this logic simple, by always assuming the LinodeMachine's cloud-config exists in Object Storage +// and attempting to delete it, then ignoring certain errors, e.g. no such key or bucket, etc. +// This *may* need to be revisited w.r.t. rate-limits for shared(?)
buckets 🤷‍♀️ +func deleteBootstrapData(ctx context.Context, machineScope *scope.MachineScope) error { + return services.DeleteObject(ctx, machineScope) +} + func createInstanceConfigDeviceMap(instanceDisks map[string]*infrav1alpha2.InstanceDisk, instanceConfig *linodego.InstanceConfigDeviceMap) error { for deviceName, disk := range instanceDisks { dev := linodego.InstanceConfigDevice{ diff --git a/internal/controller/linodemachine_controller_helpers_test.go b/internal/controller/linodemachine_controller_helpers_test.go index 77de048e4..4a8d8103b 100644 --- a/internal/controller/linodemachine_controller_helpers_test.go +++ b/internal/controller/linodemachine_controller_helpers_test.go @@ -9,7 +9,10 @@ import ( "fmt" "testing" - "github.com/go-logr/logr" + awssigner "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/go-logr/logr/testr" "github.com/linode/linodego" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -84,7 +87,7 @@ func TestSetUserData(t *testing.T) { createConfig *linodego.InstanceCreateOptions wantConfig *linodego.InstanceCreateOptions expectedError error - expects func(client *mock.MockLinodeClient, kClient *mock.MockK8sClient) + expects func(client *mock.MockLinodeClient, kClient *mock.MockK8sClient, s3Client *mock.MockS3Client, s3PresignedClient *mock.MockS3PresignClient) }{ { name: "Success - SetUserData metadata", @@ -108,7 +111,7 @@ func TestSetUserData(t *testing.T) { wantConfig: &linodego.InstanceCreateOptions{Metadata: &linodego.InstanceMetadataOptions{ UserData: b64.StdEncoding.EncodeToString(userData), }}, - expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient) { + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Client *mock.MockS3Client, s3PresignedClient *mock.MockS3PresignClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), 
gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -143,7 +146,7 @@ func TestSetUserData(t *testing.T) { "instancedata": b64.StdEncoding.EncodeToString([]byte("label: test-cluster\nregion: us-east\ntype: g6-standard-1")), "userdata": b64.StdEncoding.EncodeToString([]byte("test-data")), }}, - expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient) { + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Client *mock.MockS3Client, s3PresignedClient *mock.MockS3PresignClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -160,7 +163,7 @@ func TestSetUserData(t *testing.T) { }, }, { - name: "Error - SetUserData large bootstrap data for cloud-init", + name: "Success - SetUserData metadata and cluster object store (large bootstrap data)", machineScope: &scope.MachineScope{Machine: &v1beta1.Machine{ Spec: v1beta1.MachineSpec{ ClusterName: "", @@ -176,10 +179,18 @@ func TestSetUserData(t *testing.T) { }, Spec: infrav1alpha2.LinodeMachineSpec{Region: "us-ord", Image: "linode/ubuntu22.04"}, Status: infrav1alpha2.LinodeMachineStatus{CloudinitMetadataSupport: true}, + }, LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{CredentialsRef: corev1.SecretReference{Name: "fake"}}, + }, }}, createConfig: &linodego.InstanceCreateOptions{}, - wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient) { + wantConfig: &linodego.InstanceCreateOptions{Metadata: &linodego.InstanceMetadataOptions{ + UserData: b64.StdEncoding.EncodeToString([]byte(`#include +https://object.bucket.example.com 
+`)), + }}, + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Mock *mock.MockS3Client, s3PresignedMock *mock.MockS3PresignClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { largeData := make([]byte, maxBootstrapDataBytesCloudInit*10) _, rerr := rand.Read(largeData) @@ -192,11 +203,25 @@ func TestSetUserData(t *testing.T) { *obj = cred return nil }) + kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + cred := corev1.Secret{ + Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "bucket_endpoint": []byte("fake.example.com"), + "endpoint": []byte("example.com"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }, + } + *obj = cred + return nil + }) + s3Mock.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.PutObjectOutput{}, nil) + s3PresignedMock.EXPECT().PresignGetObject(gomock.Any(), gomock.Any()).Return(&awssigner.PresignedHTTPRequest{URL: "https://object.bucket.example.com"}, nil) }, - expectedError: fmt.Errorf("bootstrap data too large"), }, { - name: "Error - SetUserData large bootstrap data for stackscript", + name: "Success - SetUserData StackScript and Cluster Object Store (large bootstrap data)", machineScope: &scope.MachineScope{Machine: &v1beta1.Machine{ Spec: v1beta1.MachineSpec{ ClusterName: "", @@ -210,23 +235,52 @@ func TestSetUserData(t *testing.T) { Name: "test-cluster", Namespace: "default", }, - Spec: infrav1alpha2.LinodeMachineSpec{Region: "us-ord", Image: "linode/ubuntu22.04"}, + Spec: infrav1alpha2.LinodeMachineSpec{Region: "us-ord", Image: "linode/ubuntu22.04", Type: "g6-standard-2"}, Status: infrav1alpha2.LinodeMachineStatus{CloudinitMetadataSupport: false}, + }, LinodeCluster: 
&infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{CredentialsRef: corev1.SecretReference{Name: "fake"}}, + }, }}, createConfig: &linodego.InstanceCreateOptions{}, - wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient) { + wantConfig: &linodego.InstanceCreateOptions{ + StackScriptData: map[string]string{ + "instancedata": b64.StdEncoding.EncodeToString([]byte("label: test-cluster\nregion: us-ord\ntype: g6-standard-2")), + "userdata": b64.StdEncoding.EncodeToString([]byte(`#include +https://object.bucket.example.com +`)), + }, + }, + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Mock *mock.MockS3Client, s3PresignedMock *mock.MockS3PresignClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + largeData := make([]byte, maxBootstrapDataBytesStackscript*10) + _, rerr := rand.Read(largeData) + require.NoError(t, rerr, "Failed to create bootstrap data") cred := corev1.Secret{ Data: map[string][]byte{ - "value": make([]byte, maxBootstrapDataBytesStackscript+1), + "value": largeData, }, } *obj = cred return nil }) + kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + cred := corev1.Secret{ + Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "bucket_endpoint": []byte("fake.example.com"), + "s3_endpoint": []byte("example.com"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }, + } + *obj = cred + return nil + }) + s3Mock.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.PutObjectOutput{}, nil) + s3PresignedMock.EXPECT().PresignGetObject(gomock.Any(), 
gomock.Any()).Return(&awssigner.PresignedHTTPRequest{URL: "https://object.bucket.example.com"}, nil) + mockClient.EXPECT().ListStackscripts(gomock.Any(), gomock.Any()).Return([]linodego.Stackscript{{}}, nil) }, - expectedError: fmt.Errorf("bootstrap data too large"), }, { name: "Error - SetUserData get bootstrap data", @@ -249,7 +303,7 @@ func TestSetUserData(t *testing.T) { }}, createConfig: &linodego.InstanceCreateOptions{}, wantConfig: &linodego.InstanceCreateOptions{}, - expects: func(c *mock.MockLinodeClient, k *mock.MockK8sClient) { + expects: func(c *mock.MockLinodeClient, k *mock.MockK8sClient, s3Client *mock.MockS3Client, s3PresignedClient *mock.MockS3PresignClient) { }, expectedError: fmt.Errorf("bootstrap data secret is nil for LinodeMachine default/test-cluster"), }, @@ -276,7 +330,7 @@ func TestSetUserData(t *testing.T) { "instancedata": b64.StdEncoding.EncodeToString([]byte("label: test-cluster\nregion: us-east\ntype: g6-standard-1")), "userdata": b64.StdEncoding.EncodeToString([]byte("test-data")), }}, - expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient) { + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Client *mock.MockS3Client, s3PresignedClient *mock.MockS3PresignClient) { kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { cred := corev1.Secret{ Data: map[string][]byte{ @@ -290,6 +344,60 @@ func TestSetUserData(t *testing.T) { }, expectedError: fmt.Errorf("ensure stackscript: failed to get stackscript with label CAPL-dev: failed to get stackscripts"), }, + { + name: "Error - SetUserData failed to upload to Cluster Object Store", + machineScope: &scope.MachineScope{Machine: &v1beta1.Machine{ + Spec: v1beta1.MachineSpec{ + ClusterName: "", + Bootstrap: v1beta1.Bootstrap{ + DataSecretName: ptr.To("test-data"), + }, + InfrastructureRef: corev1.ObjectReference{}, + }, + 
}, LinodeMachine: &infrav1alpha2.LinodeMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: infrav1alpha2.LinodeMachineSpec{Region: "us-ord", Image: "linode/ubuntu22.04"}, + Status: infrav1alpha2.LinodeMachineStatus{CloudinitMetadataSupport: true}, + }, LinodeCluster: &infrav1alpha2.LinodeCluster{ + Spec: infrav1alpha2.LinodeClusterSpec{ + ObjectStore: &infrav1alpha2.ObjectStore{CredentialsRef: corev1.SecretReference{Name: "fake"}}, + }, + }}, + createConfig: &linodego.InstanceCreateOptions{}, + wantConfig: &linodego.InstanceCreateOptions{}, + expects: func(mockClient *mock.MockLinodeClient, kMock *mock.MockK8sClient, s3Mock *mock.MockS3Client, s3PresignedMock *mock.MockS3PresignClient) { + kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + largeData := make([]byte, max(maxBootstrapDataBytesCloudInit, maxBootstrapDataBytesStackscript)*10) + _, rerr := rand.Read(largeData) + require.NoError(t, rerr, "Failed to create bootstrap data") + cred := corev1.Secret{ + Data: map[string][]byte{ + "value": largeData, + }, + } + *obj = cred + return nil + }) + kMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, key types.NamespacedName, obj *corev1.Secret, opts ...client.GetOption) error { + cred := corev1.Secret{ + Data: map[string][]byte{ + "bucket_name": []byte("fake"), + "bucket_endpoint": []byte("fake.example.com"), + "s3_endpoint": []byte("example.com"), + "access_key": []byte("fake"), + "secret_key": []byte("fake"), + }, + } + *obj = cred + return nil + }) + s3Mock.EXPECT().PutObject(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &s3types.NoSuchBucket{}) + }, + expectedError: fmt.Errorf("put object"), + }, } for _, tt := range tests { testcase := tt @@ -301,10 +409,14 @@ func TestSetUserData(t *testing.T) { mockClient := 
mock.NewMockLinodeClient(ctrl) mockK8sClient := mock.NewMockK8sClient(ctrl) + mockS3Client := mock.NewMockS3Client(ctrl) + mockS3PresignClient := mock.NewMockS3PresignClient(ctrl) testcase.machineScope.LinodeClient = mockClient testcase.machineScope.Client = mockK8sClient - testcase.expects(mockClient, mockK8sClient) - logger := logr.Logger{} + testcase.machineScope.S3Client = mockS3Client + testcase.machineScope.S3PresignClient = mockS3PresignClient + testcase.expects(mockClient, mockK8sClient, mockS3Client, mockS3PresignClient) + logger := testr.New(t) err := setUserData(context.Background(), testcase.machineScope, testcase.createConfig, gzipCompressionFlag, logger) if testcase.expectedError != nil { diff --git a/internal/controller/linodeobjectstoragekey_controller_test.go b/internal/controller/linodeobjectstoragekey_controller_test.go index b3ef2ae5d..245c9af05 100644 --- a/internal/controller/linodeobjectstoragekey_controller_test.go +++ b/internal/controller/linodeobjectstoragekey_controller_test.go @@ -360,6 +360,10 @@ var _ = Describe("custom-secret", Label("key", "key-custom-secret"), func() { patchHelper, err := patch.NewHelper(keyScope.Key, k8sClient) Expect(err).NotTo(HaveOccurred()) keyScope.PatchHelper = patchHelper + + mck.LinodeClient.EXPECT().GetObjectStorageBucket(gomock.Any(), "us-ord", "mybucket").Return(&linodego.ObjectStorageBucket{ + Hostname: "hostname", + }, nil) }), Result("generates opaque secret with templated data", func(ctx context.Context, mck Mock) { _, err := reconciler.reconcile(ctx, &keyScope)