From 090c3e0f1a3ed89fc75482c5ca66dc90e586840c Mon Sep 17 00:00:00 2001 From: Cannon Palms Date: Mon, 27 Nov 2023 22:13:28 -0500 Subject: [PATCH] fix: move responsibility for managing k3s token to control plane controller (#71) * Move responsibility for creating the token required by nodes to join the cluster to the KThreesControlPlane controller --- .golangci.yml | 2 + .../controllers/kthreesconfig_controller.go | 70 +---- controlplane/api/v1beta1/condition_consts.go | 8 + .../kthreescontrolplane_controller.go | 9 + go.mod | 1 + go.sum | 1 + pkg/token/token.go | 128 ++++++++- pkg/token/token_test.go | 258 ++++++++++++++++++ 8 files changed, 411 insertions(+), 66 deletions(-) create mode 100644 pkg/token/token_test.go diff --git a/.golangci.yml b/.golangci.yml index 0726bca7..9b75d52d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -86,6 +86,8 @@ linters-settings: - sigs.k8s.io/cluster-api - github.com/cluster-api-provider-k3s/cluster-api-k3s + + - github.com/google/uuid gci: sections: - standard diff --git a/bootstrap/controllers/kthreesconfig_controller.go b/bootstrap/controllers/kthreesconfig_controller.go index 240e183d..b506af50 100644 --- a/bootstrap/controllers/kthreesconfig_controller.go +++ b/bootstrap/controllers/kthreesconfig_controller.go @@ -223,13 +223,13 @@ func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *S serverURL := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) - tokn, err := r.retrieveToken(ctx, scope) + tokn, err := token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return err } - configStruct := k3s.GenerateJoinControlPlaneConfig(serverURL, tokn, + configStruct := k3s.GenerateJoinControlPlaneConfig(serverURL, *tokn, scope.Cluster.Spec.ControlPlaneEndpoint.Host, 
scope.Config.Spec.ServerConfig, scope.Config.Spec.AgentConfig) @@ -284,13 +284,13 @@ func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) serverURL := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) - tokn, err := r.retrieveToken(ctx, scope) + tokn, err := token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return err } - configStruct := k3s.GenerateWorkerConfig(serverURL, tokn, scope.Config.Spec.ServerConfig, scope.Config.Spec.AgentConfig) + configStruct := k3s.GenerateWorkerConfig(serverURL, *tokn, scope.Config.Spec.ServerConfig, scope.Config.Spec.AgentConfig) b, err := kubeyaml.Marshal(configStruct) if err != nil { @@ -424,7 +424,7 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex } conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) - token, err := r.generateAndStoreToken(ctx, scope) + token, err := token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { return ctrl.Result{}, err } @@ -433,7 +433,7 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex // For now just use the etcd option configStruct := k3s.GenerateInitControlPlaneConfig( scope.Cluster.Spec.ControlPlaneEndpoint.Host, - token, + *token, scope.Config.Spec.ServerConfig, scope.Config.Spec.AgentConfig) @@ -480,64 +480,6 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex return r.reconcileKubeconfig(ctx, scope) } -func (r *KThreesConfigReconciler) generateAndStoreToken(ctx context.Context, scope *Scope) (string, error) { - tokn, err := token.Random(16) - if err != nil { - return "", err - } - - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: token.Name(scope.Cluster.Name), - 
Namespace: scope.Config.Namespace, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: scope.Cluster.Name, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: clusterv1.GroupVersion.String(), - Kind: "Cluster", - Name: scope.Cluster.Name, - UID: scope.Cluster.UID, - Controller: pointer.Bool(true), - }, - }, - }, - Data: map[string][]byte{ - "value": []byte(tokn), - }, - Type: clusterv1.ClusterSecretType, - } - - // as secret creation and scope.Config status patch are not atomic operations - // it is possible that secret creation happens but the config.Status patches are not applied - if err := r.Client.Create(ctx, secret); err != nil { - if !apierrors.IsAlreadyExists(err) { - return "", fmt.Errorf("failed to create token for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) - } - // r.Log.Info("bootstrap data secret for KThreesConfig already exists, updating", "secret", secret.Name, "KThreesConfig", scope.Config.Name) - if err := r.Client.Update(ctx, secret); err != nil { - return "", fmt.Errorf("failed to update bootstrap token secret for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) - } - } - - return tokn, nil -} - -func (r *KThreesConfigReconciler) retrieveToken(ctx context.Context, scope *Scope) (string, error) { - secret := &corev1.Secret{} - obj := client.ObjectKey{ - Namespace: scope.Config.Namespace, - Name: token.Name(scope.Cluster.Name), - } - - if err := r.Client.Get(ctx, obj, secret); err != nil { - return "", fmt.Errorf("failed to get token for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) - } - - return string(secret.Data["value"]), nil -} - func (r *KThreesConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { if r.KThreesInitLock == nil { r.KThreesInitLock = locking.NewControlPlaneInitMutex(ctrl.Log.WithName("init-locker"), mgr.GetClient()) diff --git a/controlplane/api/v1beta1/condition_consts.go 
b/controlplane/api/v1beta1/condition_consts.go index 9fb559a3..d8efb422 100644 --- a/controlplane/api/v1beta1/condition_consts.go +++ b/controlplane/api/v1beta1/condition_consts.go @@ -120,3 +120,11 @@ const ( // EtcdMemberUnhealthyReason (Severity=Error) documents a Machine's etcd member is unhealthy. EtcdMemberUnhealthyReason = "EtcdMemberUnhealthy" ) + +const ( + // TokenAvailableCondition documents whether the token required for nodes to join the cluster is available. + TokenAvailableCondition clusterv1.ConditionType = "TokenAvailable" + + // TokenGenerationFailedReason documents that the token required for nodes to join the cluster could not be generated. + TokenGenerationFailedReason = "TokenGenerationFailed" +) diff --git a/controlplane/controllers/kthreescontrolplane_controller.go b/controlplane/controllers/kthreescontrolplane_controller.go index f14d82f1..a880a425 100644 --- a/controlplane/controllers/kthreescontrolplane_controller.go +++ b/controlplane/controllers/kthreescontrolplane_controller.go @@ -50,6 +50,7 @@ import ( "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/kubeconfig" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/machinefilters" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/secret" + "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/token" ) // KThreesControlPlaneReconciler reconciles a KThreesControlPlane object. 
@@ -244,6 +245,7 @@ func patchKThreesControlPlane(ctx context.Context, patchHelper *patch.Helper, kc controlplanev1.MachinesReadyCondition, controlplanev1.AvailableCondition, controlplanev1.CertificatesAvailableCondition, + controlplanev1.TokenAvailableCondition, ), ) @@ -258,6 +260,7 @@ func patchKThreesControlPlane(ctx context.Context, patchHelper *patch.Helper, kc controlplanev1.MachinesReadyCondition, controlplanev1.AvailableCondition, controlplanev1.CertificatesAvailableCondition, + controlplanev1.TokenAvailableCondition, }}, ) } @@ -408,6 +411,12 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * } conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition) + if err := token.Reconcile(ctx, r.Client, client.ObjectKeyFromObject(cluster), kcp); err != nil { + conditions.MarkFalse(kcp, controlplanev1.TokenAvailableCondition, controlplanev1.TokenGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return reconcile.Result{}, err + } + conditions.MarkTrue(kcp, controlplanev1.TokenAvailableCondition) + // If ControlPlaneEndpoint is not set, return early if !cluster.Spec.ControlPlaneEndpoint.IsValid() { logger.Info("Cluster does not yet have a ControlPlaneEndpoint defined") diff --git a/go.mod b/go.mod index bffaee67..8834db04 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/coredns/caddy v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/zapr v1.2.3 // indirect diff --git a/go.sum b/go.sum index 808c7725..1f3833a0 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/pkg/token/token.go b/pkg/token/token.go index 67be9985..202347e8 100644 --- a/pkg/token/token.go +++ b/pkg/token/token.go @@ -1,12 +1,61 @@ package token import ( + "context" cryptorand "crypto/rand" "encoding/hex" "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) -func Random(size int) (string, error) { +func Lookup(ctx context.Context, ctrlclient client.Client, clusterKey client.ObjectKey) (*string, error) { + var s *corev1.Secret + var err error + + if s, err = getSecret(ctx, ctrlclient, clusterKey); err != nil { + return nil, fmt.Errorf("failed to lookup token: %v", err) + } + if val, ok := s.Data["value"]; ok { + ret := string(val) + return &ret, nil + } + + return nil, fmt.Errorf("found token secret without value") +} + +func Reconcile(ctx context.Context, ctrlclient client.Client, clusterKey client.ObjectKey, owner client.Object) error { + var s *corev1.Secret + var err error + + // Find the token secret + if s, err = getSecret(ctx, ctrlclient, clusterKey); err != nil { + if apierrors.IsNotFound(err) { + // Secret does not exist, create it + _, err = generateAndStore(ctx, ctrlclient, clusterKey, owner) 
+ return err + } + } + + // Secret exists + // Ensure the secret has correct ownership; this is necessary because at one point, the secret was owned by KThreesConfig + if !metav1.IsControlledBy(s, owner) { + upsertControllerRef(s, owner) + if err := ctrlclient.Update(ctx, s); err != nil { + return fmt.Errorf("failed to update ownership of token: %v", err) + } + } + + return nil +} + +// randomB64 generates a cryptographically secure random byte slice of length size and returns its hex encoding. +func randomB64(size int) (string, error) { token := make([]byte, size) _, err := cryptorand.Read(token) if err != nil { @@ -15,6 +64,81 @@ return hex.EncodeToString(token), err } -func Name(clusterName string) string { +// name returns the name of the token secret, computed by convention using the name of the cluster. +func name(clusterName string) string { return fmt.Sprintf("%s-token", clusterName) } + +func getSecret(ctx context.Context, ctrlclient client.Client, clusterKey client.ObjectKey) (*corev1.Secret, error) { + s := &corev1.Secret{} + key := client.ObjectKey{ + Name: name(clusterKey.Name), + Namespace: clusterKey.Namespace, + } + if err := ctrlclient.Get(ctx, key, s); err != nil { + return nil, err + } + + return s, nil +} + +func generateAndStore(ctx context.Context, ctrlclient client.Client, clusterKey client.ObjectKey, owner client.Object) (*string, error) { + tokn, err := randomB64(16) + if err != nil { + return nil, fmt.Errorf("failed to generate token: %v", err) + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name(clusterKey.Name), + Namespace: clusterKey.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterKey.Name, + }, + }, + Data: map[string][]byte{ + "value": []byte(tokn), + }, + Type: clusterv1.ClusterSecretType, + } + + //nolint:errcheck + controllerutil.SetControllerReference(owner, secret, ctrlclient.Scheme()) + + // as secret creation and scope.Config
status patch are not atomic operations + // it is possible that secret creation happens but the config.Status patches are not applied + if err := ctrlclient.Create(ctx, secret); err != nil { + return nil, fmt.Errorf("failed to store token: %v", err) + } + + return &tokn, nil +} + +// upsertControllerRef takes controllee and controller objects, either replaces the existing controller ref +// if one exists or appends the new controller ref if one does not exist, and returns the updated controllee +// This is meant to be used in place of controllerutil.SetControllerReference(...), which would throw an error +// if there were already an existing controller ref. +func upsertControllerRef(controllee client.Object, controller client.Object) { + newControllerRef := metav1.NewControllerRef(controller, controller.GetObjectKind().GroupVersionKind()) + + // Iterate through existing owner references + var updatedOwnerReferences []metav1.OwnerReference + var controllerRefUpdated bool + for _, ownerRef := range controllee.GetOwnerReferences() { + // Identify and replace the controlling owner reference + if ownerRef.Controller != nil && *ownerRef.Controller { + updatedOwnerReferences = append(updatedOwnerReferences, *newControllerRef) + controllerRefUpdated = true + } else { + // Keep non-controlling owner references intact + updatedOwnerReferences = append(updatedOwnerReferences, ownerRef) + } + } + + // If the controlling owner reference was not found, add the new one + if !controllerRefUpdated { + updatedOwnerReferences = append(updatedOwnerReferences, *newControllerRef) + } + + controllee.SetOwnerReferences(updatedOwnerReferences) +} diff --git a/pkg/token/token_test.go b/pkg/token/token_test.go new file mode 100644 index 00000000..b21e8ee9 --- /dev/null +++ b/pkg/token/token_test.go @@ -0,0 +1,258 @@ +package token + +import ( + "context" + "fmt" + "testing" + + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func TestLookup(t *testing.T) { + const testToken = "test-token" + + // Mock a Kubernetes client + ctrlClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() + + // Create a test cluster key + clusterKey := client.ObjectKey{Name: "test-cluster", Namespace: "default"} + + // Test case: Secret does not exist + if token, err := Lookup(context.Background(), ctrlClient, clusterKey); token != nil || err == nil { + t.Errorf("Lookup() should return nil token and error when secret does not exist") + } + + // Test case: Secret exists + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: name(clusterKey.Name), Namespace: clusterKey.Namespace}, + Data: map[string][]byte{"value": []byte(testToken)}, + Type: clusterv1.ClusterSecretType, + } + //nolint:errcheck + ctrlClient.Create(context.Background(), secret) + + if token, err := Lookup(context.Background(), ctrlClient, clusterKey); token == nil || *token != testToken || err != nil { + t.Errorf("Lookup() returned unexpected result. Expected: %v, Actual: %v, error: %v", testToken, token, err) + } +} + +func TestReconcile(t *testing.T) { + // Create a Pod as the controllingOwner. By using a Pod, we avoid having to deal with schemes. 
+ // This could just as easily be a KThreesControlPlane or any other object + controllingOwner := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "controlling-owner", + Namespace: "default", + UID: "5c4d7cd1-5345-4887-a545-f45bad557ffd", // random, but required for proper ownerRef comparison + }, + } + additionalOwner := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-controlling-owner", + Namespace: "default", + UID: "2592fce9-789b-4788-9543-d0e9d1fb8a91", + }, + } + newControllingOwner := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-controlling-owner", + Namespace: "default", + UID: "d0339496-2625-4087-9a7e-e7186f4b3aab", + }, + } + + // Mock a Kubernetes client + ctrlClient := fake.NewClientBuilder(). + WithObjects( + controllingOwner.DeepCopy(), + additionalOwner.DeepCopy(), + newControllingOwner.DeepCopy(), + ). + Build() + + // Create a test cluster key + clusterKey := client.ObjectKey{Name: "test-cluster", Namespace: "default"} + + // Test case: Secret does not exist + if err := Reconcile(context.Background(), ctrlClient, clusterKey, controllingOwner); err != nil { + t.Errorf("Reconcile() returned unexpected error when secret does not exist: %v", err) + } + + // Verify that the secret has been created + secret := &corev1.Secret{} + key := client.ObjectKey{Name: name(clusterKey.Name), Namespace: clusterKey.Namespace} + if err := ctrlClient.Get(context.Background(), key, secret); err != nil { + t.Errorf("Failed to get secret: %v", err) + } + + // Test case: Secret exists, ownership remains + if err := Reconcile(context.Background(), ctrlClient, clusterKey, controllingOwner); err != nil { + t.Errorf("Reconcile() returned unexpected error when secret exists: %v", err) + } + + if err := ctrlClient.Get(context.Background(), key, secret); err != nil { + t.Errorf("Failed to get secret: %v", err) + } + + // Verify that controlling ownership has been set + if !metav1.IsControlledBy(secret, controllingOwner) { + t.Error("Reconcile() did 
not set correct ownership for existing secret") + } + + // Test case: Secret already has a different controlling owner reference + // and an additional non-controlling owner reference + if err := addOwnerRef(ctrlClient, secret, additionalOwner); err != nil { + t.Errorf("Failed to add additional non-controlling owner: %v", err) + } + + if err := Reconcile(context.Background(), ctrlClient, clusterKey, newControllingOwner); err != nil { + t.Errorf("Reconcile() returned unexpected error when secret exists with different controlling owner: %v", err) + } + + if err := ctrlClient.Get(context.Background(), key, secret); err != nil { + t.Errorf("Failed to get secret: %v", err) + } + + // Verify that the new controller ref has replaced the old controller ref + if !metav1.IsControlledBy(secret, newControllingOwner) { + t.Error("Reconcile() did not set overwrite controlling ownership for existing secret") + } + + // Verify that the non-controlling owner ref is still present + if !isOwnedBy(secret, additionalOwner) { + t.Error("Reconcile() did not maintain existing non-controlling ownership for existing secret") + } +} + +func TestUpsertControllerRef(t *testing.T) { + // Helper function to create a new instance of TestObject + newPod := func(name string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + UID: types.UID(uuid.New().String()), + }, + } + } + + // Test case 1: No pre-existing owner references + t.Run("NoPreExistingOwnerReferences", func(t *testing.T) { + controllee := newPod("controllee") + controller := newPod("controller") + + upsertControllerRef(controllee, controller) + + if len(controllee.GetOwnerReferences()) != 1 { + t.Error("Expected one owner reference, got:", len(controllee.GetOwnerReferences())) + } + if !metav1.IsControlledBy(controllee, controller) { + t.Error("Expected controllee to be controlled by controller") + } + }) + + // Test case 2: Pre-existing controlling owner reference + 
t.Run("PreExistingControllingOwnerReference", func(t *testing.T) { + controllee := newPod("controllee") + oldController := newPod("old-controller") + newController := newPod("new-controller") + setControllerReference(oldController, controllee, scheme.Scheme) + + upsertControllerRef(controllee, newController) + + if len(controllee.GetOwnerReferences()) != 1 { + t.Error("Expected one owner reference, got:", len(controllee.GetOwnerReferences())) + } + if !metav1.IsControlledBy(controllee, newController) { + t.Error("Expected controllee to be controlled by controller") + } + }) + + // Test case 3: Pre-existing non-controlling owner reference + t.Run("PreExistingNonControllingOwnerReference", func(t *testing.T) { + controllee := newPod("controllee") + controller := newPod("controller") + otherObject := newPod("otherObject") + setOwnerReference(otherObject, controllee, scheme.Scheme) + + upsertControllerRef(controllee, controller) + + if len(controllee.GetOwnerReferences()) != 2 { + t.Error("Expected two owner references, got:", len(controllee.GetOwnerReferences())) + } + if !metav1.IsControlledBy(controllee, controller) { + t.Error("Expected controllee to be controlled by controller") + } + }) + + // Test case 4: Pre-existing non-controlling owner references and pre-existing controlling owner reference + t.Run("PreExistingNonControllingAndControllingOwnerReferences", func(t *testing.T) { + controllee := newPod("controllee") + + otherObject := newPod("otherObject") + setOwnerReference(otherObject, controllee, scheme.Scheme) + + oldController := newPod("old-controller") + existingControllerRef := metav1.NewControllerRef(oldController, oldController.GetObjectKind().GroupVersionKind()) + controllee.OwnerReferences = append(controllee.OwnerReferences, *existingControllerRef) + + newController := newPod("new-controller") + + upsertControllerRef(controllee, newController) + + if len(controllee.GetOwnerReferences()) != 2 { + t.Error("Expected two owner references, got:", 
len(controllee.GetOwnerReferences())) + } + if !metav1.IsControlledBy(controllee, newController) { + t.Error("Expected controllee to be controlled by controller") + } + }) +} + +func addOwnerRef(client client.Client, object, owner client.Object) error { + setOwnerReference(owner, object, client.Scheme()) + + if err := client.Update(context.Background(), object); err != nil { + return fmt.Errorf("failed to add owner to object: %v", err) + } + + return nil +} + +// isOwnedBy returns a boolean based upon whether the provided object "owned" +// has an owner reference for the provided object "owner". +func isOwnedBy(owned, owner metav1.Object) bool { + // Retrieve the owner references from the owned object + ownerReferences := owned.GetOwnerReferences() + + // Check if the owner references include the owner + for _, ref := range ownerReferences { + if ref.UID == owner.GetUID() { + return true + } + } + + return false +} + +// setOwnerReference is a helper function that wraps controllerutil.SetOwnerReference(...) +// with an //nolint:errcheck comment to suppress error check linters. +func setOwnerReference(owner, owned metav1.Object, scheme *runtime.Scheme) { + //nolint:errcheck + controllerutil.SetOwnerReference(owner, owned, scheme) +} + +// setControllerReference is a helper function that wraps controllerutil.SetControllerReference(...) +// with an //nolint:errcheck comment to suppress error check linters. +func setControllerReference(owner, owned metav1.Object, scheme *runtime.Scheme) { + //nolint:errcheck + controllerutil.SetControllerReference(owner, owned, scheme) +}