From e10305cad9c0afe4d5bb400af5861ea070e7e671 Mon Sep 17 00:00:00 2001
From: Evan Johnson
Date: Wed, 24 Jan 2024 17:50:09 -0500
Subject: [PATCH] add initial LinodeCluster controller logic

---
 api/v1alpha1/linodecluster_types.go           |  17 +-
 api/v1alpha1/zz_generated.deepcopy.go         |   2 +-
 cloud/services/loadbalancers.go               | 137 ++++++++++++
 cmd/main.go                                   |  15 +-
 ...cture.cluster.x-k8s.io_linodeclusters.yaml |  17 +-
 config/crd/kustomization.yaml                 |   5 +
 controller/linodecluster_controller.go        | 209 ++++++++++++++++--
 .../linodecluster_controller_helpers.go       |  83 +++++++
 8 files changed, 454 insertions(+), 31 deletions(-)
 create mode 100644 cloud/services/loadbalancers.go
 create mode 100644 controller/linodecluster_controller_helpers.go

diff --git a/api/v1alpha1/linodecluster_types.go b/api/v1alpha1/linodecluster_types.go
index 7b9595e4b..aa70c9dc8 100644
--- a/api/v1alpha1/linodecluster_types.go
+++ b/api/v1alpha1/linodecluster_types.go
@@ -47,13 +47,13 @@ type LinodeClusterStatus struct {
 	// reconciling the Machine and will contain a succinct value suitable
 	// for machine interpretation.
 	// +optional
-	FailureReason *errors.MachineStatusError `json:"failureReason"`
+	FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"`
 
 	// FailureMessage will be set in the event that there is a terminal problem
 	// reconciling the Machine and will contain a more verbose string suitable
 	// for logging and human consumption.
 	// +optional
-	FailureMessage *string `json:"failureMessage"`
+	FailureMessage *string `json:"failureMessage,omitempty"`
 
 	// Conditions defines current service state of the LinodeCluster.
 	// +optional
@@ -85,9 +85,18 @@ func (lm *LinodeCluster) SetConditions(conditions clusterv1.Conditions) {
 
 // NetworkSpec encapsulates Linode networking resources.
 type NetworkSpec struct {
-	// NodebalancerID is the id of apiserver Nodebalancer.
+	// LoadBalancerType is the type of load balancer to use. It defaults to NodeBalancer if not otherwise set.
+	// +kubebuilder:validation:Enum=NodeBalancer
 	// +optional
-	NodebalancerID int `json:"nodebalancerID,omitempty"`
+	LoadBalancerType string `json:"loadBalancerType,omitempty"`
+	// LoadBalancerPort is the port used by the API server. It must be within the valid port range (1-65535). If omitted, the default value is 6443.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=65535
+	// +optional
+	LoadBalancerPort int `json:"loadBalancerPort,omitempty"`
+	// NodeBalancerID is the ID of the API server NodeBalancer.
+	// +optional
+	NodeBalancerID int `json:"nodeBalancerID,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 29963df12..2f7eda4c1 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -153,7 +153,7 @@ func (in *LinodeClusterStatus) DeepCopyInto(out *LinodeClusterStatus) {
 	*out = *in
 	if in.FailureReason != nil {
 		in, out := &in.FailureReason, &out.FailureReason
-		*out = new(errors.MachineStatusError)
+		*out = new(errors.ClusterStatusError)
 		**out = **in
 	}
 	if in.FailureMessage != nil {
diff --git a/cloud/services/loadbalancers.go b/cloud/services/loadbalancers.go
new file mode 100644
index 000000000..db9eeb5f2
--- /dev/null
+++ b/cloud/services/loadbalancers.go
@@ -0,0 +1,137 @@
+package services
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/go-logr/logr"
+	"github.com/linode/cluster-api-provider-linode/cloud/scope"
+	"github.com/linode/cluster-api-provider-linode/util"
+	"github.com/linode/linodego"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+var (
+	defaultLBPort = 6443
+)
+
+// CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
+func CreateNodeBalancer(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancer, error) {
+	var linodeNBs []linodego.NodeBalancer
+	var linodeNB *linodego.NodeBalancer
+
+	tags := []string{string(clusterScope.LinodeCluster.UID)}
+	filter := map[string]string{
+		"tags": strings.Join(tags, ","),
+	}
+
+	rawFilter, err := json.Marshal(filter)
+	if err != nil {
+
+		return nil, err
+	}
+	logger.Info("Creating NodeBalancer")
+	if linodeNBs, err = clusterScope.LinodeClient.ListNodeBalancers(ctx, linodego.NewListOptions(1, string(rawFilter))); err != nil {
+		logger.Info("Failed to list NodeBalancers", "error", err.Error())
+
+		return nil, err
+	}
+
+	switch len(linodeNBs) {
+	case 1:
+		logger.Info(fmt.Sprintf("NodeBalancer %s already exists", *linodeNBs[0].Label))
+
+		linodeNB = &linodeNBs[0]
+	case 0:
+		logger.Info(fmt.Sprintf("Creating NodeBalancer %s-api-server", clusterScope.LinodeCluster.Name))
+		createConfig := linodego.NodeBalancerCreateOptions{
+			Label:              util.Pointer(fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name)),
+			Region:             clusterScope.LinodeCluster.Spec.Region,
+			ClientConnThrottle: nil,
+			Tags:               tags,
+		}
+
+		if linodeNB, err = clusterScope.LinodeClient.CreateNodeBalancer(ctx, createConfig); err != nil {
+			logger.Info("Failed to create Linode NodeBalancer", "error", err.Error())
+
+			// Already exists is not an error
+			apiErr := linodego.Error{}
+			if errors.As(err, &apiErr) && apiErr.Code != http.StatusFound {
+				return nil, err
+			}
+
+			err = nil
+
+			if linodeNB != nil {
+				logger.Info("Linode NodeBalancer already exists", "existing", linodeNB.Label)
+			}
+		}
+
+	default:
+		err = errors.New("multiple NodeBalancers")
+
+		logger.Error(err, "Panic! Multiple NodeBalancers found. This might be a concurrency issue in the controller!!!", "filters", string(rawFilter))
+
+		return nil, err
+	}
+
+	if linodeNB == nil {
+		err = errors.New("missing NodeBalancer")
+
+		logger.Error(err, "Panic! Failed to create NodeBalancer")
+
+		return nil, err
+	}
+
+	return linodeNB, nil
+}
+
+// CreateNodeBalancerConfig creates NodeBalancer config if it does not exist
+func CreateNodeBalancerConfig(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancerConfig, error) {
+
+	var linodeNBConfigs []linodego.NodeBalancerConfig
+	var linodeNBConfig *linodego.NodeBalancerConfig
+	var err error
+
+	if linodeNBConfigs, err = clusterScope.LinodeClient.ListNodeBalancerConfigs(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, linodego.NewListOptions(1, "")); err != nil {
+		logger.Info("Failed to list NodeBalancer Configs", "error", err.Error())
+
+		return nil, err
+	}
+	lbPort := defaultLBPort
+	if clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 {
+		lbPort = clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort
+	}
+	switch len(linodeNBConfigs) {
+	case 1:
+		logger.Info("NodeBalancer config " + strconv.Itoa(linodeNBConfigs[0].ID) + " already exists")
+		linodeNBConfig = &linodeNBConfigs[0]
+
+	case 0:
+		createConfig := linodego.NodeBalancerConfigCreateOptions{
+			Port:      lbPort,
+			Protocol:  linodego.ProtocolTCP,
+			Algorithm: linodego.AlgorithmRoundRobin,
+			Check:     linodego.CheckConnection,
+		}
+
+		if linodeNBConfig, err = clusterScope.LinodeClient.CreateNodeBalancerConfig(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, createConfig); err != nil {
+			logger.Info("Failed to create Linode NodeBalancer config", "error", err.Error())
+
+			return nil, err
+
+		}
+
+	default:
+		err = errors.New("multiple NodeBalancer Configs")
+
+		logger.Error(err, "Panic! Multiple NodeBalancer Configs found. This might be a concurrency issue in the controller!!!")
+
+		return nil, err
+	}
+
+	return linodeNBConfig, nil
+}
diff --git a/cmd/main.go b/cmd/main.go
index a5357b8e1..720857027 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -39,7 +39,7 @@ import (
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	infrastructurev1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
-	//+kubebuilder:scaffold:imports
+	// +kubebuilder:scaffold:imports
 )
 
 var (
@@ -51,7 +51,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(capi.AddToScheme(scheme))
 	utilruntime.Must(infrastructurev1alpha1.AddToScheme(scheme))
-	//+kubebuilder:scaffold:scheme
+	// +kubebuilder:scaffold:scheme
 }
 
 func main() {
@@ -62,10 +62,12 @@ func main() {
 	}
 
 	var machineWatchFilter string
+	var clusterWatchFilter string
 	var metricsAddr string
 	var enableLeaderElection bool
 	var probeAddr string
 	flag.StringVar(&machineWatchFilter, "machine-watch-filter", "", "The machines to watch by label.")
+	flag.StringVar(&clusterWatchFilter, "cluster-watch-filter", "", "The clusters to watch by label.")
 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
 	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
@@ -103,8 +105,11 @@ func main() {
 	}
 	if err = (&controller2.LinodeClusterReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
+		Client:           mgr.GetClient(),
+		Scheme:           mgr.GetScheme(),
+		Recorder:         mgr.GetEventRecorderFor("LinodeClusterReconciler"),
+		WatchFilterValue: clusterWatchFilter,
+		LinodeApiKey:     linodeToken,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "LinodeCluster")
 		os.Exit(1)
 	}
@@ -119,7 +124,7 @@
 		setupLog.Error(err, "unable to create controller", "controller", "LinodeMachine")
 		os.Exit(1)
 	}
-	//+kubebuilder:scaffold:builder
+	// +kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
 		setupLog.Error(err, "unable to set up health check")
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
index 706c0ceb3..ba92feda4 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
@@ -68,8 +68,21 @@ spec:
             description: NetworkSpec encapsulates all things related to Linode
               network.
             properties:
-              nodebalancerID:
-                description: NodebalancerID is the id of apiserver Nodebalancer.
+              loadBalancerPort:
+                description: LoadBalancerPort is the port used by the API
+                  server. It must be within the valid port range (1-65535).
+                  If omitted, the default value is 6443.
+                maximum: 65535
+                minimum: 1
+                type: integer
+              loadBalancerType:
+                description: LoadBalancerType is the type of load balancer
+                  to use. It defaults to NodeBalancer if not otherwise set.
+                enum:
+                - NodeBalancer
+                type: string
+              nodeBalancerID:
+                description: NodeBalancerID is the ID of the API server NodeBalancer.
                 type: integer
             type: object
             region:
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 78afaaaa9..878da1a56 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -1,3 +1,8 @@
+# common labels for CRD resources as required by
+# https://cluster-api.sigs.k8s.io/developer/providers/contracts.html#api-version-labels
+commonLabels:
+  cluster.x-k8s.io/v1beta1: v1alpha1
+
 # This kustomization.yaml is not intended to be run by itself,
 # since it depends on service name and namespace that are out of this kustomize package.
 # It should be run by config/default
diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go
index 490ad8081..83e612742 100644
--- a/controller/linodecluster_controller.go
+++ b/controller/linodecluster_controller.go
@@ -18,45 +18,216 @@ package controller
 import (
 	"context"
+	"errors"
+	"fmt"
+	"github.com/go-logr/logr"
+	"github.com/linode/cluster-api-provider-linode/cloud/scope"
+	"github.com/linode/cluster-api-provider-linode/cloud/services"
+	"github.com/linode/cluster-api-provider-linode/util"
+	"github.com/linode/cluster-api-provider-linode/util/reconciler"
+	"github.com/linode/linodego"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/record"
+	"net/http"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	cerrs "sigs.k8s.io/cluster-api/errors"
+	kutil "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+	"time"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/log"
 
-	infrastructurev1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
+	infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
 )
 
 // LinodeClusterReconciler reconciles a LinodeCluster object
 type LinodeClusterReconciler struct {
 	client.Client
-	Scheme *runtime.Scheme
+	Recorder         record.EventRecorder
+	LinodeApiKey     string
+	WatchFilterValue string
+	Scheme           *runtime.Scheme
+	ReconcileTimeout time.Duration
 }
 
-//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/status,verbs=get;update;patch
-//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/finalizers,verbs=update
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/finalizers,verbs=update
 
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the LinodeCluster object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
-// For more details, check Reconcile and its Result here:
-// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.0/pkg/reconcile
+
 func (r *LinodeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	_ = log.FromContext(ctx)
+	ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout))
+	defer cancel()
+
+	logger := ctrl.LoggerFrom(ctx).WithName("LinodeClusterReconciler").WithValues("name", req.NamespacedName.String())
+	linodeCluster := &infrav1.LinodeCluster{}
+	if err := r.Client.Get(ctx, req.NamespacedName, linodeCluster); err != nil {
+		logger.Info("Failed to fetch Linode cluster", "error", err.Error())
+
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	cluster, err := kutil.GetOwnerCluster(ctx, r.Client, linodeCluster.ObjectMeta)
+	if err != nil {
+		logger.Info("Failed to get owner cluster", "error", err.Error())
+
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	} else if cluster == nil {
+		logger.Info("Cluster Controller has not yet set OwnerRef, skipping reconciliation")
+
+		return ctrl.Result{}, nil
+	}
+	if annotations.IsPaused(cluster, linodeCluster) {
+		logger.Info("LinodeCluster of linked Cluster is marked as paused. Won't reconcile")
 
-	// TODO(user): your logic here
+		return ctrl.Result{}, nil
+	}
+	// Create the cluster scope.
+	clusterScope, err := scope.NewClusterScope(
+		r.LinodeApiKey,
+		scope.ClusterScopeParams{
+			Client:        r.Client,
+			Cluster:       cluster,
+			LinodeCluster: linodeCluster,
+		})
+	if err != nil {
+		logger.Info("Failed to create cluster scope", "error", err.Error())
 
-	return ctrl.Result{}, nil
+		return ctrl.Result{}, fmt.Errorf("failed to create cluster scope: %w", err)
+	}
+
+	return r.reconcile(ctx, clusterScope, logger)
+}
+
+func (r *LinodeClusterReconciler) reconcile(
+	ctx context.Context,
+	clusterScope *scope.ClusterScope,
+	logger logr.Logger,
+) (res ctrl.Result, err error) {
+	res = ctrl.Result{}
+
+	clusterScope.LinodeCluster.Status.Ready = false
+	clusterScope.LinodeCluster.Status.FailureReason = nil
+	clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer("")
+
+	failureReason := cerrs.ClusterStatusError("UnknownError")
+
+	defer func() {
+		if err != nil {
+			clusterScope.LinodeCluster.Status.FailureReason = util.Pointer(failureReason)
+			clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer(err.Error())
+
+			conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, string(failureReason), clusterv1.ConditionSeverityError, "%s", err.Error())
+
+			r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error())
+		}
+
+		if patchErr := clusterScope.PatchHelper.Patch(ctx, clusterScope.LinodeCluster); patchErr != nil && client.IgnoreNotFound(patchErr) != nil {
+			logger.Error(patchErr, "failed to patch LinodeCluster")
+
+			err = errors.Join(err, patchErr)
+		}
+	}()
+
+	// Delete
+	if !clusterScope.LinodeCluster.ObjectMeta.DeletionTimestamp.IsZero() {
+		failureReason = cerrs.DeleteClusterError
+
+		err = r.reconcileDelete(ctx, logger, clusterScope)
+
+		return
+	}
+
+	controllerutil.AddFinalizer(clusterScope.LinodeCluster, infrav1.GroupVersion.String())
+	// Create
+	if clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint.Host == "" {
+		failureReason = cerrs.CreateClusterError
+
+		_, err = r.reconcileCreate(ctx, clusterScope, logger)
+	}
+
+	clusterScope.LinodeCluster.Status.Ready = true
+	conditions.MarkTrue(clusterScope.LinodeCluster, clusterv1.ReadyCondition)
+
+	r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeNormal, string(clusterv1.ReadyCondition), "instance is running")
+
+	return
+}
+
+func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancer, error) {
+
+	linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = linodeNB.ID
+
+	linodeNBConfig, err := services.CreateNodeBalancerConfig(ctx, clusterScope, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+		Host: *linodeNB.IPv4,
+		Port: int32(linodeNBConfig.Port),
+	}
+
+	return linodeNB, nil
+}
+
+func (*LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error {
+	logger.Info("deleting cluster")
+
+	if clusterScope.LinodeCluster.Spec.Network.NodeBalancerID != 0 {
+		if err := clusterScope.LinodeClient.DeleteNodeBalancer(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID); err != nil {
+			logger.Info("Failed to delete Linode NodeBalancer", "error", err.Error())
+
+			// Not found is not an error
+			apiErr := linodego.Error{}
+			if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound {
+				// Keep the NodeBalancerID and the finalizer so the delete is
+				// retried on the next reconciliation.
+				conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+
+				return err
+			}
+		}
+	} else {
+		logger.Info("NodeBalancer ID is missing, nothing to do")
+	}
+
+	conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "Load balancer deleted")
+
+	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = 0
+	controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1.GroupVersion.String())
+
+	return nil
+}
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *LinodeClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&infrastructurev1alpha1.LinodeCluster{}).
-		Complete(r)
+	controller, err := ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1.LinodeCluster{}).
+		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetLogger(), r.WatchFilterValue)).
+		Build(r)
+	if err != nil {
+
+		return fmt.Errorf("failed to build controller: %w", err)
+	}
+
+	return controller.Watch(
+		source.Kind(mgr.GetCache(), &clusterv1.Cluster{}),
+		handler.EnqueueRequestsFromMapFunc(r.requeueLinodeClustersForUnpausedCluster(mgr.GetLogger())),
+		predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetLogger()),
+	)
 }
diff --git a/controller/linodecluster_controller_helpers.go b/controller/linodecluster_controller_helpers.go
new file mode 100644
index 000000000..bdfbf30c5
--- /dev/null
+++ b/controller/linodecluster_controller_helpers.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2023 Akamai Technologies, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"github.com/go-logr/logr"
+	"github.com/linode/cluster-api-provider-linode/util/reconciler"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+)
+
+func (r *LinodeClusterReconciler) requestsForCluster(ctx context.Context, namespace, name string) ([]ctrl.Request, error) {
+	labels := map[string]string{clusterv1.ClusterNameLabel: name}
+
+	machineList := clusterv1.MachineList{}
+	if err := r.Client.List(ctx, &machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
+		return nil, err
+	}
+
+	result := make([]ctrl.Request, 0, len(machineList.Items))
+	for _, item := range machineList.Items {
+		if item.Spec.InfrastructureRef.GroupVersionKind().Kind != "LinodeCluster" || item.Spec.InfrastructureRef.Name == "" {
+			continue
+		}
+
+		result = append(result, ctrl.Request{
+			NamespacedName: client.ObjectKey{
+				Namespace: item.Namespace,
+				Name:      item.Spec.InfrastructureRef.Name,
+			},
+		})
+	}
+
+	return result, nil
+}
+
+func (r *LinodeClusterReconciler) requeueLinodeClustersForUnpausedCluster(logger logr.Logger) handler.MapFunc {
+	logger = logger.WithName("LinodeClusterReconciler").WithName("requeueLinodeClustersForUnpausedCluster")
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
+		defer cancel()
+
+		cluster, ok := o.(*clusterv1.Cluster)
+		if !ok {
+			logger.Info("Failed to cast object to Cluster")
+
+			return nil
+		}
+
+		if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
+			logger.Info("Cluster has a deletion timestamp, skipping mapping")
+
+			return nil
+		}
+
+		request, err := r.requestsForCluster(ctx, cluster.Namespace, cluster.Name)
+		if err != nil {
+			logger.Info("Failed to create request for cluster", "error", err.Error())
+
+			return nil
+		}
+
+		return request
+	}
+}
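
A minimal usage sketch of the NetworkSpec fields added in this patch, assuming the v1alpha1 types from this module; the cluster name, namespace, and region below are illustrative placeholders, and LoadBalancerPort is left unset to show the fallback to defaultLBPort (6443) applied in cloud/services/loadbalancers.go. This is not part of the patch.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
)

func main() {
	// Illustrative LinodeCluster using the new NetworkSpec fields; the name,
	// namespace, and region are placeholder values.
	lc := infrav1.LinodeCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: infrav1.LinodeClusterSpec{
			Region: "us-ord",
			Network: infrav1.NetworkSpec{
				LoadBalancerType: "NodeBalancer",
				// LoadBalancerPort stays zero here, so CreateNodeBalancerConfig
				// falls back to defaultLBPort.
			},
		},
	}

	port := lc.Spec.Network.LoadBalancerPort
	if port == 0 {
		port = 6443 // mirrors the defaulting in cloud/services/loadbalancers.go
	}
	fmt.Printf("%s/%s: api-server NodeBalancer on port %d\n", lc.Namespace, lc.Name, port)
}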
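
The new SetupWithManager wiring maps Cluster events onto LinodeCluster reconcile requests through requestsForCluster, which selects Machines labelled with the cluster name whose infrastructure ref has kind LinodeCluster. Below is a sketch of how that helper could be exercised with controller-runtime's fake client; it is not part of the patch, and the object names are made up.

package controller

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestRequestsForCluster(t *testing.T) {
	scheme := runtime.NewScheme()
	if err := clusterv1.AddToScheme(scheme); err != nil {
		t.Fatal(err)
	}

	// A Machine labelled for cluster "demo" whose infrastructure ref points at
	// a LinodeCluster, matching what requestsForCluster filters on.
	machine := clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo-control-plane",
			Namespace: "default",
			Labels:    map[string]string{clusterv1.ClusterNameLabel: "demo"},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName:       "demo",
			InfrastructureRef: corev1.ObjectReference{Kind: "LinodeCluster", Name: "demo"},
		},
	}

	r := LinodeClusterReconciler{
		Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&machine).Build(),
	}

	requests, err := r.requestsForCluster(context.Background(), "default", "demo")
	if err != nil {
		t.Fatal(err)
	}
	if len(requests) != 1 || requests[0].Name != "demo" {
		t.Fatalf("unexpected requests: %v", requests)
	}
}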