From 4e4f30794a35e01617f21ceea9bd703719ba61a9 Mon Sep 17 00:00:00 2001
From: Ashley Dumaine
Date: Wed, 6 Mar 2024 18:14:14 -0500
Subject: [PATCH] even more refactoring!

---
 api/v1alpha1/linodecluster_types.go           |   4 +
 api/v1alpha1/zz_generated.deepcopy.go         |   5 +
 cloud/scope/cluster.go                        |  30 ++--
 cloud/services/firewalls.go                   | 139 ++++++++++++++++++
 ...cture.cluster.x-k8s.io_linodeclusters.yaml |  47 ++++++
 ...uster.x-k8s.io_linodeclustertemplates.yaml |  47 ++++++
 config/rbac/role.yaml                         |  26 ----
 controller/linodecluster_controller.go        |  82 +++++++----
 controller/linodemachine_controller.go        |  25 ++++
 .../linodemachine_controller_helpers.go       |  39 ++++-
 10 files changed, 366 insertions(+), 78 deletions(-)

diff --git a/api/v1alpha1/linodecluster_types.go b/api/v1alpha1/linodecluster_types.go
index 8ce6e56a0..0f84cfcd0 100644
--- a/api/v1alpha1/linodecluster_types.go
+++ b/api/v1alpha1/linodecluster_types.go
@@ -50,6 +50,10 @@ type LinodeClusterSpec struct {
 	// control plane nodes.
 	// +optional
 	ControlPlaneFirewall FirewallSpec `json:"controlPlaneFirewall,omitempty"`
+
+	// ControlPlaneFirewallRef is a reference to the Firewall for the control plane nodes.
+	// +optional
+	ControlPlaneFirewallRef *corev1.ObjectReference `json:"controlPlaneFirewallRef,omitempty"`
 }
 
 // LinodeClusterStatus defines the observed state of LinodeCluster
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 226384169..2358b337c 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -198,6 +198,11 @@ func (in *LinodeClusterSpec) DeepCopyInto(out *LinodeClusterSpec) {
 		**out = **in
 	}
 	in.ControlPlaneFirewall.DeepCopyInto(&out.ControlPlaneFirewall)
+	if in.ControlPlaneFirewallRef != nil {
+		in, out := &in.ControlPlaneFirewallRef, &out.ControlPlaneFirewallRef
+		*out = new(v1.ObjectReference)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeClusterSpec.
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go
index d6fd4011c..05a1e6b45 100644
--- a/cloud/scope/cluster.go
+++ b/cloud/scope/cluster.go
@@ -32,10 +32,9 @@ import (
 
 // ClusterScopeParams defines the input parameters used to create a new Scope.
 type ClusterScopeParams struct {
-	Client               client.Client
-	Cluster              *clusterv1.Cluster
-	LinodeCluster        *infrav1alpha1.LinodeCluster
-	ControlPlaneFirewall *infrav1alpha1.LinodeFirewall
+	Client        client.Client
+	Cluster       *clusterv1.Cluster
+	LinodeCluster *infrav1alpha1.LinodeCluster
 }
 
 func validateClusterScopeParams(params ClusterScopeParams) error {
@@ -45,9 +44,6 @@ func validateClusterScopeParams(params ClusterScopeParams) error {
 	if params.LinodeCluster == nil {
 		return errors.New("linodeCluster is required when creating a ClusterScope")
 	}
-	if params.ControlPlaneFirewall == nil {
-		return errors.New("controlPlaneFirewall is required when creating a ClusterScope")
-	}
 
 	return nil
 }
@@ -75,12 +71,11 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara
 	}
 
 	return &ClusterScope{
-		client:               params.Client,
-		Cluster:              params.Cluster,
-		LinodeClient:         linodeClient,
-		LinodeCluster:        params.LinodeCluster,
-		ControlPlaneFirewall: params.ControlPlaneFirewall,
-		PatchHelper:          helper,
+		client:        params.Client,
+		Cluster:       params.Cluster,
+		LinodeClient:  linodeClient,
+		LinodeCluster: params.LinodeCluster,
+		PatchHelper:   helper,
 	}, nil
 }
 
@@ -88,11 +83,10 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara
 type ClusterScope struct {
 	client client.Client
 
-	PatchHelper          *patch.Helper
-	LinodeClient         *linodego.Client
-	Cluster              *clusterv1.Cluster
-	LinodeCluster        *infrav1alpha1.LinodeCluster
-	ControlPlaneFirewall *infrav1alpha1.LinodeFirewall
+	PatchHelper   *patch.Helper
+	LinodeClient  *linodego.Client
+	Cluster       *clusterv1.Cluster
+	LinodeCluster *infrav1alpha1.LinodeCluster
 }
 
 // PatchObject persists the cluster configuration and status.
diff --git a/cloud/services/firewalls.go b/cloud/services/firewalls.go
index 6889e0368..b65a284b1 100644
--- a/cloud/services/firewalls.go
+++ b/cloud/services/firewalls.go
@@ -10,6 +10,7 @@ import (
 	"github.com/linode/linodego"
 
 	infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
+	"github.com/linode/cluster-api-provider-linode/cloud/scope"
 	"github.com/linode/cluster-api-provider-linode/util"
 )
 
@@ -311,3 +312,141 @@ func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (
 
 	return createOpts, nil
 }
+
+// AddNodeToApiServerFW adds a Node's IPs to the given Cloud Firewall's inbound rules
+func AddNodeToApiServerFW(
+	ctx context.Context,
+	logger logr.Logger,
+	machineScope *scope.MachineScope,
+	firewall *infrav1alpha1.LinodeFirewall,
+) error {
+	if firewall.Spec.FirewallID == nil {
+		err := errors.New("no firewall ID")
+		logger.Error(err, "no ID is set for the firewall")
+
+		return err
+	}
+
+	ipv4s, ipv6s, err := getInstanceIPs(ctx, machineScope.LinodeClient, machineScope.LinodeMachine.Spec.InstanceID)
+	if err != nil {
+		logger.Error(err, "Failed get instance IP addresses")
+
+		return err
+	}
+
+	// get the rules and append a new rule for this Node to access the api server
+	newRule := infrav1alpha1.FirewallRule{
+		Action:      "ACCEPT",
+		Label:       "api-server",
+		Description: "Rule created by CAPL",
+		Ports:       fmt.Sprint(machineScope.LinodeCluster.Spec.ControlPlaneEndpoint.Port),
+		Protocol:    linodego.TCP,
+		Addresses: &infrav1alpha1.NetworkAddresses{
+			IPv4: util.Pointer(ipv4s),
+			IPv6: util.Pointer(ipv6s),
+		},
+	}
+	// update the inbound rules
+	firewall.Spec.InboundRules = append(firewall.Spec.InboundRules, newRule)
+
+	// reprocess the firewall to make sure we won't exceed the IP and rule limit
+	clusterUID := firewall.Spec.ClusterUID
+	fwConfig, err := processACL(firewall, []string{clusterUID})
+	if err != nil {
+		logger.Info("Failed to process ACL", "error", err.Error())
+
+		return err
+	}
+
+	// finally, update the firewall
+	if _, err := machineScope.LinodeClient.UpdateFirewallRules(ctx, *firewall.Spec.FirewallID, fwConfig.Rules); err != nil {
+		logger.Info("Failed to update firewall", "error", err.Error())
+
+		return err
+	}
+
+	return nil
+}
+
+// DeleteNodeFromApiServerFW removes Node from the given Cloud Firewall's inbound rules
+func DeleteNodeFromApiServerFW(
+	ctx context.Context,
+	logger logr.Logger,
+	machineScope *scope.MachineScope,
+	firewall *infrav1alpha1.LinodeFirewall,
+) error {
+	if firewall.Spec.FirewallID == nil {
+		logger.Info("Firewall already deleted, no Firewall address to remove")
+
+		return nil
+	}
+
+	if machineScope.LinodeMachine.Spec.InstanceID == nil {
+		return errors.New("no InstanceID")
+	}
+
+	ipv4s, ipv6s, err := getInstanceIPs(ctx, machineScope.LinodeClient, machineScope.LinodeMachine.Spec.InstanceID)
+	if err != nil {
+		logger.Error(err, "Failed get instance IP addresses")
+
+		return err
+	}
+
+	for _, rule := range firewall.Spec.InboundRules {
+		rule.Addresses.IPv4 = util.Pointer(setDiff(*rule.Addresses.IPv4, ipv4s))
+		rule.Addresses.IPv6 = util.Pointer(setDiff(*rule.Addresses.IPv6, ipv6s))
+	}
+
+	// reprocess the firewall
+	clusterUID := firewall.Spec.ClusterUID
+	fwConfig, err := processACL(firewall, []string{clusterUID})
+	if err != nil {
+		logger.Info("Failed to process ACL", "error", err.Error())
+
+		return err
+	}
+
+	// finally, update the firewall
+	if _, err := machineScope.LinodeClient.UpdateFirewallRules(ctx, *firewall.Spec.FirewallID, fwConfig.Rules); err != nil {
+		logger.Info("Failed to update firewall", "error", err.Error())
+
+		return err
+	}
+
+	return nil
+}
+
+func getInstanceIPs(ctx context.Context, client *linodego.Client, instanceID *int) (ipv4s, ipv6s []string, err error) {
+	addresses, err := client.GetInstanceIPAddresses(ctx, *instanceID)
+	if err != nil {
+		return ipv4s, ipv6s, err
+	}
+
+	// get all the ipv4 addresses for the node
+	for _, addr := range addresses.IPv4.Private {
+		ipv4s = append(ipv4s, addr.Address)
+	}
+	for _, addr := range addresses.IPv4.Public {
+		ipv4s = append(ipv4s, addr.Address)
+	}
+
+	// get all the ipv6 addresses for the node
+	ipv6s = []string{addresses.IPv6.SLAAC.Address, addresses.IPv6.LinkLocal.Address}
+
+	return ipv4s, ipv6s, nil
+}
+
+// setDiff: A - B
+func setDiff(a, b []string) (diff []string) {
+	m := make(map[string]bool)
+	for _, item := range b {
+		m[item] = true
+	}
+	for _, item := range a {
+		if _, ok := m[item]; !ok {
+			diff = append(diff, item)
+		}
+	}
+
+	return diff
+}
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
index e6b0eb238..2c0a7464c 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml
@@ -88,6 +88,8 @@ spec:
                     description: |-
                       AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes
                       that should be permitted to reach the K8s API server
+                      Per the Linode API:
+                      Must contain only valid IPv4 addresses or networks (both must be in ip/mask format)
                     items:
                       type: string
                     type: array
@@ -109,6 +111,51 @@ spec:
                     description: FirewallID is the ID of the Cloud Firewall.
                     type: integer
                 type: object
+              controlPlaneFirewallRef:
+                description: ControlPlaneFirewallRef is a reference to the Firewall
+                  for the control plane nodes.
+                properties:
+                  apiVersion:
+                    description: API version of the referent.
+                    type: string
+                  fieldPath:
+                    description: |-
+                      If referring to a piece of an object instead of an entire object, this string
+                      should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                      For example, if the object reference is to a container within a pod, this would take on a value like:
+                      "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                      the event) or if no container name is specified "spec.containers[2]" (container with
+                      index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                      referencing a part of an object.
+                      TODO: this design is not final and this field is subject to change in the future.
+                    type: string
+                  kind:
+                    description: |-
+                      Kind of the referent.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+                    type: string
+                  name:
+                    description: |-
+                      Name of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                  namespace:
+                    description: |-
+                      Namespace of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+                    type: string
+                  resourceVersion:
+                    description: |-
+                      Specific resourceVersion to which this reference is made, if any.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+                    type: string
+                  uid:
+                    description: |-
+                      UID of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
               credentialsRef:
                 description: |-
                   CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml
index 74f97d1c6..1cde9ad7b 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml
@@ -82,6 +82,8 @@ spec:
                             description: |-
                               AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes
                               that should be permitted to reach the K8s API server
+                              Per the Linode API:
+                              Must contain only valid IPv4 addresses or networks (both must be in ip/mask format)
                             items:
                               type: string
                             type: array
@@ -103,6 +105,51 @@ spec:
                             description: FirewallID is the ID of the Cloud Firewall.
                             type: integer
                         type: object
+                      controlPlaneFirewallRef:
+                        description: ControlPlaneFirewallRef is a reference to the
+                          Firewall for the control plane nodes.
+                        properties:
+                          apiVersion:
+                            description: API version of the referent.
+                            type: string
+                          fieldPath:
+                            description: |-
+                              If referring to a piece of an object instead of an entire object, this string
+                              should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                              For example, if the object reference is to a container within a pod, this would take on a value like:
+                              "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                              the event) or if no container name is specified "spec.containers[2]" (container with
+                              index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                              referencing a part of an object.
+                              TODO: this design is not final and this field is subject to change in the future.
+                            type: string
+                          kind:
+                            description: |-
+                              Kind of the referent.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+                            type: string
+                          name:
+                            description: |-
+                              Name of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            type: string
+                          namespace:
+                            description: |-
+                              Namespace of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+                            type: string
+                          resourceVersion:
+                            description: |-
+                              Specific resourceVersion to which this reference is made, if any.
+                              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+                            type: string
+                          uid:
+                            description: |-
+                              UID of the referent.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+                            type: string
+                        type: object
+                        x-kubernetes-map-type: atomic
                       credentialsRef:
                         description: |-
                           CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 21520bc6e..ce62e56e6 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -68,32 +68,6 @@ rules:
   - get
   - patch
   - update
-- apiGroups:
-  - infrastructure.cluster.x-k8s.io
-  resources:
-  - linodefirewalls
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - infrastructure.cluster.x-k8s.io
-  resources:
-  - linodefirewalls/finalizers
-  verbs:
-  - update
-- apiGroups:
-  - infrastructure.cluster.x-k8s.io
-  resources:
-  - linodefirewalls/status
-  verbs:
-  - get
-  - patch
-  - update
 - apiGroups:
   - infrastructure.cluster.x-k8s.io
   resources:
diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go
index ec1d97a41..0414dacca 100644
--- a/controller/linodecluster_controller.go
+++ b/controller/linodecluster_controller.go
@@ -94,20 +94,14 @@ func (r *LinodeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, nil
 	}
 
-	controlPlaneFW := &infrav1alpha1.LinodeFirewall{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: fmt.Sprintf("%s-api-server", linodeCluster.Name),
-		},
-	}
 	// Create the cluster scope.
 	clusterScope, err := scope.NewClusterScope(
 		ctx,
 		r.LinodeApiKey,
 		scope.ClusterScopeParams{
-			Client:               r.Client,
-			Cluster:              cluster,
-			LinodeCluster:        linodeCluster,
-			ControlPlaneFirewall: controlPlaneFW,
+			Client:        r.Client,
+			Cluster:       cluster,
+			LinodeCluster: linodeCluster,
 		})
 	if err != nil {
 		logger.Info("Failed to create cluster scope", "error", err.Error())
@@ -166,8 +160,9 @@ func (r *LinodeClusterReconciler) reconcile(
 	return res, nil
 }
 
-func createControlPlaneFirewallSpec(linodeCluster *infrav1alpha1.LinodeCluster) *infrav1alpha1.LinodeFirewallSpec {
-	// TODO: get node IPs and append
+func (r *LinodeClusterReconciler) createControlPlaneFirewallSpec(
+	linodeCluster *infrav1alpha1.LinodeCluster,
+) *infrav1alpha1.LinodeFirewallSpec {
 	// Per the Linode API:
 	// Must contain only valid IPv4 addresses or networks (both must be in ip/mask format)
 	apiServerIPV4 := append(
@@ -214,46 +209,62 @@ func createControlPlaneFirewallSpec(linodeCluster *infrav1alpha1.LinodeCluster)
 	}
 }
 
-func setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error, lcr *LinodeClusterReconciler) {
+func (r *LinodeClusterReconciler) setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error) {
 	clusterScope.LinodeCluster.Status.FailureReason = util.Pointer(failureReason)
 	clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer(err.Error())
 
 	conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, string(failureReason), clusterv1.ConditionSeverityError, "%s", err.Error())
 
-	lcr.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error())
+	r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error())
 }
 
 func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error {
+	// handle NodeBalancer
 	linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
 	if err != nil {
-		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		r.setFailureReason(clusterScope, cerrs.CreateClusterError, err)
 
 		return err
 	}
-
 	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = linodeNB.ID
-
 	linodeNBConfig, err := services.CreateNodeBalancerConfig(ctx, clusterScope, logger)
 	if err != nil {
-		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		r.setFailureReason(clusterScope, cerrs.CreateClusterError, err)
 
 		return err
 	}
-
 	clusterScope.LinodeCluster.Spec.Network.NodeBalancerConfigID = util.Pointer(linodeNBConfig.ID)
 
+	// Set the control plane endpoint with the new Nodebalancer host and port
 	clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
 		Host: *linodeNB.IPv4,
 		Port: int32(linodeNBConfig.Port),
 	}
 
-	// build out the control plane firewall rules
-	clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster)
+	// build out the control plane Firewall rules
+	controlPlaneFW := &infrav1alpha1.LinodeFirewall{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name),
+			Namespace: clusterScope.LinodeCluster.Namespace,
+		},
+		Spec: *r.createControlPlaneFirewallSpec(clusterScope.LinodeCluster),
+	}
+
+	// Handle the Firewall
+	if err := r.Client.Create(ctx, controlPlaneFW); err != nil {
+		r.setFailureReason(clusterScope, cerrs.CreateClusterError, err)
 
-	// Handle firewalls
-	firewall, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger)
+		return err
+	}
+	clusterScope.LinodeCluster.Spec.ControlPlaneFirewallRef = &corev1.ObjectReference{
+		Kind:      controlPlaneFW.Kind,
+		Namespace: controlPlaneFW.Namespace,
+		Name:      controlPlaneFW.Name,
+	}
+	// NOTE: if we add a reconciler later on don't call this as the reconciler will take care of it
+	firewall, err := services.HandleFirewall(ctx, controlPlaneFW, clusterScope.LinodeClient, logger)
 	if err != nil {
-		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		r.setFailureReason(clusterScope, cerrs.CreateClusterError, err)
 
 		return err
 	}
@@ -262,17 +273,28 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo
 	return nil
 }
+
 func (r *LinodeClusterReconciler) reconcileUpdate(
 	ctx context.Context,
 	logger logr.Logger,
 	clusterScope *scope.ClusterScope,
 ) error {
-	// build out the control plane firewall rules
-	clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster)
+	// Update the Firewall if necessary
+	controlPlaneFW := &infrav1alpha1.LinodeFirewall{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name),
+			Namespace: clusterScope.LinodeCluster.Namespace,
+		},
+		Spec: *r.createControlPlaneFirewallSpec(clusterScope.LinodeCluster),
+	}
+
+	if err := r.Client.Update(ctx, controlPlaneFW); err != nil {
+		r.setFailureReason(clusterScope, cerrs.UpdateClusterError, err)
 
-	// Handle firewalls
-	if _, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger); err != nil {
-		setFailureReason(clusterScope, cerrs.UpdateClusterError, err, r)
+		return err
+	}
+	if _, err := services.HandleFirewall(ctx, controlPlaneFW, clusterScope.LinodeClient, logger); err != nil {
+		r.setFailureReason(clusterScope, cerrs.UpdateClusterError, err)
 
 		return err
 	}
 
@@ -292,7 +314,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo
 		// Not found is not an error
 		apiErr := linodego.Error{}
 		if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound {
-			setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r)
+			r.setFailureReason(clusterScope, cerrs.DeleteClusterError, err)
 
 			return err
 		}
@@ -312,7 +334,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo
 		// Not found is not an error
 		apiErr := linodego.Error{}
 		if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound {
-			setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r)
+			r.setFailureReason(clusterScope, cerrs.DeleteClusterError, err)
 
 			return err
 		}
diff --git a/controller/linodemachine_controller.go b/controller/linodemachine_controller.go
index bff70b4de..5bef25d10 100644
--- a/controller/linodemachine_controller.go
+++ b/controller/linodemachine_controller.go
@@ -281,6 +281,7 @@ func (r *LinodeMachineReconciler) reconcile(
 	return
 }
 
+//nolint:gocyclo,cyclop // As simple as possible.
 func (r *LinodeMachineReconciler) reconcileCreate(
 	ctx context.Context,
 	logger logr.Logger,
@@ -380,6 +381,18 @@ func (r *LinodeMachineReconciler) reconcileCreate(
 		return linodeInstance, err
 	}
 
+	linodeFW, err := r.getFirewall(ctx, machineScope)
+	if err != nil {
+		logger.Error(err, "Failed to fetch LinodeFirewall")
+
+		return linodeInstance, err
+	}
+	if err = services.AddNodeToApiServerFW(ctx, logger, machineScope, linodeFW); err != nil {
+		logger.Error(err, "Failed to add instance to Firewall")
+
+		return linodeInstance, err
+	}
+
 	return linodeInstance, nil
 }
 
@@ -514,6 +527,18 @@ func (r *LinodeMachineReconciler) reconcileDelete(
 		return err
 	}
 
+	linodeFW, err := r.getFirewall(ctx, machineScope)
+	if err != nil {
+		logger.Error(err, "Failed to fetch LinodeFirewall")
+
+		return err
+	}
+	if err := services.DeleteNodeFromApiServerFW(ctx, logger, machineScope, linodeFW); err != nil {
+		logger.Error(err, "Failed to remove node from Firewall")
+
+		return err
+	}
+
 	if err := machineScope.LinodeClient.DeleteInstance(ctx, *machineScope.LinodeMachine.Spec.InstanceID); err != nil {
 		if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
 			logger.Error(err, "Failed to delete Linode machine instance")
diff --git a/controller/linodemachine_controller_helpers.go b/controller/linodemachine_controller_helpers.go
index 51705428c..6bb565118 100644
--- a/controller/linodemachine_controller_helpers.go
+++ b/controller/linodemachine_controller_helpers.go
@@ -46,7 +46,12 @@ import (
 // The decoded user_data must not exceed 16384 bytes per the Linode API
 const maxBootstrapDataBytes = 16384
 
-func (*LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineScope *scope.MachineScope, tags []string, logger logr.Logger) (*linodego.InstanceCreateOptions, error) {
+func (*LinodeMachineReconciler) newCreateConfig(
+	ctx context.Context,
+	machineScope *scope.MachineScope,
+	tags []string,
+	logger logr.Logger,
+) (*linodego.InstanceCreateOptions, error) {
 	var err error
 
 	createConfig := linodeMachineSpecToInstanceCreateConfig(machineScope.LinodeMachine.Spec)
@@ -58,13 +63,17 @@ func (*LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineScop
 		return nil, err
 	}
 
+	// Do not boot the linode until extra configuration is done
 	createConfig.Booted = util.Pointer(false)
 
 	createConfig.PrivateIP = true
 
-	if kutil.IsControlPlaneMachine(machineScope.Machine) &&
-		machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil {
-		createConfig.FirewallID = *machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID
+	if machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil {
+		// If this is a control plane machine, set it to be protected by the
+		// control plane Cloud Firewall
+		if kutil.IsControlPlaneMachine(machineScope.Machine) {
+			createConfig.FirewallID = *machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID
+		}
 	}
 
 	bootstrapData, err := machineScope.GetBootstrapData(ctx)
@@ -292,3 +301,25 @@ func linodeMachineSpecToInstanceCreateConfig(machineSpec infrav1alpha1.LinodeMac
 
 	return &createConfig
 }
+
+func (r *LinodeMachineReconciler) getFirewall(
+	ctx context.Context,
+	machineScope *scope.MachineScope,
+) (*infrav1alpha1.LinodeFirewall, error) {
+	name := machineScope.LinodeCluster.Spec.ControlPlaneFirewallRef.Name
+	namespace := machineScope.LinodeCluster.Spec.ControlPlaneFirewallRef.Namespace
+	if namespace == "" {
+		namespace = machineScope.LinodeCluster.Namespace
+	}
+	linodeFW := &infrav1alpha1.LinodeFirewall{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+	if err := r.Get(ctx, client.ObjectKeyFromObject(linodeFW), linodeFW); err != nil {
+		return nil, err
+	}
+
+	return linodeFW, nil
+}