From ead385c309e2c708fe11120a5492ca1d92f7e4dd Mon Sep 17 00:00:00 2001 From: Ashley Dumaine Date: Fri, 1 Mar 2024 14:29:27 -0500 Subject: [PATCH 1/5] start adding FW controller --- PROJECT | 9 + api/v1alpha1/linodecluster_types.go | 10 + api/v1alpha1/linodefirewall_types.go | 175 +++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 200 ++++++++++++ cloud/scope/firewall.go | 102 ++++++ cloud/services/firewalls.go | 296 ++++++++++++++++++ cmd/main.go | 21 +- ...cture.cluster.x-k8s.io_linodeclusters.yaml | 132 ++++++++ ...uster.x-k8s.io_linodeclustertemplates.yaml | 132 ++++++++ ...ture.cluster.x-k8s.io_linodefirewalls.yaml | 238 ++++++++++++++ config/crd/kustomization.yaml | 3 + .../cainjection_in_linodefirewalls.yaml | 7 + .../patches/webhook_in_linodefirewalls.yaml | 16 + config/rbac/linodefirewall_editor_role.yaml | 31 ++ config/rbac/linodefirewall_viewer_role.yaml | 27 ++ config/rbac/role.yaml | 26 ++ ...nfrastructure_v1alpha1_linodefirewall.yaml | 12 + config/samples/kustomization.yaml | 1 + controller/linodefirewall_controller.go | 244 +++++++++++++++ docs/src/SUMMARY.md | 1 + docs/src/topics/firewalls.md | 73 +++++ 21 files changed, 1751 insertions(+), 5 deletions(-) create mode 100644 api/v1alpha1/linodefirewall_types.go create mode 100644 cloud/scope/firewall.go create mode 100644 cloud/services/firewalls.go create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml create mode 100644 config/crd/patches/cainjection_in_linodefirewalls.yaml create mode 100644 config/crd/patches/webhook_in_linodefirewalls.yaml create mode 100644 config/rbac/linodefirewall_editor_role.yaml create mode 100644 config/rbac/linodefirewall_viewer_role.yaml create mode 100644 config/samples/infrastructure_v1alpha1_linodefirewall.yaml create mode 100644 controller/linodefirewall_controller.go create mode 100644 docs/src/topics/firewalls.md diff --git a/PROJECT b/PROJECT index 320515e01..d1c01fa43 100644 --- a/PROJECT +++ b/PROJECT @@ -51,4 +51,13 @@ 
resources: kind: LinodeObjectStorageBucket path: github.com/linode/cluster-api-provider-linode/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: LinodeFirewall + path: github.com/linode/cluster-api-provider-linode/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/api/v1alpha1/linodecluster_types.go b/api/v1alpha1/linodecluster_types.go index a5424cbec..a476aa4c5 100644 --- a/api/v1alpha1/linodecluster_types.go +++ b/api/v1alpha1/linodecluster_types.go @@ -45,6 +45,16 @@ type LinodeClusterSpec struct { // supplied then the credentials of the controller will be used. // +optional CredentialsRef *corev1.SecretReference `json:"credentialsRef,omitempty"` + + // +optional + // ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic + // to/from the control plane nodes + ControlPlaneFirewallRefs []*corev1.ObjectReference `json:"controlPlaneFirewallRefs,omitempty"` + + // +optional + // WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic + // to/from the worker nodes + WorkerFirewallRefs []*corev1.ObjectReference `json:"workerFirewallRefs,omitempty"` } // LinodeClusterStatus defines the observed state of LinodeCluster diff --git a/api/v1alpha1/linodefirewall_types.go b/api/v1alpha1/linodefirewall_types.go new file mode 100644 index 000000000..edd9c5b27 --- /dev/null +++ b/api/v1alpha1/linodefirewall_types.go @@ -0,0 +1,175 @@ +/* +Copyright 2023 Akamai Technologies, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/linode/linodego" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// LinodeFirewallSpec defines the desired state of LinodeFirewall +type LinodeFirewallSpec struct { + // +optional + FirewallID *int `json:"firewallID,omitempty"` + + // +optional + // +kubebuilder:default=false + Enabled bool `json:"enabled,omitempty"` + + // +kubebuilder:validation:MinLength=3 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + Label string `json:"label,omitempty"` + + // +optional + InboundRules []FirewallRule `json:"inboundRules,omitempty"` + + // +kubebuilder:validation:Enum=ACCEPT;DROP + // +kubebuilder:default=ACCEPT + // +optional + InboundPolicy string `json:"inboundPolicy,omitempty"` + + // +optional + OutboundRules []FirewallRule `json:"outboundRules,omitempty"` + + // +kubebuilder:validation:Enum=ACCEPT;DROP + // +kubebuilder:default=ACCEPT + // +optional + OutboundPolicy string `json:"outboundPolicy,omitempty"` +} + +type FirewallRule struct { + Action string `json:"action"` + Label string `json:"label"` + Description string `json:"description,omitempty"` + Ports string `json:"ports,omitempty"` + Protocol linodego.NetworkProtocol `json:"protocol"` + Addresses *NetworkAddresses `json:"addresses"` +} + +// NetworkAddresses holds a list of IPv4 and IPv6 addresses +// We don't use linodego here since kubebuilder can't generate DeepCopyInto +// for linodego.NetworkAddresses +type NetworkAddresses struct { + IPv4 *[]string `json:"ipv4,omitempty"` + IPv6 *[]string `json:"ipv6,omitempty"` +} + +// 
LinodeFirewallStatus defines the observed state of LinodeFirewall +type LinodeFirewallStatus struct { + // Ready is true when the provider resource is ready. + // +optional + // +kubebuilder:default=false + Ready bool `json:"ready"` + + // FailureReason will be set in the event that there is a terminal problem + // reconciling the Firewall and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Firewall's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Firewalls + // can be added as events to the Firewall object and/or logged in the + // controller's output. + // +optional + FailureReason *FirewallStatusError `json:"failureReason,omitempty"` + + // FailureMessage will be set in the event that there is a terminal problem + // reconciling the Firewall and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Firewall's spec or the configuration of + // the controller, and that manual intervention is required. 
Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Firewalls + // can be added as events to the Firewall object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // Conditions defines current service state of the LinodeFirewall. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// LinodeFirewall is the Schema for the linodefirewalls API +type LinodeFirewall struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LinodeFirewallSpec `json:"spec,omitempty"` + Status LinodeFirewallStatus `json:"status,omitempty"` +} + +func (lf *LinodeFirewall) GetConditions() clusterv1.Conditions { + return lf.Status.Conditions +} + +func (lf *LinodeFirewall) SetConditions(conditions clusterv1.Conditions) { + lf.Status.Conditions = conditions +} + +//+kubebuilder:object:root=true + +// LinodeFirewallList contains a list of LinodeFirewall +type LinodeFirewallList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinodeFirewall `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LinodeFirewall{}, &LinodeFirewallList{}) +} + +// FirewallStatusError defines errors states for Firewall objects. +type FirewallStatusError string + +const ( + // CreateFirewallError indicates that an error was encountered + // when trying to create the Firewall. + CreateFirewallError FirewallStatusError = "CreateError" + + // UpdateFirewallError indicates that an error was encountered + // when trying to update the Firewall. 
+ UpdateFirewallError FirewallStatusError = "UpdateError" + + // DeleteFirewallError indicates that an error was encountered + // when trying to delete the Firewall. + DeleteFirewallError FirewallStatusError = "DeleteError" +) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7286e06a8..bcdb40e79 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -28,6 +28,26 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallRule) DeepCopyInto(out *FirewallRule) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = new(NetworkAddresses) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallRule. +func (in *FirewallRule) DeepCopy() *FirewallRule { + if in == nil { + return nil + } + out := new(FirewallRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InstanceConfigInterfaceCreateOptions) DeepCopyInto(out *InstanceConfigInterfaceCreateOptions) { *out = *in @@ -147,6 +167,28 @@ func (in *LinodeClusterSpec) DeepCopyInto(out *LinodeClusterSpec) { *out = new(v1.SecretReference) **out = **in } + if in.ControlPlaneFirewallRefs != nil { + in, out := &in.ControlPlaneFirewallRefs, &out.ControlPlaneFirewallRefs + *out = make([]*v1.ObjectReference, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(v1.ObjectReference) + **out = **in + } + } + } + if in.WorkerFirewallRefs != nil { + in, out := &in.WorkerFirewallRefs, &out.WorkerFirewallRefs + *out = make([]*v1.ObjectReference, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(v1.ObjectReference) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeClusterSpec. @@ -281,6 +323,131 @@ func (in *LinodeClusterTemplateSpec) DeepCopy() *LinodeClusterTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeFirewall) DeepCopyInto(out *LinodeFirewall) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeFirewall. +func (in *LinodeFirewall) DeepCopy() *LinodeFirewall { + if in == nil { + return nil + } + out := new(LinodeFirewall) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LinodeFirewall) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeFirewallList) DeepCopyInto(out *LinodeFirewallList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinodeFirewall, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeFirewallList. +func (in *LinodeFirewallList) DeepCopy() *LinodeFirewallList { + if in == nil { + return nil + } + out := new(LinodeFirewallList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinodeFirewallList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeFirewallSpec) DeepCopyInto(out *LinodeFirewallSpec) { + *out = *in + if in.FirewallID != nil { + in, out := &in.FirewallID, &out.FirewallID + *out = new(int) + **out = **in + } + if in.InboundRules != nil { + in, out := &in.InboundRules, &out.InboundRules + *out = make([]FirewallRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutboundRules != nil { + in, out := &in.OutboundRules, &out.OutboundRules + *out = make([]FirewallRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeFirewallSpec. 
+func (in *LinodeFirewallSpec) DeepCopy() *LinodeFirewallSpec { + if in == nil { + return nil + } + out := new(LinodeFirewallSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinodeFirewallStatus) DeepCopyInto(out *LinodeFirewallStatus) { + *out = *in + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(FirewallStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeFirewallStatus. +func (in *LinodeFirewallStatus) DeepCopy() *LinodeFirewallStatus { + if in == nil { + return nil + } + out := new(LinodeFirewallStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LinodeMachine) DeepCopyInto(out *LinodeMachine) { *out = *in @@ -787,6 +954,39 @@ func (in *LinodeVPCStatus) DeepCopy() *LinodeVPCStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAddresses) DeepCopyInto(out *NetworkAddresses) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAddresses. +func (in *NetworkAddresses) DeepCopy() *NetworkAddresses { + if in == nil { + return nil + } + out := new(NetworkAddresses) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = *in diff --git a/cloud/scope/firewall.go b/cloud/scope/firewall.go new file mode 100644 index 000000000..bc1fd4a76 --- /dev/null +++ b/cloud/scope/firewall.go @@ -0,0 +1,102 @@ +/* +Copyright 2023 Akamai Technologies, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scope + +import ( + "context" + "errors" + "fmt" + + "github.com/linode/linodego" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" +) + +// FirewallScope defines the basic context for an actuator to operate upon. +type FirewallScope struct { + client client.Client + + PatchHelper *patch.Helper + LinodeClient *linodego.Client + LinodeCluster *infrav1alpha1.LinodeCluster + LinodeFirewall *infrav1alpha1.LinodeFirewall +} + +// FirewallScopeParams defines the input parameters used to create a new Scope. +type FirewallScopeParams struct { + Client client.Client + LinodeCluster *infrav1alpha1.LinodeCluster + LinodeFirewall *infrav1alpha1.LinodeFirewall +} + +func validateFirewallScopeParams(params FirewallScopeParams) error { + if params.LinodeFirewall == nil { + return errors.New("linodeFirewall is required when creating a FirewallScope") + } + + if params.LinodeCluster == nil { + return errors.New("linodeCluster is required when creating a FirewallScope") + } + + return nil +} + +// NewFirewallScope creates a new Scope from the supplied parameters. +// This is meant to be called for each reconcile iteration. +func NewFirewallScope(apiKey string, params FirewallScopeParams) (*FirewallScope, error) { + if err := validateFirewallScopeParams(params); err != nil { + return nil, err + } + + linodeClient := createLinodeClient(apiKey) + + helper, err := patch.NewHelper(params.LinodeFirewall, params.Client) + if err != nil { + return nil, fmt.Errorf("failed to init patch helper: %w", err) + } + + return &FirewallScope{ + client: params.Client, + LinodeClient: linodeClient, + LinodeFirewall: params.LinodeFirewall, + LinodeCluster: params.LinodeCluster, + PatchHelper: helper, + }, nil +} + +// PatchObject persists the machine configuration and status. 
+func (s *FirewallScope) PatchObject(ctx context.Context) error { + return s.PatchHelper.Patch(ctx, s.LinodeFirewall) +} + +// Close closes the current scope persisting the machine configuration and status. +func (s *FirewallScope) Close(ctx context.Context) error { + return s.PatchObject(ctx) +} + +// AddFinalizer adds a finalizer if not present and immediately patches the +// object to avoid any race conditions. +func (s *FirewallScope) AddFinalizer(ctx context.Context) error { + if controllerutil.AddFinalizer(s.LinodeFirewall, infrav1alpha1.GroupVersion.String()) { + return s.Close(ctx) + } + + return nil +} diff --git a/cloud/services/firewalls.go b/cloud/services/firewalls.go new file mode 100644 index 000000000..8d2043c57 --- /dev/null +++ b/cloud/services/firewalls.go @@ -0,0 +1,296 @@ +package services + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/linode/cluster-api-provider-linode/util" + "net/http" + "slices" + + "github.com/go-logr/logr" + "github.com/linode/linodego" + + infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" +) + +const ( + maxFirewallRuleLabelLen = 32 + maxIPsPerFirewallRule = 255 + maxRulesPerFirewall = 25 +) + +var ( + errTooManyIPs = errors.New("too many IPs in this ACL, will exceed rules per firewall limit") + errDuplicateFirewalls = errors.New("duplicate firewalls found") +) + +func HandleFirewall( + ctx context.Context, + firewallScope *scope.FirewallScope, + logger logr.Logger, +) (linodeFW *linodego.Firewall, err error) { + clusterUID := string(firewallScope.LinodeCluster.UID) + tags := []string{string(firewallScope.LinodeCluster.UID)} + fwName := firewallScope.LinodeFirewall.Name + + linodeFWs, err := fetchFirewalls(ctx, firewallScope) + if err != nil { + logger.Info("Failed to list Firewalls", "error", err.Error()) + + return nil, err + } + + // firewall conflict + if len(linodeFWs) > 1 { + logger.Info("Multiple 
firewalls found", "error", errDuplicateFirewalls.Error()) + + return nil, errDuplicateFirewalls + } + + // build out the firewall rules for create or update + fwConfig, err := processACL(firewallScope.LinodeFirewall, tags) + if err != nil { + logger.Info("Failed to process ACL", "error", err.Error()) + + return nil, err + } + + if len(linodeFWs) == 0 { + logger.Info(fmt.Sprintf("Creating firewall %s", fwName)) + + if linodeFW, err = firewallScope.LinodeClient.CreateFirewall(ctx, *fwConfig); err != nil { + logger.Info("Failed to create Linode Firewall", "error", err.Error()) + // Already exists is not an error + apiErr := linodego.Error{} + if errors.As(err, &apiErr) && apiErr.Code != http.StatusFound { + return nil, err + } + + if linodeFW != nil { + logger.Info(fmt.Sprintf("Linode Firewall %s already exists", fwName)) + } + } + + } else { + logger.Info(fmt.Sprintf("Updating firewall %s", fwName)) + + linodeFW = &linodeFWs[0] + if !slices.Contains(linodeFW.Tags, clusterUID) { + err := errors.New("firewall conflict") + logger.Error(err, fmt.Sprintf( + "Firewall %s is not associated with cluster UID %s. 
Owner cluster is %s", + fwName, + clusterUID, + linodeFW.Tags[0], + )) + + return nil, err + } + + if _, err := firewallScope.LinodeClient.UpdateFirewallRules(ctx, linodeFW.ID, fwConfig.Rules); err != nil { + logger.Info("Failed to update Linode Firewall", "error", err.Error()) + + return nil, err + } + } + + // Need to make sure the firewall is appropriately enabled or disabled after + // create or update and the tags are properly set + var status linodego.FirewallStatus + if firewallScope.LinodeFirewall.Spec.Enabled { + status = linodego.FirewallEnabled + } else { + status = linodego.FirewallDisabled + } + if _, err = firewallScope.LinodeClient.UpdateFirewall( + ctx, + linodeFW.ID, + linodego.FirewallUpdateOptions{ + Status: status, + Tags: util.Pointer(tags), + }, + ); err != nil { + logger.Info("Failed to update Linode Firewall status and tags", "error", err.Error()) + + return nil, err + } + + return linodeFW, nil +} + +// fetch Firewalls returns all Linode firewalls with a label matching the CAPL Firewall name +func fetchFirewalls(ctx context.Context, firewallScope *scope.FirewallScope) (firewalls []linodego.Firewall, err error) { + var linodeFWs []linodego.Firewall + filter := map[string]string{ + "label": firewallScope.LinodeFirewall.Name, + } + + rawFilter, err := json.Marshal(filter) + if err != nil { + return nil, err + } + if linodeFWs, err = firewallScope.LinodeClient.ListFirewalls(ctx, linodego.NewListOptions(1, string(rawFilter))); err != nil { + return nil, err + } + return linodeFWs, nil +} + +// chunkIPs takes a list of strings representing IPs and breaks them up into +// one or more lists capped at the maxIPsPerFirewallRule for length +func chunkIPs(ips []string) [][]string { + chunks := [][]string{} + ipCount := len(ips) + + // If the number of IPs is less than or equal to maxIPsPerFirewall, + // return a single chunk containing all IPs. 
+ if ipCount <= maxIPsPerFirewallRule { + return [][]string{ips} + } + + // Otherwise, break the IPs into chunks with maxIPsPerFirewall IPs per chunk. + chunkCount := 0 + for ipCount > maxIPsPerFirewallRule { + start := chunkCount * maxIPsPerFirewallRule + end := (chunkCount + 1) * maxIPsPerFirewallRule + chunks = append(chunks, ips[start:end]) + chunkCount++ + ipCount -= maxIPsPerFirewallRule + } + + // Append the remaining IPs as a chunk. + chunks = append(chunks, ips[chunkCount*maxIPsPerFirewallRule:]) + + return chunks +} + +// processACL builds out a Linode firewall configuration for a given CAPL Firewall object which can then +// be used to create or update a Linode firewall +func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (*linodego.FirewallCreateOptions, error) { + createOpts := &linodego.FirewallCreateOptions{ + Label: firewall.Name, + Tags: tags, + } + + // process inbound rules + for _, rule := range firewall.Spec.InboundRules { + var ruleIPv4s []string + var ruleIPv6s []string + + if rule.Addresses.IPv4 != nil { + ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...) + } + + if rule.Addresses.IPv6 != nil { + ruleIPv6s = append(ruleIPv6s, *rule.Addresses.IPv6...) 
+ } + + ruleLabel := fmt.Sprintf("%s-%s", firewall.Spec.InboundPolicy, rule.Label) + if len(ruleLabel) > maxFirewallRuleLabelLen { + ruleLabel = ruleLabel[0:maxFirewallRuleLabelLen] + } + + // Process IPv4 + // chunk IPs to be in 255 chunks or fewer + ipv4chunks := chunkIPs(ruleIPv4s) + for i, chunk := range ipv4chunks { + v4chunk := chunk + createOpts.Rules.Inbound = append(createOpts.Rules.Inbound, linodego.FirewallRule{ + Action: rule.Action, + Label: ruleLabel, + Description: fmt.Sprintf("Rule %d, Created by CAPL: %s", i, rule.Label), + Protocol: linodego.TCP, + Ports: rule.Ports, + Addresses: linodego.NetworkAddresses{IPv4: &v4chunk}, + }) + } + + // Process IPv6 + // chunk IPs to be in 255 chunks or fewer + ipv6chunks := chunkIPs(ruleIPv6s) + for i, chunk := range ipv6chunks { + v6chunk := chunk + createOpts.Rules.Inbound = append(createOpts.Rules.Inbound, linodego.FirewallRule{ + Action: rule.Action, + Label: ruleLabel, + Description: fmt.Sprintf("Rule %d, Created by CAPL: %s", i, rule.Label), + Protocol: linodego.TCP, + Ports: rule.Ports, + Addresses: linodego.NetworkAddresses{IPv6: &v6chunk}, + }) + } + } + if firewall.Spec.InboundPolicy == "ACCEPT" { + // if an allow list is present, we drop everything else. + createOpts.Rules.InboundPolicy = "DROP" + } else { + // if a deny list is present, we accept everything else. + createOpts.Rules.InboundPolicy = "ACCEPT" + } + + // process outbound rules + for _, rule := range firewall.Spec.OutboundRules { + var ruleIPv4s []string + var ruleIPv6s []string + + if rule.Addresses.IPv4 != nil { + ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...) + } + + if rule.Addresses.IPv6 != nil { + ruleIPv6s = append(ruleIPv6s, *rule.Addresses.IPv6...) 
+ } + + ruleLabel := fmt.Sprintf("%s-%s", firewall.Spec.OutboundPolicy, rule.Label) + if len(ruleLabel) > maxFirewallRuleLabelLen { + ruleLabel = ruleLabel[0:maxFirewallRuleLabelLen] + } + + // Process IPv4 + // chunk IPs to be in 255 chunks or fewer + ipv4chunks := chunkIPs(ruleIPv4s) + for i, chunk := range ipv4chunks { + v4chunk := chunk + createOpts.Rules.Outbound = append(createOpts.Rules.Outbound, linodego.FirewallRule{ + Action: rule.Action, + Label: ruleLabel, + Description: fmt.Sprintf("Rule %d, Created by CAPL: %s", i, rule.Label), + Protocol: linodego.TCP, + Ports: rule.Ports, + Addresses: linodego.NetworkAddresses{IPv4: &v4chunk}, + }) + } + + // Process IPv6 + // chunk IPs to be in 255 chunks or fewer + ipv6chunks := chunkIPs(ruleIPv6s) + for i, chunk := range ipv6chunks { + v6chunk := chunk + createOpts.Rules.Outbound = append(createOpts.Rules.Outbound, linodego.FirewallRule{ + Action: rule.Action, + Label: ruleLabel, + Description: fmt.Sprintf("Rule %d, Created by CAPL: %s", i, rule.Label), + Protocol: linodego.TCP, + Ports: rule.Ports, + Addresses: linodego.NetworkAddresses{IPv6: &v6chunk}, + }) + } + } + if firewall.Spec.OutboundPolicy == "ACCEPT" { + // if an allow list is present, we drop everything else. + createOpts.Rules.OutboundPolicy = "DROP" + } else { + // if a deny list is present, we accept everything else. 
+ createOpts.Rules.OutboundPolicy = "ACCEPT" + } + + // need to check if we ended up needing to make too many rules + // with IP chunking + if len(createOpts.Rules.Inbound)+len(createOpts.Rules.Outbound) > maxRulesPerFirewall { + return nil, errTooManyIPs + } + + return createOpts, nil +} diff --git a/cmd/main.go b/cmd/main.go index c007012c4..a84e79671 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -26,7 +26,7 @@ import ( _ "go.uber.org/automaxprocs" - controller2 "github.com/linode/cluster-api-provider-linode/controller" + caplController "github.com/linode/cluster-api-provider-linode/controller" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. @@ -65,6 +65,7 @@ func main() { machineWatchFilter string clusterWatchFilter string objectStorageBucketWatchFilter string + firewallWatchFilter string metricsAddr string enableLeaderElection bool probeAddr string @@ -72,6 +73,7 @@ func main() { flag.StringVar(&machineWatchFilter, "machine-watch-filter", "", "The machines to watch by label.") flag.StringVar(&clusterWatchFilter, "cluster-watch-filter", "", "The clusters to watch by label.") flag.StringVar(&objectStorageBucketWatchFilter, "object-storage-bucket-watch-filter", "", "The object bucket storages to watch by label.") + flag.StringVar(&firewallWatchFilter, "firewall-watch-filter", "", "The firewalls to watch by label.") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, @@ -114,7 +116,7 @@ func main() { os.Exit(1) } - if err = (&controller2.LinodeClusterReconciler{ + if err = (&caplController.LinodeClusterReconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor("LinodeClusterReconciler"), WatchFilterValue: clusterWatchFilter, @@ -123,7 +125,7 @@ 
func main() { setupLog.Error(err, "unable to create controller", "controller", "LinodeCluster") os.Exit(1) } - if err = (&controller2.LinodeMachineReconciler{ + if err = (&caplController.LinodeMachineReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("LinodeMachineReconciler"), @@ -133,7 +135,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "LinodeMachine") os.Exit(1) } - if err = (&controller2.LinodeVPCReconciler{ + if err = (&caplController.LinodeVPCReconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor("LinodeVPCReconciler"), WatchFilterValue: clusterWatchFilter, @@ -142,7 +144,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "LinodeVPC") os.Exit(1) } - if err = (&controller2.LinodeObjectStorageBucketReconciler{ + if err = (&caplController.LinodeObjectStorageBucketReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Logger: ctrl.Log.WithName("LinodeObjectStorageBucketReconciler"), @@ -153,6 +155,15 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "LinodeObjectStorageBucket") os.Exit(1) } + if err = (&caplController.LinodeFirewallReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("LinodeFirewallReconciler"), + WatchFilterValue: firewallWatchFilter, + LinodeApiKey: linodeToken, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LinodeFirewall") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index e0fd9764b..01e051294 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -73,6 +73,72 @@ 
spec: - host - port type: object + controlPlaneFirewallRefs: + description: |- + ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic + to/from the control plane nodes + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. 
If not @@ -180,6 +246,72 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + workerFirewallRefs: + description: |- + WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic + to/from the worker nodes + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array required: - region type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml index b494d89c9..956ccad8e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml @@ -67,6 +67,72 @@ spec: - host - port type: object + controlPlaneFirewallRefs: + description: |- + ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic + to/from the control plane nodes + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. 
Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not @@ -174,6 +240,72 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf + workerFirewallRefs: + description: |- + WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic + to/from the worker nodes + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. 
Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array required: - region type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml new file mode 100644 index 000000000..6625cc7c8 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml @@ -0,0 +1,238 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: linodefirewalls.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + kind: LinodeFirewall + listKind: LinodeFirewallList + plural: linodefirewalls + singular: linodefirewall + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: LinodeFirewall is the Schema for the linodefirewalls API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinodeFirewallSpec defines the desired state of LinodeFirewall + properties: + enabled: + default: false + type: boolean + firewallID: + type: integer + inboundPolicy: + default: ACCEPT + enum: + - ACCEPT + - DROP + type: string + inboundRules: + items: + properties: + action: + type: string + addresses: + description: |- + NetworkAddresses holds a list of IPv4 and IPv6 addresses + We don't use linodego here since kubebuilder can't generate DeepCopyInto + for linodego.NetworkAddresses + properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + type: object + description: + type: string + label: + type: string + ports: + type: string + protocol: + description: NetworkProtocol enum type + type: string + required: + - action + - addresses + - label + - protocol + type: object + type: array + label: + maxLength: 63 + minLength: 3 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + outboundPolicy: + default: ACCEPT + enum: + - ACCEPT + - DROP + type: string + outboundRules: + items: + properties: + action: + type: string + addresses: + description: |- + NetworkAddresses holds a list of IPv4 and IPv6 addresses + We don't use linodego here since kubebuilder can't generate DeepCopyInto + for linodego.NetworkAddresses + properties: + ipv4: + items: + type: string + type: array + ipv6: + items: + type: string + type: array + type: object + description: + type: string + label: + type: string + ports: + type: string + protocol: + description: NetworkProtocol enum type + type: string + required: + - action + - addresses + - label + - protocol + type: object + type: array + type: object + status: + description: LinodeFirewallStatus defines the observed state of LinodeFirewall + properties: + conditions: + 
description: Conditions defines current service state of the LinodeFirewall. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failureMessage: + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Firewall and will contain a more verbose string suitable + for logging and human consumption. 
+ + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Firewall's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Firewalls + can be added as events to the Firewall object and/or logged in the + controller's output. + type: string + failureReason: + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Firewall and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Firewall's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Firewalls + can be added as events to the Firewall object and/or logged in the + controller's output. + type: string + ready: + default: false + description: Ready is true when the provider resource is ready. 
+ type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6ced66145..2d9f967d0 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -14,6 +14,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml - bases/infrastructure.cluster.x-k8s.io_linodevpcs.yaml - bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml +- bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -24,6 +25,7 @@ patches: #- path: patches/webhook_in_linodemachinetemplates.yaml #- path: patches/webhook_in_linodeclustertemplates.yaml #- path: patches/webhook_in_linodeobjectstoragebuckets.yaml +#- path: patches/webhook_in_linodefirewalls.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -33,6 +35,7 @@ patches: #- path: patches/cainjection_in_linodemachinetemplates.yaml #- path: patches/cainjection_in_linodeclustertemplates.yaml #- path: patches/cainjection_in_linodeobjectstoragebuckets.yaml +#- path: patches/cainjection_in_linodefirewalls.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_linodefirewalls.yaml b/config/crd/patches/cainjection_in_linodefirewalls.yaml new file mode 100644 index 000000000..8f83c139f --- /dev/null +++ b/config/crd/patches/cainjection_in_linodefirewalls.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: linodefirewalls.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_linodefirewalls.yaml b/config/crd/patches/webhook_in_linodefirewalls.yaml new file mode 100644 index 000000000..36298e478 --- /dev/null +++ b/config/crd/patches/webhook_in_linodefirewalls.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: linodefirewalls.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/rbac/linodefirewall_editor_role.yaml b/config/rbac/linodefirewall_editor_role.yaml new file mode 100644 index 000000000..b64c63307 --- /dev/null +++ b/config/rbac/linodefirewall_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit linodefirewalls. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: linodefirewall-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: cluster-api-provider-linode + app.kubernetes.io/part-of: cluster-api-provider-linode + app.kubernetes.io/managed-by: kustomize + name: linodefirewall-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls/status + verbs: + - get diff --git a/config/rbac/linodefirewall_viewer_role.yaml b/config/rbac/linodefirewall_viewer_role.yaml new file mode 100644 index 000000000..a031b23bd --- /dev/null +++ b/config/rbac/linodefirewall_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view linodefirewalls. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: linodefirewall-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: cluster-api-provider-linode + app.kubernetes.io/part-of: cluster-api-provider-linode + app.kubernetes.io/managed-by: kustomize + name: linodefirewall-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ce62e56e6..21520bc6e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -68,6 +68,32 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls/finalizers + verbs: + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - linodefirewalls/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/config/samples/infrastructure_v1alpha1_linodefirewall.yaml b/config/samples/infrastructure_v1alpha1_linodefirewall.yaml new file mode 100644 index 000000000..bdeacc24f --- /dev/null +++ b/config/samples/infrastructure_v1alpha1_linodefirewall.yaml @@ -0,0 +1,12 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: LinodeFirewall +metadata: + labels: + app.kubernetes.io/name: linodefirewall + app.kubernetes.io/instance: linodefirewall-sample + app.kubernetes.io/part-of: cluster-api-provider-linode + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: cluster-api-provider-linode + name: linodefirewall-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index cbc113d74..fcf1f17f7 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -6,4 +6,5 @@ resources: - infrastructure_v1alpha1_linodeclustertemplate.yaml - infrastructure_v1alpha1_linodevpc.yaml - infrastructure_v1alpha1_linodeobjectstoragebucket.yaml +- infrastructure_v1alpha1_linodefirewall.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controller/linodefirewall_controller.go b/controller/linodefirewall_controller.go new file mode 100644 index 000000000..ba3ced026 --- /dev/null +++ b/controller/linodefirewall_controller.go @@ -0,0 +1,244 @@ +/* +Copyright 2023 Akamai Technologies, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/go-logr/logr" + "github.com/linode/linodego" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/cloud/services" + "github.com/linode/cluster-api-provider-linode/util" + "github.com/linode/cluster-api-provider-linode/util/reconciler" +) + +// LinodeFirewallReconciler reconciles a LinodeFirewall object +type LinodeFirewallReconciler struct { + client.Client + Recorder record.EventRecorder + LinodeApiKey string + WatchFilterValue string + ReconcileTimeout time.Duration +} + +//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls/finalizers,verbs=update + +func (r *LinodeFirewallReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) 
{ + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) + defer cancel() + + logger := ctrl.LoggerFrom(ctx).WithName("LinodeFirewallReconciler").WithValues("name", req.NamespacedName.String()) + linodeFirewall := &infrav1alpha1.LinodeFirewall{} + if err := r.Client.Get(ctx, req.NamespacedName, linodeFirewall); err != nil { + logger.Info("Failed to fetch Linode firewall", "error", err.Error()) + + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + linodeCluster := &infrav1alpha1.LinodeCluster{} + + // Create the firewall scope. + firewallScope, err := scope.NewFirewallScope( + r.LinodeApiKey, + scope.FirewallScopeParams{ + Client: r.Client, + LinodeFirewall: linodeFirewall, + LinodeCluster: linodeCluster, + }) + if err != nil { + logger.Info("Failed to create firewall scope", "error", err.Error()) + + return ctrl.Result{}, fmt.Errorf("failed to create cluster scope: %w", err) + } + + return r.reconcile(ctx, firewallScope, logger) +} + +func (r *LinodeFirewallReconciler) reconcile( + ctx context.Context, + firewallScope *scope.FirewallScope, + logger logr.Logger, +) (res ctrl.Result, reterr error) { + res = ctrl.Result{} + + firewallScope.LinodeFirewall.Status.Ready = false + firewallScope.LinodeFirewall.Status.FailureReason = nil + firewallScope.LinodeFirewall.Status.FailureMessage = util.Pointer("") + + // Always close the scope when exiting this function so we can persist any LinodeCluster changes. 
+ defer func() { + // Filter out any IsNotFound message since client.IgnoreNotFound does not handle aggregate errors + if err := firewallScope.Close(ctx); utilerrors.FilterOut(err, apierrors.IsNotFound) != nil && reterr == nil { + logger.Error(err, "failed to patch LinodeCluster") + reterr = err + } + }() + + // Handle delete + if !firewallScope.LinodeFirewall.DeletionTimestamp.IsZero() { + return res, r.reconcileDelete(ctx, logger, firewallScope) + } + + // Add finalizer if it's not already there + if err := firewallScope.AddFinalizer(ctx); err != nil { + return res, err + } + + // Handle create + if firewallScope.LinodeFirewall.Spec.FirewallID == nil { + if err := r.reconcileCreate(ctx, logger, firewallScope); err != nil { + return res, err + } + r.Recorder.Event( + firewallScope.LinodeFirewall, + corev1.EventTypeNormal, + string(clusterv1.ReadyCondition), + "Firewall is ready", + ) + } + + // Handle updates + if err := r.reconcileUpdate(ctx, logger, firewallScope); err != nil { + return res, err + } + r.Recorder.Event( + firewallScope.LinodeFirewall, + corev1.EventTypeNormal, + string(clusterv1.ReadyCondition), + "Firewall is ready", + ) + + firewallScope.LinodeFirewall.Status.Ready = true + conditions.MarkTrue(firewallScope.LinodeFirewall, clusterv1.ReadyCondition) + + return res, nil +} + +func (r *LinodeFirewallReconciler) setFailureReason( + firewallScope *scope.FirewallScope, + failureReason infrav1alpha1.FirewallStatusError, + err error, +) { + firewallScope.LinodeFirewall.Status.FailureReason = util.Pointer(failureReason) + firewallScope.LinodeFirewall.Status.FailureMessage = util.Pointer(err.Error()) + + conditions.MarkFalse( + firewallScope.LinodeFirewall, + clusterv1.ReadyCondition, + string(failureReason), + clusterv1.ConditionSeverityError, + "%s", + err.Error(), + ) + + r.Recorder.Event(firewallScope.LinodeFirewall, corev1.EventTypeWarning, string(failureReason), err.Error()) +} + +func (r *LinodeFirewallReconciler) reconcileCreate( + ctx 
context.Context, + logger logr.Logger, + firewallScope *scope.FirewallScope, +) error { + linodeFW, err := services.HandleFirewall(ctx, firewallScope, logger) + if err != nil || linodeFW == nil { + r.setFailureReason(firewallScope, infrav1alpha1.CreateFirewallError, err) + + return err + } + firewallScope.LinodeFirewall.Spec.FirewallID = util.Pointer(linodeFW.ID) + + return nil +} + +func (r *LinodeFirewallReconciler) reconcileUpdate( + ctx context.Context, + logger logr.Logger, + firewallScope *scope.FirewallScope, +) error { + linodeFW, err := services.HandleFirewall(ctx, firewallScope, logger) + if err != nil || linodeFW == nil { + r.setFailureReason(firewallScope, infrav1alpha1.UpdateFirewallError, err) + + return err + } + firewallScope.LinodeFirewall.Spec.FirewallID = util.Pointer(linodeFW.ID) + + return nil +} + +func (r *LinodeFirewallReconciler) reconcileDelete( + ctx context.Context, + logger logr.Logger, + firewallScope *scope.FirewallScope, +) error { + if firewallScope.LinodeFirewall.Spec.FirewallID == nil { + logger.Info("Firewall ID is missing, nothing to do") + controllerutil.RemoveFinalizer(firewallScope.LinodeFirewall, infrav1alpha1.GroupVersion.String()) + + return nil + } + + if err := firewallScope.LinodeClient.DeleteFirewall(ctx, *firewallScope.LinodeFirewall.Spec.FirewallID); err != nil { + logger.Info("Failed to delete Linode NodeBalancer", "error", err.Error()) + + // Not found is not an error + apiErr := linodego.Error{} + if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound { + r.setFailureReason(firewallScope, infrav1alpha1.DeleteFirewallError, err) + + return err + } + } + + conditions.MarkFalse( + firewallScope.LinodeFirewall, + clusterv1.ReadyCondition, + clusterv1.DeletedReason, + clusterv1.ConditionSeverityInfo, + "Firewall deleted", + ) + + firewallScope.LinodeFirewall.Spec.FirewallID = nil + controllerutil.RemoveFinalizer(firewallScope.LinodeFirewall, infrav1alpha1.GroupVersion.String()) + + return nil +} + +// 
SetupWithManager sets up the controller with the Manager. +func (r *LinodeFirewallReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1alpha1.LinodeFirewall{}). + Complete(r) +} diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index fbf5abe64..ad94909c7 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -12,6 +12,7 @@ - [rke2](./topics/flavors/rke2.md) - [Etcd](./topics/etcd.md) - [Multi-Tenancy](./topics/multi-tenancy.md) + - [Firewalls](./topics/firewalls.md) - [Development](./developers/development.md) - [Releasing](./developers/releasing.md) - [Reference](./reference/reference.md) diff --git a/docs/src/topics/firewalls.md b/docs/src/topics/firewalls.md new file mode 100644 index 000000000..14691f42e --- /dev/null +++ b/docs/src/topics/firewalls.md @@ -0,0 +1,73 @@ +# Firewalls + +CAPL supports attaching Linode [Cloud Firewalls](https://www.linode.com/docs/products/networking/cloud-firewall/get-started/) +to workload clusters to secure network traffic. There are +two different types of Cloud Firewalls that CAPL can create. + +~~~admonish warning +Cloud Firewall rules are applied to traffic over the public and private +network but are not applied to traffic over a private +[VLAN](https://www.linode.com/docs/products/networking/vlans/). +~~~ + +## Control Plane Firewall + +By default, workload clusters are created with their own Cloud Firewall +attached to each Linode instance assigned as a control plane node. + +Access to these instances are automatically updated for any rule changes made to +the default control plane firewall after the cluster is created. + +### Inbound Access + +At cluster provisioning time, this firewall can be configured with +an allowlist of IPs to permit access. If no list is provided, all +IPs are permitted. 
For the Kubernetes API endpoint, all cluster nodes
+are permitted access.
+
+Please refer to the below table for configured service access:
+
+| Service (`Port`)          | Allowed IPs                                                           |
+| ------------------------- | --------------------------------------------------------------------- |
+| Kubernetes API (`6443`)   | `<allowed IPs>,<cluster node IPs>` (default: `[0.0.0.0/0,::/0]`)       |
+| NodePorts (`30000-32767`) | `<allowed IPs>` (default: `[0.0.0.0/0,::/0]`)                          |
+| SSH (`22`)                | `<allowed IPs>` (default: `[0.0.0.0/0,::/0]`)                          |
+
+### Outbound Access
+
+All outbound access from the control plane is by default permitted.
+
+## Worker Firewall
+
+By default, workload clusters are created with their own Cloud Firewall
+attached to each Linode instance assigned as a worker node.
+
+Access to these instances is automatically updated for any rule changes made to
+the default worker firewall after the cluster is created.
+
+### Inbound Access
+
+At cluster provisioning time, this firewall can be configured with
+an allowlist of IPs to permit access. If no list is provided, all
+IPs are permitted.
+
+Please refer to the below table for configured service access:
+
+| Service (`Port`)          | Allowed IPs                                                           |
+| ------------------------- | --------------------------------------------------------------------- |
+| NodePorts (`30000-32767`) | `<allowed IPs>` (default: `[0.0.0.0/0,::/0]`)                          |
+| SSH (`22`)                | `<allowed IPs>` (default: `[0.0.0.0/0,::/0]`)                          |
+
+### Outbound Access
+
+All outbound access from the workers is by default permitted.
+
+## Additional Cloud Firewalls
+
+If needed, additional control plane and/or worker firewalls can be created for
+one or more workload clusters. This is done by creating a `LinodeFirewall` CRD
+and adding it to the Cluster's `spec.controlPlaneFirewallRefs` or
+`spec.workerFirewallRefs`.
+
+To remove the additional firewall(s) from a workload cluster, update the Cluster's
+`spec.controlPlaneFirewallRefs` or `spec.workerFirewallRefs`.
From aff43d155390651851dff864f9d0a47d0d9cfeac Mon Sep 17 00:00:00 2001 From: Ashley Dumaine Date: Tue, 5 Mar 2024 16:34:32 -0500 Subject: [PATCH 2/5] refactoring --- api/v1alpha1/linodecluster_types.go | 37 +++- api/v1alpha1/linodefirewall_types.go | 4 + api/v1alpha1/zz_generated.deepcopy.go | 58 ++++--- cloud/scope/cluster.go | 30 ++-- cloud/scope/firewall.go | 7 - cloud/services/firewalls.go | 135 ++++++++------- cloud/services/loadbalancers.go | 11 +- ...cture.cluster.x-k8s.io_linodeclusters.yaml | 162 ++++-------------- ...uster.x-k8s.io_linodeclustertemplates.yaml | 162 ++++-------------- ...ture.cluster.x-k8s.io_linodefirewalls.yaml | 4 + ...cture.cluster.x-k8s.io_linodemachines.yaml | 6 +- ...uster.x-k8s.io_linodemachinetemplates.yaml | 6 +- controller/linodecluster_controller.go | 139 +++++++++++++-- controller/linodefirewall_controller.go | 7 +- controller/linodemachine_controller.go | 25 +-- .../linodemachine_controller_helpers.go | 5 + templates/flavors/base/linodeCluster.yaml | 3 + 17 files changed, 382 insertions(+), 419 deletions(-) diff --git a/api/v1alpha1/linodecluster_types.go b/api/v1alpha1/linodecluster_types.go index a476aa4c5..8ce6e56a0 100644 --- a/api/v1alpha1/linodecluster_types.go +++ b/api/v1alpha1/linodecluster_types.go @@ -46,15 +46,10 @@ type LinodeClusterSpec struct { // +optional CredentialsRef *corev1.SecretReference `json:"credentialsRef,omitempty"` + // ControlPlaneFirewall encapsulates all things related to the Firewall for the + // control plane nodes. 
// +optional - // ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic - // to/from the control plane nodes - ControlPlaneFirewallRefs []*corev1.ObjectReference `json:"controlPlaneFirewallRefs,omitempty"` - - // +optional - // WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic - // to/from the worker nodes - WorkerFirewallRefs []*corev1.ObjectReference `json:"workerFirewallRefs,omitempty"` + ControlPlaneFirewall FirewallSpec `json:"controlPlaneFirewall,omitempty"` } // LinodeClusterStatus defines the observed state of LinodeCluster @@ -123,6 +118,32 @@ type NetworkSpec struct { NodeBalancerConfigID *int `json:"nodeBalancerConfigID,omitempty"` } +// FirewallSpec encapsulates Linode Firewall configuration for nodes. +type FirewallSpec struct { + // Enabled specifies if the default api server firewall should be enabled + // +kubebuilder:default:=true + // +optional + Enabled bool `json:"enabled,omitempty"` + // AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes + // that should be permitted to reach the K8s API server + // Per the Linode API: + // Must contain only valid IPv4 addresses or networks (both must be in ip/mask format) + // +optional + // +kubebuilder:default:={"0.0.0.0/0"} + AllowedIPV4Addresses []string `json:"allowedIPV4Addresses,omitempty"` + // AllowedIPV6Addresses specifies additional IPV6 addresses aside from the worker nodes + // that should be permitted to reach the K8s API server + // +optional + // +kubebuilder:default:={"::/0"} + AllowedIPV6Addresses []string `json:"allowedIPV6Addresses,omitempty"` + // AllowSSH specifies if SSH should be permitted for the firewall + // +optional + AllowSSH bool `json:"allowSSH,omitempty"` + // FirewallID is the ID of the Cloud Firewall. 
+ // +optional + FirewallID *int `json:"firewallID,omitempty"` +} + // +kubebuilder:object:root=true // LinodeClusterList contains a list of LinodeCluster diff --git a/api/v1alpha1/linodefirewall_types.go b/api/v1alpha1/linodefirewall_types.go index edd9c5b27..da590b032 100644 --- a/api/v1alpha1/linodefirewall_types.go +++ b/api/v1alpha1/linodefirewall_types.go @@ -30,6 +30,10 @@ type LinodeFirewallSpec struct { // +optional FirewallID *int `json:"firewallID,omitempty"` + // +optional + // ClusterUID is used by the LinodeCluster controller to associate a Cloud Firewall to a LinodeCluster + ClusterUID string `json:"clusterUID,omitempty"` + // +optional // +kubebuilder:default=false Enabled bool `json:"enabled,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index bcdb40e79..226384169 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -48,6 +48,36 @@ func (in *FirewallRule) DeepCopy() *FirewallRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallSpec) DeepCopyInto(out *FirewallSpec) { + *out = *in + if in.AllowedIPV4Addresses != nil { + in, out := &in.AllowedIPV4Addresses, &out.AllowedIPV4Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedIPV6Addresses != nil { + in, out := &in.AllowedIPV6Addresses, &out.AllowedIPV6Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FirewallID != nil { + in, out := &in.FirewallID, &out.FirewallID + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallSpec. +func (in *FirewallSpec) DeepCopy() *FirewallSpec { + if in == nil { + return nil + } + out := new(FirewallSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *InstanceConfigInterfaceCreateOptions) DeepCopyInto(out *InstanceConfigInterfaceCreateOptions) { *out = *in @@ -167,28 +197,7 @@ func (in *LinodeClusterSpec) DeepCopyInto(out *LinodeClusterSpec) { *out = new(v1.SecretReference) **out = **in } - if in.ControlPlaneFirewallRefs != nil { - in, out := &in.ControlPlaneFirewallRefs, &out.ControlPlaneFirewallRefs - *out = make([]*v1.ObjectReference, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.ObjectReference) - **out = **in - } - } - } - if in.WorkerFirewallRefs != nil { - in, out := &in.WorkerFirewallRefs, &out.WorkerFirewallRefs - *out = make([]*v1.ObjectReference, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(v1.ObjectReference) - **out = **in - } - } - } + in.ControlPlaneFirewall.DeepCopyInto(&out.ControlPlaneFirewall) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeClusterSpec. @@ -554,6 +563,11 @@ func (in *LinodeMachineSpec) DeepCopyInto(out *LinodeMachineSpec) { *out = new(InstanceMetadataOptions) **out = **in } + if in.CredentialsRef != nil { + in, out := &in.CredentialsRef, &out.CredentialsRef + *out = new(v1.SecretReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeMachineSpec. diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 05a1e6b45..d6fd4011c 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -32,9 +32,10 @@ import ( // ClusterScopeParams defines the input parameters used to create a new Scope. 
type ClusterScopeParams struct { - Client client.Client - Cluster *clusterv1.Cluster - LinodeCluster *infrav1alpha1.LinodeCluster + Client client.Client + Cluster *clusterv1.Cluster + LinodeCluster *infrav1alpha1.LinodeCluster + ControlPlaneFirewall *infrav1alpha1.LinodeFirewall } func validateClusterScopeParams(params ClusterScopeParams) error { @@ -44,6 +45,9 @@ func validateClusterScopeParams(params ClusterScopeParams) error { if params.LinodeCluster == nil { return errors.New("linodeCluster is required when creating a ClusterScope") } + if params.ControlPlaneFirewall == nil { + return errors.New("controlPlaneFirewall is required when creating a ClusterScope") + } return nil } @@ -71,11 +75,12 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara } return &ClusterScope{ - client: params.Client, - Cluster: params.Cluster, - LinodeClient: linodeClient, - LinodeCluster: params.LinodeCluster, - PatchHelper: helper, + client: params.Client, + Cluster: params.Cluster, + LinodeClient: linodeClient, + LinodeCluster: params.LinodeCluster, + ControlPlaneFirewall: params.ControlPlaneFirewall, + PatchHelper: helper, }, nil } @@ -83,10 +88,11 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara type ClusterScope struct { client client.Client - PatchHelper *patch.Helper - LinodeClient *linodego.Client - Cluster *clusterv1.Cluster - LinodeCluster *infrav1alpha1.LinodeCluster + PatchHelper *patch.Helper + LinodeClient *linodego.Client + Cluster *clusterv1.Cluster + LinodeCluster *infrav1alpha1.LinodeCluster + ControlPlaneFirewall *infrav1alpha1.LinodeFirewall } // PatchObject persists the cluster configuration and status. 
diff --git a/cloud/scope/firewall.go b/cloud/scope/firewall.go index bc1fd4a76..fe390242c 100644 --- a/cloud/scope/firewall.go +++ b/cloud/scope/firewall.go @@ -35,14 +35,12 @@ type FirewallScope struct { PatchHelper *patch.Helper LinodeClient *linodego.Client - LinodeCluster *infrav1alpha1.LinodeCluster LinodeFirewall *infrav1alpha1.LinodeFirewall } // FirewallScopeParams defines the input parameters used to create a new Scope. type FirewallScopeParams struct { Client client.Client - LinodeCluster *infrav1alpha1.LinodeCluster LinodeFirewall *infrav1alpha1.LinodeFirewall } @@ -51,10 +49,6 @@ func validateFirewallScopeParams(params FirewallScopeParams) error { return errors.New("linodeFirewall is required when creating a FirewallScope") } - if params.LinodeCluster == nil { - return errors.New("linodeCluster is required when creating a FirewallScope") - } - return nil } @@ -76,7 +70,6 @@ func NewFirewallScope(apiKey string, params FirewallScopeParams) (*FirewallScope client: params.Client, LinodeClient: linodeClient, LinodeFirewall: params.LinodeFirewall, - LinodeCluster: params.LinodeCluster, PatchHelper: helper, }, nil } diff --git a/cloud/services/firewalls.go b/cloud/services/firewalls.go index 8d2043c57..6889e0368 100644 --- a/cloud/services/firewalls.go +++ b/cloud/services/firewalls.go @@ -2,18 +2,15 @@ package services import ( "context" - "encoding/json" "errors" "fmt" - "github.com/linode/cluster-api-provider-linode/util" - "net/http" "slices" "github.com/go-logr/logr" "github.com/linode/linodego" infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" - "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/util" ) const ( @@ -27,16 +24,16 @@ var ( errDuplicateFirewalls = errors.New("duplicate firewalls found") ) +// HandleFirewall takes the CAPL firewall representation and uses it to either create or update the Cloud Firewall +// via the given linode client func HandleFirewall( ctx 
context.Context, - firewallScope *scope.FirewallScope, + firewall *infrav1alpha1.LinodeFirewall, + linodeClient *linodego.Client, logger logr.Logger, ) (linodeFW *linodego.Firewall, err error) { - clusterUID := string(firewallScope.LinodeCluster.UID) - tags := []string{string(firewallScope.LinodeCluster.UID)} - fwName := firewallScope.LinodeFirewall.Name - - linodeFWs, err := fetchFirewalls(ctx, firewallScope) + clusterUID := firewall.Spec.ClusterUID + linodeFWs, err := fetchFirewalls(ctx, firewall.Name, *linodeClient) if err != nil { logger.Info("Failed to list Firewalls", "error", err.Error()) @@ -51,7 +48,7 @@ func HandleFirewall( } // build out the firewall rules for create or update - fwConfig, err := processACL(firewallScope.LinodeFirewall, tags) + fwConfig, err := processACL(firewall, []string{clusterUID}) if err != nil { logger.Info("Failed to process ACL", "error", err.Error()) @@ -59,39 +56,25 @@ func HandleFirewall( } if len(linodeFWs) == 0 { - logger.Info(fmt.Sprintf("Creating firewall %s", fwName)) - - if linodeFW, err = firewallScope.LinodeClient.CreateFirewall(ctx, *fwConfig); err != nil { - logger.Info("Failed to create Linode Firewall", "error", err.Error()) - // Already exists is not an error - apiErr := linodego.Error{} - if errors.As(err, &apiErr) && apiErr.Code != http.StatusFound { - return nil, err - } - - if linodeFW != nil { - logger.Info(fmt.Sprintf("Linode Firewall %s already exists", fwName)) - } - } + logger.Info(fmt.Sprintf("Creating firewall %s", firewall.Name)) + linodeFW, err = linodeClient.CreateFirewall(ctx, *fwConfig) + if err != nil { + logger.Info("Failed to create firewall", "error", err.Error()) - } else { - logger.Info(fmt.Sprintf("Updating firewall %s", fwName)) - - linodeFW = &linodeFWs[0] - if !slices.Contains(linodeFW.Tags, clusterUID) { - err := errors.New("firewall conflict") - logger.Error(err, fmt.Sprintf( - "Firewall %s is not associated with cluster UID %s. 
Owner cluster is %s", - fwName, - clusterUID, - linodeFW.Tags[0], - )) + return nil, err + } + if linodeFW == nil { + err = errors.New("nil firewall") + logger.Error(err, "Created firewall is nil") return nil, err } + } else { + logger.Info(fmt.Sprintf("Updating firewall %s", firewall.Name)) - if _, err := firewallScope.LinodeClient.UpdateFirewallRules(ctx, linodeFW.ID, fwConfig.Rules); err != nil { - logger.Info("Failed to update Linode Firewall", "error", err.Error()) + linodeFW = &linodeFWs[0] + if err = updateFirewall(ctx, linodeClient, linodeFW, clusterUID, fwConfig); err != nil { + logger.Info("Failed to update firewall", "error", err.Error()) return nil, err } @@ -100,17 +83,17 @@ func HandleFirewall( // Need to make sure the firewall is appropriately enabled or disabled after // create or update and the tags are properly set var status linodego.FirewallStatus - if firewallScope.LinodeFirewall.Spec.Enabled { + if firewall.Spec.Enabled { status = linodego.FirewallEnabled } else { status = linodego.FirewallDisabled } - if _, err = firewallScope.LinodeClient.UpdateFirewall( + if _, err = linodeClient.UpdateFirewall( ctx, linodeFW.ID, linodego.FirewallUpdateOptions{ Status: status, - Tags: util.Pointer(tags), + Tags: util.Pointer([]string{clusterUID}), }, ); err != nil { logger.Info("Failed to update Linode Firewall status and tags", "error", err.Error()) @@ -121,20 +104,48 @@ func HandleFirewall( return linodeFW, nil } -// fetch Firewalls returns all Linode firewalls with a label matching the CAPL Firewall name -func fetchFirewalls(ctx context.Context, firewallScope *scope.FirewallScope) (firewalls []linodego.Firewall, err error) { - var linodeFWs []linodego.Firewall - filter := map[string]string{ - "label": firewallScope.LinodeFirewall.Name, +func updateFirewall( + ctx context.Context, + linodeClient *linodego.Client, + linodeFW *linodego.Firewall, + clusterUID string, + fwConfig *linodego.FirewallCreateOptions, +) error { + if !slices.Contains(linodeFW.Tags, 
clusterUID) { + err := fmt.Errorf( + "firewall %s is not associated with cluster UID %s. Owner cluster is %s", + linodeFW.Label, + clusterUID, + linodeFW.Tags[0], + ) + + return err } - rawFilter, err := json.Marshal(filter) - if err != nil { - return nil, err + if _, err := linodeClient.UpdateFirewallRules(ctx, linodeFW.ID, fwConfig.Rules); err != nil { + return err } - if linodeFWs, err = firewallScope.LinodeClient.ListFirewalls(ctx, linodego.NewListOptions(1, string(rawFilter))); err != nil { + + return nil +} + +// fetch Firewalls returns all Linode firewalls with a label matching the CAPL Firewall name +func fetchFirewalls( + ctx context.Context, + name string, + linodeClient linodego.Client, +) (firewalls []linodego.Firewall, err error) { + var linodeFWs []linodego.Firewall + if linodeFWs, err = linodeClient.ListFirewalls( + ctx, + linodego.NewListOptions( + 1, + util.CreateLinodeAPIFilter(name, []string{}), + ), + ); err != nil { return nil, err } + return linodeFWs, nil } @@ -166,9 +177,15 @@ func chunkIPs(ips []string) [][]string { return chunks } -// processACL builds out a Linode firewall configuration for a given CAPL Firewall object which can then -// be used to create or update a Linode firewall -func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (*linodego.FirewallCreateOptions, error) { +// processACL uses the CAPL LinodeFirewall representation to build out the inbound +// and outbound rules for a linode Cloud Firewall and returns the configuration +// for creating or updating the Firewall +// +//nolint:gocyclo,cyclop // As simple as possible. 
+func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) ( + *linodego.FirewallCreateOptions, + error, +) { createOpts := &linodego.FirewallCreateOptions{ Label: firewall.Name, Tags: tags, @@ -176,8 +193,8 @@ func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (*linodeg // process inbound rules for _, rule := range firewall.Spec.InboundRules { - var ruleIPv4s []string - var ruleIPv6s []string + ruleIPv4s := []string{} + ruleIPv6s := []string{} if rule.Addresses.IPv4 != nil { ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...) @@ -187,7 +204,7 @@ func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (*linodeg ruleIPv6s = append(ruleIPv6s, *rule.Addresses.IPv6...) } - ruleLabel := fmt.Sprintf("%s-%s", firewall.Spec.InboundPolicy, rule.Label) + ruleLabel := fmt.Sprintf("%s-%s", rule.Action, rule.Label) if len(ruleLabel) > maxFirewallRuleLabelLen { ruleLabel = ruleLabel[0:maxFirewallRuleLabelLen] } @@ -232,8 +249,8 @@ func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) (*linodeg // process outbound rules for _, rule := range firewall.Spec.OutboundRules { - var ruleIPv4s []string - var ruleIPv6s []string + ruleIPv4s := []string{} + ruleIPv6s := []string{} if rule.Addresses.IPv4 != nil { ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...) 
diff --git a/cloud/services/loadbalancers.go b/cloud/services/loadbalancers.go index 4a5cdb012..6fdf20589 100644 --- a/cloud/services/loadbalancers.go +++ b/cloud/services/loadbalancers.go @@ -16,7 +16,7 @@ import ( ) const ( - defaultLBPort = 6443 + DefaultLBPort = 6443 ) // CreateNodeBalancer creates a new NodeBalancer if one doesn't exist @@ -84,7 +84,7 @@ func CreateNodeBalancerConfig( var linodeNBConfig *linodego.NodeBalancerConfig var err error - lbPort := defaultLBPort + lbPort := DefaultLBPort if clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 { lbPort = clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort } @@ -113,7 +113,6 @@ func AddNodeToNB( ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope, - clusterScope *scope.ClusterScope, ) error { // Update the NB backend with the new instance if it's a control plane node if !kutil.IsControlPlaneMachine(machineScope.Machine) { @@ -133,9 +132,9 @@ func AddNodeToNB( return err } - lbPort := defaultLBPort - if clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 { - lbPort = clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort + lbPort := DefaultLBPort + if machineScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 { + lbPort = machineScope.LinodeCluster.Spec.Network.LoadBalancerPort } if machineScope.LinodeCluster.Spec.Network.NodeBalancerConfigID == nil { err := errors.New("nil NodeBalancer Config ID") diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index 01e051294..e6b0eb238 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -73,72 +73,42 @@ spec: - host - port type: object - controlPlaneFirewallRefs: + controlPlaneFirewall: description: |- - ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic - to/from the 
control plane nodes - items: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. - For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + ControlPlaneFirewall encapsulates all things related to the Firewall for the + control plane nodes. + properties: + allowSSH: + description: AllowSSH specifies if SSH should be permitted for + the firewall + type: boolean + allowedIPV4Addresses: + default: + - 0.0.0.0/0 + description: |- + AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes + that should be permitted to reach the K8s API server + items: type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: array + allowedIPV6Addresses: + default: + - ::/0 + description: |- + AllowedIPV6Addresses specifies additional IPV6 addresses aside from the worker nodes + that should be permitted to reach the K8s API server + items: type: string - type: object - x-kubernetes-map-type: atomic - type: array + type: array + enabled: + default: true + description: Enabled specifies if the default api server firewall + should be enabled + type: boolean + firewallID: + description: FirewallID is the ID of the Cloud Firewall. + type: integer + type: object credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not @@ -246,72 +216,6 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf - workerFirewallRefs: - description: |- - WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic - to/from the worker nodes - items: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. 
This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. - For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: array required: - region type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml index 956ccad8e..74f97d1c6 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml @@ -67,72 +67,42 @@ spec: - host - port type: object - controlPlaneFirewallRefs: + controlPlaneFirewall: description: |- - ControlPlaneFirewallRefs contains a list of LinodeFirewall references to restrict traffic - to/from the control plane nodes - items: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. 
Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. - For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + ControlPlaneFirewall encapsulates all things related to the Firewall for the + control plane nodes. + properties: + allowSSH: + description: AllowSSH specifies if SSH should be permitted + for the firewall + type: boolean + allowedIPV4Addresses: + default: + - 0.0.0.0/0 + description: |- + AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes + that should be permitted to reach the K8s API server + items: type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: array + allowedIPV6Addresses: + default: + - ::/0 + description: |- + AllowedIPV6Addresses specifies additional IPV6 addresses aside from the worker nodes + that should be permitted to reach the K8s API server + items: type: string - type: object - x-kubernetes-map-type: atomic - type: array + type: array + enabled: + default: true + description: Enabled specifies if the default api server + firewall should be enabled + type: boolean + firewallID: + description: FirewallID is the ID of the Cloud Firewall. + type: integer + type: object credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. 
If not @@ -240,72 +210,6 @@ spec: x-kubernetes-validations: - message: Value is immutable rule: self == oldSelf - workerFirewallRefs: - description: |- - WorkerFirewallRefs contains a list of LinodeFirewall references to restrict traffic - to/from the worker nodes - items: - description: |- - ObjectReference contains enough information to let you inspect or modify the referred object. - --- - New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. - 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. - 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular - restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. - 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity - during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple - and the version of the actual struct is irrelevant. - 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type - will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. - - - Instead of using this type, create a locally provided and used type that is well-focused on your reference. - For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . - properties: - apiVersion: - description: API version of the referent. 
- type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic - type: array required: - region type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml index 6625cc7c8..c1b7fbf2c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodefirewalls.yaml @@ -39,6 +39,10 @@ spec: spec: description: LinodeFirewallSpec defines the desired state of LinodeFirewall properties: + clusterUID: + description: ClusterUID is used by the LinodeCluster controller to + associate a Cloud Firewall to a LinodeCluster + type: string enabled: default: false type: boolean diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml index 6d340bc22..698b7ded4 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachines.yaml @@ -93,9 +93,9 @@ spec: CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this machine. If not supplied then these credentials will be used in-order: - 1. Machine - 2. Cluster - 2. Controller + 1. LinodeMachine + 2. Owner LinodeCluster + 3. 
Controller properties: name: description: name is unique within a namespace to reference a diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml index 41e9977ee..739461352 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodemachinetemplates.yaml @@ -80,9 +80,9 @@ spec: CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this machine. If not supplied then these credentials will be used in-order: - 1. Machine - 2. Cluster - 2. Controller + 1. LinodeMachine + 2. Owner LinodeCluster + 3. Controller properties: name: description: name is unique within a namespace to reference diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go index 998917755..ec1d97a41 100644 --- a/controller/linodecluster_controller.go +++ b/controller/linodecluster_controller.go @@ -21,18 +21,15 @@ import ( "errors" "fmt" "net/http" + "strconv" "time" - apierrors "k8s.io/apimachinery/pkg/api/errors" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "github.com/go-logr/logr" - "github.com/linode/cluster-api-provider-linode/cloud/scope" - "github.com/linode/cluster-api-provider-linode/cloud/services" - "github.com/linode/cluster-api-provider-linode/util" - "github.com/linode/cluster-api-provider-linode/util/reconciler" "github.com/linode/linodego" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" cerrs "sigs.k8s.io/cluster-api/errors" @@ -40,14 +37,17 @@ import ( "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" + ctrl 
"sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" + "github.com/linode/cluster-api-provider-linode/cloud/services" + "github.com/linode/cluster-api-provider-linode/util" + "github.com/linode/cluster-api-provider-linode/util/reconciler" ) // LinodeClusterReconciler reconciles a LinodeCluster object @@ -93,14 +93,21 @@ func (r *LinodeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } + + controlPlaneFW := &infrav1alpha1.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-api-server", linodeCluster.Name), + }, + } // Create the cluster scope. 
clusterScope, err := scope.NewClusterScope( ctx, r.LinodeApiKey, scope.ClusterScopeParams{ - Client: r.Client, - Cluster: cluster, - LinodeCluster: linodeCluster, + Client: r.Client, + Cluster: cluster, + LinodeCluster: linodeCluster, + ControlPlaneFirewall: controlPlaneFW, }) if err != nil { logger.Info("Failed to create cluster scope", "error", err.Error()) @@ -139,12 +146,18 @@ func (r *LinodeClusterReconciler) reconcile( if err := clusterScope.AddFinalizer(ctx); err != nil { return res, err } - // Create + + // Create cluster if clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint.Host == "" { if err := r.reconcileCreate(ctx, logger, clusterScope); err != nil { return res, err } - r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeNormal, string(clusterv1.ReadyCondition), "Load balancer is ready") + r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeNormal, string(clusterv1.ReadyCondition), "Cluster is ready") + } else { + // Update cluster + if err := r.reconcileUpdate(ctx, logger, clusterScope); err != nil { + return res, err + } } clusterScope.LinodeCluster.Status.Ready = true @@ -153,6 +166,54 @@ func (r *LinodeClusterReconciler) reconcile( return res, nil } +func createControlPlaneFirewallSpec(linodeCluster *infrav1alpha1.LinodeCluster) *infrav1alpha1.LinodeFirewallSpec { + // TODO: get node IPs and append + // Per the Linode API: + // Must contain only valid IPv4 addresses or networks (both must be in ip/mask format) + apiServerIPV4 := append( + []string{fmt.Sprintf("%s/32", linodeCluster.Spec.ControlPlaneEndpoint.Host)}, + linodeCluster.Spec.ControlPlaneFirewall.AllowedIPV4Addresses..., + ) + apiServerIPV6 := append( + []string{}, + linodeCluster.Spec.ControlPlaneFirewall.AllowedIPV6Addresses..., + ) + lbPort := services.DefaultLBPort + if linodeCluster.Spec.Network.LoadBalancerPort != 0 { + lbPort = linodeCluster.Spec.Network.LoadBalancerPort + } + controlPlaneRules := []infrav1alpha1.FirewallRule{{ + Action: "ACCEPT", + Label: 
"api-server", + Ports: strconv.Itoa(lbPort), + Addresses: &infrav1alpha1.NetworkAddresses{ + IPv4: util.Pointer(apiServerIPV4), + IPv6: util.Pointer(apiServerIPV6), + }, + }} + if linodeCluster.Spec.ControlPlaneFirewall.AllowSSH { + sshRule := infrav1alpha1.FirewallRule{ + Action: "ACCEPT", + Label: "ssh", + Ports: "22", + Addresses: &infrav1alpha1.NetworkAddresses{ + IPv4: util.Pointer(linodeCluster.Spec.ControlPlaneFirewall.AllowedIPV4Addresses), + IPv6: util.Pointer(linodeCluster.Spec.ControlPlaneFirewall.AllowedIPV6Addresses), + }, + } + controlPlaneRules = append(controlPlaneRules, sshRule) + } + + return &infrav1alpha1.LinodeFirewallSpec{ + ClusterUID: string(linodeCluster.UID), + FirewallID: linodeCluster.Spec.ControlPlaneFirewall.FirewallID, + Enabled: linodeCluster.Spec.ControlPlaneFirewall.Enabled, + Label: linodeCluster.Name, + InboundPolicy: "DROP", + InboundRules: controlPlaneRules, + } +} + func setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error, lcr *LinodeClusterReconciler) { clusterScope.LinodeCluster.Status.FailureReason = util.Pointer(failureReason) clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer(err.Error()) @@ -186,11 +247,58 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo Port: int32(linodeNBConfig.Port), } + // build out the control plane firewall rules + clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster) + + // Handle firewalls + firewall, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger) + if err != nil { + setFailureReason(clusterScope, cerrs.CreateClusterError, err, r) + + return err + } + + clusterScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID = util.Pointer(firewall.ID) + + return nil +} +func (r *LinodeClusterReconciler) reconcileUpdate( + ctx context.Context, + logger logr.Logger, + clusterScope *scope.ClusterScope, +) 
error { + // build out the control plane firewall rules + clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster) + + // Handle firewalls + if _, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger); err != nil { + setFailureReason(clusterScope, cerrs.UpdateClusterError, err, r) + + return err + } + return nil } func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error { logger.Info("deleting cluster") + if clusterScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil { + if err := clusterScope.LinodeClient.DeleteFirewall( + ctx, + *clusterScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID, + ); err != nil { + logger.Info("Failed to delete control plane Firewall", "error", err.Error()) + + // Not found is not an error + apiErr := linodego.Error{} + if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound { + setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r) + + return err + } + } + } + if clusterScope.LinodeCluster.Spec.Network.NodeBalancerID == 0 { logger.Info("NodeBalancer ID is missing, nothing to do") controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha1.GroupVersion.String()) @@ -213,6 +321,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "Load balancer deleted") clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = 0 + clusterScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID = nil clusterScope.LinodeCluster.Spec.Network.NodeBalancerConfigID = nil controllerutil.RemoveFinalizer(clusterScope.LinodeCluster, infrav1alpha1.GroupVersion.String()) diff --git a/controller/linodefirewall_controller.go b/controller/linodefirewall_controller.go index 
ba3ced026..23b0fee8a 100644 --- a/controller/linodefirewall_controller.go +++ b/controller/linodefirewall_controller.go @@ -67,15 +67,12 @@ func (r *LinodeFirewallReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, client.IgnoreNotFound(err) } - linodeCluster := &infrav1alpha1.LinodeCluster{} - // Create the firewall scope. firewallScope, err := scope.NewFirewallScope( r.LinodeApiKey, scope.FirewallScopeParams{ Client: r.Client, LinodeFirewall: linodeFirewall, - LinodeCluster: linodeCluster, }) if err != nil { logger.Info("Failed to create firewall scope", "error", err.Error()) @@ -171,7 +168,7 @@ func (r *LinodeFirewallReconciler) reconcileCreate( logger logr.Logger, firewallScope *scope.FirewallScope, ) error { - linodeFW, err := services.HandleFirewall(ctx, firewallScope, logger) + linodeFW, err := services.HandleFirewall(ctx, firewallScope.LinodeFirewall, firewallScope.LinodeClient, logger) if err != nil || linodeFW == nil { r.setFailureReason(firewallScope, infrav1alpha1.CreateFirewallError, err) @@ -187,7 +184,7 @@ func (r *LinodeFirewallReconciler) reconcileUpdate( logger logr.Logger, firewallScope *scope.FirewallScope, ) error { - linodeFW, err := services.HandleFirewall(ctx, firewallScope, logger) + linodeFW, err := services.HandleFirewall(ctx, firewallScope.LinodeFirewall, firewallScope.LinodeClient, logger) if err != nil || linodeFW == nil { r.setFailureReason(firewallScope, infrav1alpha1.UpdateFirewallError, err) diff --git a/controller/linodemachine_controller.go b/controller/linodemachine_controller.go index cdbb97b7f..bff70b4de 100644 --- a/controller/linodemachine_controller.go +++ b/controller/linodemachine_controller.go @@ -95,7 +95,7 @@ type LinodeMachineReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.0/pkg/reconcile // -//nolint:gocyclo,cyclop // As simple as possible. +//nolint:cyclop // As simple as possible. 
func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) defer cancel() @@ -184,29 +184,13 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, fmt.Errorf("failed to create machine scope: %w", err) } - clusterScope, err := scope.NewClusterScope( - ctx, - r.LinodeApiKey, - scope.ClusterScopeParams{ - Client: r.Client, - Cluster: cluster, - LinodeCluster: linodeCluster, - }, - ) - if err != nil { - log.Error(err, "Failed to create cluster scope") - - return ctrl.Result{}, fmt.Errorf("failed to create cluster scope: %w", err) - } - - return r.reconcile(ctx, log, machineScope, clusterScope) + return r.reconcile(ctx, log, machineScope) } func (r *LinodeMachineReconciler) reconcile( ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope, - clusterScope *scope.ClusterScope, ) (res ctrl.Result, err error) { res = ctrl.Result{} @@ -292,7 +276,7 @@ func (r *LinodeMachineReconciler) reconcile( return } - linodeInstance, err = r.reconcileCreate(ctx, logger, machineScope, clusterScope) + linodeInstance, err = r.reconcileCreate(ctx, logger, machineScope) return } @@ -301,7 +285,6 @@ func (r *LinodeMachineReconciler) reconcileCreate( ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope, - clusterScope *scope.ClusterScope, ) (*linodego.Instance, error) { logger.Info("creating machine") @@ -391,7 +374,7 @@ func (r *LinodeMachineReconciler) reconcileCreate( }) } - if err = services.AddNodeToNB(ctx, logger, machineScope, clusterScope); err != nil { + if err = services.AddNodeToNB(ctx, logger, machineScope); err != nil { logger.Error(err, "Failed to add instance to Node Balancer backend") return linodeInstance, err diff --git a/controller/linodemachine_controller_helpers.go b/controller/linodemachine_controller_helpers.go index 460b624ce..51705428c 
100644 --- a/controller/linodemachine_controller_helpers.go +++ b/controller/linodemachine_controller_helpers.go @@ -62,6 +62,11 @@ func (*LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineScop createConfig.PrivateIP = true + if kutil.IsControlPlaneMachine(machineScope.Machine) && + machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil { + createConfig.FirewallID = *machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID + } + bootstrapData, err := machineScope.GetBootstrapData(ctx) if err != nil { logger.Info("Failed to get bootstrap data", "error", err.Error()) diff --git a/templates/flavors/base/linodeCluster.yaml b/templates/flavors/base/linodeCluster.yaml index 3f5956877..39f2ddda1 100644 --- a/templates/flavors/base/linodeCluster.yaml +++ b/templates/flavors/base/linodeCluster.yaml @@ -7,3 +7,6 @@ spec: region: ${LINODE_REGION} credentialsRef: name: ${CLUSTER_NAME}-credentials + controlPlaneFirewall: + enabled: true + allowSSH: true From 5457953c69e0aa89034ffe190f164b08d7312567 Mon Sep 17 00:00:00 2001 From: Ashley Dumaine Date: Wed, 6 Mar 2024 14:22:00 -0500 Subject: [PATCH 3/5] remove fw controller for now --- PROJECT | 1 - cmd/main.go | 11 -- controller/linodefirewall_controller.go | 241 ------------------------ 3 files changed, 253 deletions(-) delete mode 100644 controller/linodefirewall_controller.go diff --git a/PROJECT b/PROJECT index d1c01fa43..580f0fab7 100644 --- a/PROJECT +++ b/PROJECT @@ -54,7 +54,6 @@ resources: - api: crdVersion: v1 namespaced: true - controller: true domain: cluster.x-k8s.io group: infrastructure kind: LinodeFirewall diff --git a/cmd/main.go b/cmd/main.go index a84e79671..6396aa52f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -65,7 +65,6 @@ func main() { machineWatchFilter string clusterWatchFilter string objectStorageBucketWatchFilter string - firewallWatchFilter string metricsAddr string enableLeaderElection bool probeAddr string @@ -73,7 +72,6 @@ func main() { 
flag.StringVar(&machineWatchFilter, "machine-watch-filter", "", "The machines to watch by label.") flag.StringVar(&clusterWatchFilter, "cluster-watch-filter", "", "The clusters to watch by label.") flag.StringVar(&objectStorageBucketWatchFilter, "object-storage-bucket-watch-filter", "", "The object bucket storages to watch by label.") - flag.StringVar(&firewallWatchFilter, "firewall-watch-filter", "", "The firewalls to watch by label.") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, @@ -155,15 +153,6 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "LinodeObjectStorageBucket") os.Exit(1) } - if err = (&caplController.LinodeFirewallReconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor("LinodeFirewallReconciler"), - WatchFilterValue: firewallWatchFilter, - LinodeApiKey: linodeToken, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "LinodeFirewall") - os.Exit(1) - } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/controller/linodefirewall_controller.go b/controller/linodefirewall_controller.go deleted file mode 100644 index 23b0fee8a..000000000 --- a/controller/linodefirewall_controller.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2023 Akamai Technologies, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "github.com/go-logr/logr" - "github.com/linode/linodego" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" - "github.com/linode/cluster-api-provider-linode/cloud/scope" - "github.com/linode/cluster-api-provider-linode/cloud/services" - "github.com/linode/cluster-api-provider-linode/util" - "github.com/linode/cluster-api-provider-linode/util/reconciler" -) - -// LinodeFirewallReconciler reconciles a LinodeFirewall object -type LinodeFirewallReconciler struct { - client.Client - Recorder record.EventRecorder - LinodeApiKey string - WatchFilterValue string - ReconcileTimeout time.Duration -} - -//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls/finalizers,verbs=update - -func (r *LinodeFirewallReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) 
{ - ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) - defer cancel() - - logger := ctrl.LoggerFrom(ctx).WithName("LinodeFirewallReconciler").WithValues("name", req.NamespacedName.String()) - linodeFirewall := &infrav1alpha1.LinodeFirewall{} - if err := r.Client.Get(ctx, req.NamespacedName, linodeFirewall); err != nil { - logger.Info("Failed to fetch Linode firewall", "error", err.Error()) - - return ctrl.Result{}, client.IgnoreNotFound(err) - } - - // Create the firewall scope. - firewallScope, err := scope.NewFirewallScope( - r.LinodeApiKey, - scope.FirewallScopeParams{ - Client: r.Client, - LinodeFirewall: linodeFirewall, - }) - if err != nil { - logger.Info("Failed to create firewall scope", "error", err.Error()) - - return ctrl.Result{}, fmt.Errorf("failed to create cluster scope: %w", err) - } - - return r.reconcile(ctx, firewallScope, logger) -} - -func (r *LinodeFirewallReconciler) reconcile( - ctx context.Context, - firewallScope *scope.FirewallScope, - logger logr.Logger, -) (res ctrl.Result, reterr error) { - res = ctrl.Result{} - - firewallScope.LinodeFirewall.Status.Ready = false - firewallScope.LinodeFirewall.Status.FailureReason = nil - firewallScope.LinodeFirewall.Status.FailureMessage = util.Pointer("") - - // Always close the scope when exiting this function so we can persist any LinodeCluster changes. 
- defer func() { - // Filter out any IsNotFound message since client.IgnoreNotFound does not handle aggregate errors - if err := firewallScope.Close(ctx); utilerrors.FilterOut(err, apierrors.IsNotFound) != nil && reterr == nil { - logger.Error(err, "failed to patch LinodeCluster") - reterr = err - } - }() - - // Handle delete - if !firewallScope.LinodeFirewall.DeletionTimestamp.IsZero() { - return res, r.reconcileDelete(ctx, logger, firewallScope) - } - - // Add finalizer if it's not already there - if err := firewallScope.AddFinalizer(ctx); err != nil { - return res, err - } - - // Handle create - if firewallScope.LinodeFirewall.Spec.FirewallID == nil { - if err := r.reconcileCreate(ctx, logger, firewallScope); err != nil { - return res, err - } - r.Recorder.Event( - firewallScope.LinodeFirewall, - corev1.EventTypeNormal, - string(clusterv1.ReadyCondition), - "Firewall is ready", - ) - } - - // Handle updates - if err := r.reconcileUpdate(ctx, logger, firewallScope); err != nil { - return res, err - } - r.Recorder.Event( - firewallScope.LinodeFirewall, - corev1.EventTypeNormal, - string(clusterv1.ReadyCondition), - "Firewall is ready", - ) - - firewallScope.LinodeFirewall.Status.Ready = true - conditions.MarkTrue(firewallScope.LinodeFirewall, clusterv1.ReadyCondition) - - return res, nil -} - -func (r *LinodeFirewallReconciler) setFailureReason( - firewallScope *scope.FirewallScope, - failureReason infrav1alpha1.FirewallStatusError, - err error, -) { - firewallScope.LinodeFirewall.Status.FailureReason = util.Pointer(failureReason) - firewallScope.LinodeFirewall.Status.FailureMessage = util.Pointer(err.Error()) - - conditions.MarkFalse( - firewallScope.LinodeFirewall, - clusterv1.ReadyCondition, - string(failureReason), - clusterv1.ConditionSeverityError, - "%s", - err.Error(), - ) - - r.Recorder.Event(firewallScope.LinodeFirewall, corev1.EventTypeWarning, string(failureReason), err.Error()) -} - -func (r *LinodeFirewallReconciler) reconcileCreate( - ctx 
context.Context, - logger logr.Logger, - firewallScope *scope.FirewallScope, -) error { - linodeFW, err := services.HandleFirewall(ctx, firewallScope.LinodeFirewall, firewallScope.LinodeClient, logger) - if err != nil || linodeFW == nil { - r.setFailureReason(firewallScope, infrav1alpha1.CreateFirewallError, err) - - return err - } - firewallScope.LinodeFirewall.Spec.FirewallID = util.Pointer(linodeFW.ID) - - return nil -} - -func (r *LinodeFirewallReconciler) reconcileUpdate( - ctx context.Context, - logger logr.Logger, - firewallScope *scope.FirewallScope, -) error { - linodeFW, err := services.HandleFirewall(ctx, firewallScope.LinodeFirewall, firewallScope.LinodeClient, logger) - if err != nil || linodeFW == nil { - r.setFailureReason(firewallScope, infrav1alpha1.UpdateFirewallError, err) - - return err - } - firewallScope.LinodeFirewall.Spec.FirewallID = util.Pointer(linodeFW.ID) - - return nil -} - -func (r *LinodeFirewallReconciler) reconcileDelete( - ctx context.Context, - logger logr.Logger, - firewallScope *scope.FirewallScope, -) error { - if firewallScope.LinodeFirewall.Spec.FirewallID == nil { - logger.Info("Firewall ID is missing, nothing to do") - controllerutil.RemoveFinalizer(firewallScope.LinodeFirewall, infrav1alpha1.GroupVersion.String()) - - return nil - } - - if err := firewallScope.LinodeClient.DeleteFirewall(ctx, *firewallScope.LinodeFirewall.Spec.FirewallID); err != nil { - logger.Info("Failed to delete Linode NodeBalancer", "error", err.Error()) - - // Not found is not an error - apiErr := linodego.Error{} - if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound { - r.setFailureReason(firewallScope, infrav1alpha1.DeleteFirewallError, err) - - return err - } - } - - conditions.MarkFalse( - firewallScope.LinodeFirewall, - clusterv1.ReadyCondition, - clusterv1.DeletedReason, - clusterv1.ConditionSeverityInfo, - "Firewall deleted", - ) - - firewallScope.LinodeFirewall.Spec.FirewallID = nil - 
controllerutil.RemoveFinalizer(firewallScope.LinodeFirewall, infrav1alpha1.GroupVersion.String()) - - return nil -} - -// SetupWithManager sets up the controller with the Manager. -func (r *LinodeFirewallReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&infrav1alpha1.LinodeFirewall{}). - Complete(r) -} From c8e2609e3697019c78b11b27d952fe9bbad02288 Mon Sep 17 00:00:00 2001 From: Ashley Dumaine Date: Wed, 6 Mar 2024 14:40:47 -0500 Subject: [PATCH 4/5] collapse generated files --- .gitattributes | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index 30cc62af5..00b517e19 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,4 @@ *.sh text eol=lf -*.yaml text eol=lf \ No newline at end of file +*.yaml text eol=lf +PROJECT linguist-generated=true +api/**/zz_generated.deepcopy.go linguist-generated=true From a6c083fb0dd2d5e238f27dd1c3265522ac0a840a Mon Sep 17 00:00:00 2001 From: Ashley Dumaine Date: Wed, 6 Mar 2024 18:14:14 -0500 Subject: [PATCH 5/5] even more refactoring! --- api/v1alpha1/linodecluster_types.go | 4 + api/v1alpha1/zz_generated.deepcopy.go | 5 + cloud/scope/cluster.go | 30 ++-- cloud/services/firewalls.go | 139 ++++++++++++++++++ ...cture.cluster.x-k8s.io_linodeclusters.yaml | 47 ++++++ ...uster.x-k8s.io_linodeclustertemplates.yaml | 47 ++++++ config/rbac/role.yaml | 14 -- controller/linodecluster_controller.go | 84 +++++++---- controller/linodemachine_controller.go | 27 ++++ .../linodemachine_controller_helpers.go | 39 ++++- 10 files changed, 370 insertions(+), 66 deletions(-) diff --git a/api/v1alpha1/linodecluster_types.go b/api/v1alpha1/linodecluster_types.go index 8ce6e56a0..0f84cfcd0 100644 --- a/api/v1alpha1/linodecluster_types.go +++ b/api/v1alpha1/linodecluster_types.go @@ -50,6 +50,10 @@ type LinodeClusterSpec struct { // control plane nodes. 
// +optional ControlPlaneFirewall FirewallSpec `json:"controlPlaneFirewall,omitempty"` + + // ControlPlaneFirewallRef is a reference to the Firewall for the control plane nodes. + // +optional + ControlPlaneFirewallRef *corev1.ObjectReference `json:"controlPlaneFirewallRef,omitempty"` } // LinodeClusterStatus defines the observed state of LinodeCluster diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 226384169..2358b337c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -198,6 +198,11 @@ func (in *LinodeClusterSpec) DeepCopyInto(out *LinodeClusterSpec) { **out = **in } in.ControlPlaneFirewall.DeepCopyInto(&out.ControlPlaneFirewall) + if in.ControlPlaneFirewallRef != nil { + in, out := &in.ControlPlaneFirewallRef, &out.ControlPlaneFirewallRef + *out = new(v1.ObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeClusterSpec. diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index d6fd4011c..05a1e6b45 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -32,10 +32,9 @@ import ( // ClusterScopeParams defines the input parameters used to create a new Scope. 
type ClusterScopeParams struct { - Client client.Client - Cluster *clusterv1.Cluster - LinodeCluster *infrav1alpha1.LinodeCluster - ControlPlaneFirewall *infrav1alpha1.LinodeFirewall + Client client.Client + Cluster *clusterv1.Cluster + LinodeCluster *infrav1alpha1.LinodeCluster } func validateClusterScopeParams(params ClusterScopeParams) error { @@ -45,9 +44,6 @@ func validateClusterScopeParams(params ClusterScopeParams) error { if params.LinodeCluster == nil { return errors.New("linodeCluster is required when creating a ClusterScope") } - if params.ControlPlaneFirewall == nil { - return errors.New("controlPlaneFirewall is required when creating a ClusterScope") - } return nil } @@ -75,12 +71,11 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara } return &ClusterScope{ - client: params.Client, - Cluster: params.Cluster, - LinodeClient: linodeClient, - LinodeCluster: params.LinodeCluster, - ControlPlaneFirewall: params.ControlPlaneFirewall, - PatchHelper: helper, + client: params.Client, + Cluster: params.Cluster, + LinodeClient: linodeClient, + LinodeCluster: params.LinodeCluster, + PatchHelper: helper, }, nil } @@ -88,11 +83,10 @@ func NewClusterScope(ctx context.Context, apiKey string, params ClusterScopePara type ClusterScope struct { client client.Client - PatchHelper *patch.Helper - LinodeClient *linodego.Client - Cluster *clusterv1.Cluster - LinodeCluster *infrav1alpha1.LinodeCluster - ControlPlaneFirewall *infrav1alpha1.LinodeFirewall + PatchHelper *patch.Helper + LinodeClient *linodego.Client + Cluster *clusterv1.Cluster + LinodeCluster *infrav1alpha1.LinodeCluster } // PatchObject persists the cluster configuration and status. 
diff --git a/cloud/services/firewalls.go b/cloud/services/firewalls.go index 6889e0368..b65a284b1 100644 --- a/cloud/services/firewalls.go +++ b/cloud/services/firewalls.go @@ -10,6 +10,7 @@ import ( "github.com/linode/linodego" infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1" + "github.com/linode/cluster-api-provider-linode/cloud/scope" "github.com/linode/cluster-api-provider-linode/util" ) @@ -311,3 +312,141 @@ func processACL(firewall *infrav1alpha1.LinodeFirewall, tags []string) ( return createOpts, nil } + +// AddNodeToApiServerFW adds a Node's IPs to the given Cloud Firewall's inbound rules +func AddNodeToApiServerFW( + ctx context.Context, + logger logr.Logger, + machineScope *scope.MachineScope, + firewall *infrav1alpha1.LinodeFirewall, +) error { + if firewall.Spec.FirewallID == nil { + err := errors.New("no firewall ID") + logger.Error(err, "no ID is set for the firewall") + + return err + } + + ipv4s, ipv6s, err := getInstanceIPs(ctx, machineScope.LinodeClient, machineScope.LinodeMachine.Spec.InstanceID) + if err != nil { + logger.Error(err, "Failed get instance IP addresses") + + return err + } + + // get the rules and append a new rule for this Node to access the api server + newRule := infrav1alpha1.FirewallRule{ + Action: "ACCEPT", + Label: "api-server", + Description: "Rule created by CAPL", + Ports: fmt.Sprint(machineScope.LinodeCluster.Spec.ControlPlaneEndpoint.Port), + Protocol: linodego.TCP, + Addresses: &infrav1alpha1.NetworkAddresses{ + IPv4: util.Pointer(ipv4s), + IPv6: util.Pointer(ipv6s), + }, + } + // update the inbound rules + firewall.Spec.InboundRules = append(firewall.Spec.InboundRules, newRule) + + // reprocess the firewall to make sure we won't exceed the IP and rule limit + clusterUID := firewall.Spec.ClusterUID + fwConfig, err := processACL(firewall, []string{clusterUID}) + if err != nil { + logger.Info("Failed to process ACL", "error", err.Error()) + + return err + } + + // finally, update the firewall + 
if _, err := machineScope.LinodeClient.UpdateFirewallRules(ctx, *firewall.Spec.FirewallID, fwConfig.Rules); err != nil { + logger.Info("Failed to update firewall", "error", err.Error()) + + return err + } + + return nil +} + +// DeleteNodeFromApiServerFW removes Node from the given Cloud Firewall's inbound rules +func DeleteNodeFromApiServerFW( + ctx context.Context, + logger logr.Logger, + machineScope *scope.MachineScope, + firewall *infrav1alpha1.LinodeFirewall, +) error { + if firewall.Spec.FirewallID == nil { + logger.Info("Firewall already deleted, no Firewall address to remove") + + return nil + } + + if machineScope.LinodeMachine.Spec.InstanceID == nil { + return errors.New("no InstanceID") + } + + ipv4s, ipv6s, err := getInstanceIPs(ctx, machineScope.LinodeClient, machineScope.LinodeMachine.Spec.InstanceID) + if err != nil { + logger.Error(err, "Failed get instance IP addresses") + + return err + } + + for _, rule := range firewall.Spec.InboundRules { + rule.Addresses.IPv4 = util.Pointer(setDiff(*rule.Addresses.IPv4, ipv4s)) + rule.Addresses.IPv6 = util.Pointer(setDiff(*rule.Addresses.IPv6, ipv6s)) + } + + // reprocess the firewall + clusterUID := firewall.Spec.ClusterUID + fwConfig, err := processACL(firewall, []string{clusterUID}) + if err != nil { + logger.Info("Failed to process ACL", "error", err.Error()) + + return err + } + + // finally, update the firewall + if _, err := machineScope.LinodeClient.UpdateFirewallRules(ctx, *firewall.Spec.FirewallID, fwConfig.Rules); err != nil { + logger.Info("Failed to update firewall", "error", err.Error()) + + return err + } + + return nil +} + +func getInstanceIPs(ctx context.Context, client *linodego.Client, instanceID *int) (ipv4s, ipv6s []string, err error) { + addresses, err := client.GetInstanceIPAddresses(ctx, *instanceID) + if err != nil { + return ipv4s, ipv6s, err + } + + // get all the ipv4 addresses for the node + for _, addr := range addresses.IPv4.Private { + ipv4s = append(ipv4s, addr.Address) + } + 
for _, addr := range addresses.IPv4.Public { + ipv4s = append(ipv4s, addr.Address) + } + + // get all the ipv6 addresses for the node + ipv6s = []string{addresses.IPv6.SLAAC.Address, addresses.IPv6.LinkLocal.Address} + + return ipv4s, ipv6s, nil +} + +// setDiff: A - B +func setDiff(a, b []string) (diff []string) { + m := make(map[string]bool) + for _, item := range b { + m[item] = true + } + for _, item := range a { + if _, ok := m[item]; !ok { + diff = append(diff, item) + } + } + + return diff +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index e6b0eb238..2c0a7464c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -88,6 +88,8 @@ spec: description: |- AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes that should be permitted to reach the K8s API server + Per the Linode API: + Must contain only valid IPv4 addresses or networks (both must be in ip/mask format) items: type: string type: array @@ -109,6 +111,51 @@ spec: description: FirewallID is the ID of the Cloud Firewall. type: integer type: object + controlPlaneFirewallRef: + description: ControlPlaneFirewallRef is a reference to the Firewall + for the control plane nodes. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. 
If not diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml index 74f97d1c6..1cde9ad7b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclustertemplates.yaml @@ -82,6 +82,8 @@ spec: description: |- AllowedIPV4Addresses specifies additional IPV4 addresses aside from the worker nodes that should be permitted to reach the K8s API server + Per the Linode API: + Must contain only valid IPv4 addresses or networks (both must be in ip/mask format) items: type: string type: array @@ -103,6 +105,51 @@ spec: description: FirewallID is the ID of the Cloud Firewall. type: integer type: object + controlPlaneFirewallRef: + description: ControlPlaneFirewallRef is a reference to the + Firewall for the control plane nodes. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic credentialsRef: description: |- CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning this cluster. If not diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 21520bc6e..c11289e71 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -80,20 +80,6 @@ rules: - patch - update - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - linodefirewalls/finalizers - verbs: - - update -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - linodefirewalls/status - verbs: - - get - - patch - - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go index ec1d97a41..a705091bb 100644 --- a/controller/linodecluster_controller.go +++ b/controller/linodecluster_controller.go @@ -63,6 +63,8 @@ type LinodeClusterReconciler struct { // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeclusters/finalizers,verbs=update +// 
+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls,verbs=get;list;watch;create;update;patch;delete + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -94,20 +96,14 @@ func (r *LinodeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - controlPlaneFW := &infrav1alpha1.LinodeFirewall{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-api-server", linodeCluster.Name), - }, - } // Create the cluster scope. clusterScope, err := scope.NewClusterScope( ctx, r.LinodeApiKey, scope.ClusterScopeParams{ - Client: r.Client, - Cluster: cluster, - LinodeCluster: linodeCluster, - ControlPlaneFirewall: controlPlaneFW, + Client: r.Client, + Cluster: cluster, + LinodeCluster: linodeCluster, }) if err != nil { logger.Info("Failed to create cluster scope", "error", err.Error()) @@ -166,8 +162,9 @@ func (r *LinodeClusterReconciler) reconcile( return res, nil } -func createControlPlaneFirewallSpec(linodeCluster *infrav1alpha1.LinodeCluster) *infrav1alpha1.LinodeFirewallSpec { - // TODO: get node IPs and append +func (r *LinodeClusterReconciler) createControlPlaneFirewallSpec( + linodeCluster *infrav1alpha1.LinodeCluster, +) *infrav1alpha1.LinodeFirewallSpec { // Per the Linode API: // Must contain only valid IPv4 addresses or networks (both must be in ip/mask format) apiServerIPV4 := append( @@ -214,46 +211,62 @@ func createControlPlaneFirewallSpec(linodeCluster *infrav1alpha1.LinodeCluster) } } -func setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error, lcr *LinodeClusterReconciler) { +func (r *LinodeClusterReconciler) setFailureReason(clusterScope *scope.ClusterScope, failureReason cerrs.ClusterStatusError, err error) { clusterScope.LinodeCluster.Status.FailureReason = util.Pointer(failureReason) clusterScope.LinodeCluster.Status.FailureMessage = 
util.Pointer(err.Error()) conditions.MarkFalse(clusterScope.LinodeCluster, clusterv1.ReadyCondition, string(failureReason), clusterv1.ConditionSeverityError, "%s", err.Error()) - lcr.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error()) + r.Recorder.Event(clusterScope.LinodeCluster, corev1.EventTypeWarning, string(failureReason), err.Error()) } func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error { + // handle NodeBalancer linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger) if err != nil { - setFailureReason(clusterScope, cerrs.CreateClusterError, err, r) + r.setFailureReason(clusterScope, cerrs.CreateClusterError, err) return err } - clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = linodeNB.ID - linodeNBConfig, err := services.CreateNodeBalancerConfig(ctx, clusterScope, logger) if err != nil { - setFailureReason(clusterScope, cerrs.CreateClusterError, err, r) + r.setFailureReason(clusterScope, cerrs.CreateClusterError, err) return err } - clusterScope.LinodeCluster.Spec.Network.NodeBalancerConfigID = util.Pointer(linodeNBConfig.ID) + // Set the control plane endpoint with the new Nodebalancer host and port clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: *linodeNB.IPv4, Port: int32(linodeNBConfig.Port), } - // build out the control plane firewall rules - clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster) + // build out the control plane Firewall rules + controlPlaneFW := &infrav1alpha1.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name), + Namespace: clusterScope.LinodeCluster.Namespace, + }, + Spec: *r.createControlPlaneFirewallSpec(clusterScope.LinodeCluster), + } + + // Handle the Firewall + if err := r.Client.Create(ctx, controlPlaneFW); err != nil { + 
r.setFailureReason(clusterScope, cerrs.CreateClusterError, err) - // Handle firewalls - firewall, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger) + return err + } + clusterScope.LinodeCluster.Spec.ControlPlaneFirewallRef = &corev1.ObjectReference{ + Kind: controlPlaneFW.Kind, + Namespace: controlPlaneFW.Namespace, + Name: controlPlaneFW.Name, + } + // NOTE: if we add a reconciler later on don't call this as the reconciler will take care of it + firewall, err := services.HandleFirewall(ctx, controlPlaneFW, clusterScope.LinodeClient, logger) if err != nil { - setFailureReason(clusterScope, cerrs.CreateClusterError, err, r) + r.setFailureReason(clusterScope, cerrs.CreateClusterError, err) return err } @@ -262,17 +275,28 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo return nil } + func (r *LinodeClusterReconciler) reconcileUpdate( ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope, ) error { - // build out the control plane firewall rules - clusterScope.ControlPlaneFirewall.Spec = *createControlPlaneFirewallSpec(clusterScope.LinodeCluster) + // Update the Firewall if necessary + controlPlaneFW := &infrav1alpha1.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name), + Namespace: clusterScope.LinodeCluster.Namespace, + }, + Spec: *r.createControlPlaneFirewallSpec(clusterScope.LinodeCluster), + } - // Handle firewalls - if _, err := services.HandleFirewall(ctx, clusterScope.ControlPlaneFirewall, clusterScope.LinodeClient, logger); err != nil { - setFailureReason(clusterScope, cerrs.UpdateClusterError, err, r) + if err := r.Client.Update(ctx, controlPlaneFW); err != nil { + r.setFailureReason(clusterScope, cerrs.UpdateClusterError, err) + + return err + } + if _, err := services.HandleFirewall(ctx, controlPlaneFW, clusterScope.LinodeClient, logger); err != nil { + 
r.setFailureReason(clusterScope, cerrs.UpdateClusterError, err) return err } @@ -292,7 +316,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo // Not found is not an error apiErr := linodego.Error{} if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound { - setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r) + r.setFailureReason(clusterScope, cerrs.DeleteClusterError, err) return err } @@ -312,7 +336,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo // Not found is not an error apiErr := linodego.Error{} if errors.As(err, &apiErr) && apiErr.Code != http.StatusNotFound { - setFailureReason(clusterScope, cerrs.DeleteClusterError, err, r) + r.setFailureReason(clusterScope, cerrs.DeleteClusterError, err) return err } diff --git a/controller/linodemachine_controller.go b/controller/linodemachine_controller.go index bff70b4de..ea95440b0 100644 --- a/controller/linodemachine_controller.go +++ b/controller/linodemachine_controller.go @@ -89,6 +89,8 @@ type LinodeMachineReconciler struct { // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodefirewalls,verbs=get;list;watch;update;patch; + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // @@ -281,6 +283,7 @@ func (r *LinodeMachineReconciler) reconcile( return } +//nolint:gocyclo,cyclop // As simple as possible. 
func (r *LinodeMachineReconciler) reconcileCreate( ctx context.Context, logger logr.Logger, @@ -380,6 +383,18 @@ func (r *LinodeMachineReconciler) reconcileCreate( return linodeInstance, err } + linodeFW, err := r.getFirewall(ctx, machineScope) + if err != nil { + logger.Error(err, "Failed to fetch LinodeFirewall") + + return linodeInstance, err + } + if err = services.AddNodeToApiServerFW(ctx, logger, machineScope, linodeFW); err != nil { + logger.Error(err, "Failed to add instance to Firewall") + + return linodeInstance, err + } + return linodeInstance, nil } @@ -514,6 +529,18 @@ func (r *LinodeMachineReconciler) reconcileDelete( return err } + linodeFW, err := r.getFirewall(ctx, machineScope) + if err != nil { + logger.Error(err, "Failed to fetch LinodeFirewall") + + return err + } + if err := services.DeleteNodeFromApiServerFW(ctx, logger, machineScope, linodeFW); err != nil { + logger.Error(err, "Failed to remove node from Firewall") + + return err + } + if err := machineScope.LinodeClient.DeleteInstance(ctx, *machineScope.LinodeMachine.Spec.InstanceID); err != nil { if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { logger.Error(err, "Failed to delete Linode machine instance") diff --git a/controller/linodemachine_controller_helpers.go b/controller/linodemachine_controller_helpers.go index 51705428c..6bb565118 100644 --- a/controller/linodemachine_controller_helpers.go +++ b/controller/linodemachine_controller_helpers.go @@ -46,7 +46,12 @@ import ( // The decoded user_data must not exceed 16384 bytes per the Linode API const maxBootstrapDataBytes = 16384 -func (*LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineScope *scope.MachineScope, tags []string, logger logr.Logger) (*linodego.InstanceCreateOptions, error) { +func (*LinodeMachineReconciler) newCreateConfig( + ctx context.Context, + machineScope *scope.MachineScope, + tags []string, + logger logr.Logger, +) (*linodego.InstanceCreateOptions, error) { var err error 
createConfig := linodeMachineSpecToInstanceCreateConfig(machineScope.LinodeMachine.Spec) @@ -58,13 +63,17 @@ func (*LinodeMachineReconciler) newCreateConfig(ctx context.Context, machineScop return nil, err } + // Do not boot the linode until extra configuration is done createConfig.Booted = util.Pointer(false) createConfig.PrivateIP = true - if kutil.IsControlPlaneMachine(machineScope.Machine) && - machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil { - createConfig.FirewallID = *machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID + if machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID != nil { + // If this is a control plane machine, set it to be protected by the + // control plane Cloud Firewall + if kutil.IsControlPlaneMachine(machineScope.Machine) { + createConfig.FirewallID = *machineScope.LinodeCluster.Spec.ControlPlaneFirewall.FirewallID + } } bootstrapData, err := machineScope.GetBootstrapData(ctx) @@ -292,3 +301,25 @@ func linodeMachineSpecToInstanceCreateConfig(machineSpec infrav1alpha1.LinodeMac return &createConfig } + +func (r *LinodeMachineReconciler) getFirewall( + ctx context.Context, + machineScope *scope.MachineScope, +) (*infrav1alpha1.LinodeFirewall, error) { + name := machineScope.LinodeCluster.Spec.ControlPlaneFirewallRef.Name + namespace := machineScope.LinodeCluster.Spec.ControlPlaneFirewallRef.Namespace + if namespace == "" { + namespace = machineScope.LinodeCluster.Namespace + } + linodeFW := &infrav1alpha1.LinodeFirewall{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(linodeFW), linodeFW); err != nil { + return nil, err + } + + return linodeFW, nil +}