From acd26016dbaba574f7c51cbfdd06fafcccea986e Mon Sep 17 00:00:00 2001
From: Ashley Dumaine <5779804+AshleyDumaine@users.noreply.github.com>
Date: Tue, 2 Jul 2024 20:46:12 -0400
Subject: [PATCH] [fix] make apiserver port configurable for DNS-based LB (#386)

* make apiserver port configurable for dns LB
* set a non-6443 port in tests and verify
* fix linter issues
* make apiserver port configurable in templates
* make konnectivity port configurable in templates
* only allow kubeadm flavors to have configurable apiserver port for now

---------

Co-authored-by: Amol Deodhar
---
 cloud/services/loadbalancers.go              |  8 +-
 cloud/services/loadbalancers_test.go         | 28 +++----
 controller/linodecluster_controller.go       | 74 +++++++++----------
 controller/linodecluster_controller_test.go  | 11 +--
 docs/src/topics/firewalling.md               | 10 +--
 .../ciliumNetworkPolicies.yaml               |  2 +-
 .../addons/konnectivity/konnectivity.yaml    |  1 +
 .../kubeadm/default/kubeadmControlPlane.yaml |  2 +
 .../kubeadm/default/kustomization.yaml       | 12 +++
 .../kubeadm/konnectivity/kustomization.yaml  |  6 +-
 .../kubeadm/vpcless/kustomization.yaml       |  2 +-
 11 files changed, 86 insertions(+), 70 deletions(-)

diff --git a/cloud/services/loadbalancers.go b/cloud/services/loadbalancers.go
index aae58d059..7500e9ff3 100644
--- a/cloud/services/loadbalancers.go
+++ b/cloud/services/loadbalancers.go
@@ -16,8 +16,8 @@ import (
 )

 const (
-	defaultApiserverLBPort    = 6443
-	defaultKonnectivityLBPort = 8132
+	DefaultApiserverLBPort    = 6443
+	DefaultKonnectivityLBPort = 8132
 )

 // CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
@@ -79,7 +79,7 @@ func CreateNodeBalancerConfigs(
 	logger logr.Logger,
 ) ([]*linodego.NodeBalancerConfig, error) {
 	nbConfigs := []*linodego.NodeBalancerConfig{}
-	apiLBPort := defaultApiserverLBPort
+	apiLBPort := DefaultApiserverLBPort
 	if clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
 		apiLBPort = clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
 	}
@@ -153,7 +153,7 @@ func AddNodeToNB(
 		return err
 	}

-	apiserverLBPort := defaultApiserverLBPort
+	apiserverLBPort := DefaultApiserverLBPort
 	if machineScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
 		apiserverLBPort = machineScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
 	}
diff --git a/cloud/services/loadbalancers_test.go b/cloud/services/loadbalancers_test.go
index b5749b6bf..27507c3e1 100644
--- a/cloud/services/loadbalancers_test.go
+++ b/cloud/services/loadbalancers_test.go
@@ -41,7 +41,7 @@ func TestCreateNodeBalancer(t *testing.T) {
 				NodeBalancerID: ptr.To(1234),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
@@ -215,7 +215,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 			},
 			expectedConfigs: []*linodego.NodeBalancerConfig{
 				{
-					Port:      defaultApiserverLBPort,
+					Port:      DefaultApiserverLBPort,
 					Protocol:  linodego.ProtocolTCP,
 					Algorithm: linodego.AlgorithmRoundRobin,
 					Check:     linodego.CheckConnection,
 				},
 			},
 			expects: func(mockClient *mock.MockLinodeClient) {
 				mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{
-					Port:      defaultApiserverLBPort,
+					Port:      DefaultApiserverLBPort,
 					Protocol:  linodego.ProtocolTCP,
 					Algorithm: linodego.AlgorithmRoundRobin,
 					Check:     linodego.CheckConnection,
@@ -302,7 +302,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 				NodeBalancerID: ptr.To(1234),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
@@ -312,14 +312,14 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 			},
 			expectedConfigs: []*linodego.NodeBalancerConfig{
 				{
-					Port:           defaultApiserverLBPort,
+					Port:           DefaultApiserverLBPort,
 					Protocol:       linodego.ProtocolTCP,
 					Algorithm:      linodego.AlgorithmRoundRobin,
 					Check:          linodego.CheckConnection,
 					NodeBalancerID: 1234,
 				},
 				{
-					Port:           defaultKonnectivityLBPort,
+					Port:           DefaultKonnectivityLBPort,
 					Protocol:       linodego.ProtocolTCP,
 					Algorithm:      linodego.AlgorithmRoundRobin,
 					Check:          linodego.CheckConnection,
@@ -345,7 +345,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 				NodeBalancerID: ptr.To(1234),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
@@ -355,14 +355,14 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 			},
 			expectedConfigs: []*linodego.NodeBalancerConfig{
 				{
-					Port:           defaultApiserverLBPort,
+					Port:           DefaultApiserverLBPort,
 					Protocol:       linodego.ProtocolTCP,
 					Algorithm:      linodego.AlgorithmRoundRobin,
 					Check:          linodego.CheckConnection,
 					NodeBalancerID: 1234,
 				},
 				{
-					Port:           defaultKonnectivityLBPort,
+					Port:           DefaultKonnectivityLBPort,
 					Protocol:       linodego.ProtocolTCP,
 					Algorithm:      linodego.AlgorithmRoundRobin,
 					Check:          linodego.CheckConnection,
@@ -372,7 +372,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
 			expectedError: fmt.Errorf("error creating NodeBalancerConfig"),
 			expects: func(mockClient *mock.MockLinodeClient) {
 				mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{
-					Port:      defaultApiserverLBPort,
+					Port:      DefaultApiserverLBPort,
 					Protocol:  linodego.ProtocolTCP,
 					Algorithm: linodego.AlgorithmRoundRobin,
 					Check:     linodego.CheckConnection,
@@ -437,7 +437,7 @@ func TestAddNodeToNBConditions(t *testing.T) {
 				Network: infrav1alpha2.NetworkSpec{
 					NodeBalancerID:                ptr.To(1234),
 					ApiserverNodeBalancerConfigID: nil,
-					ApiserverLoadBalancerPort:     defaultApiserverLBPort,
+					ApiserverLoadBalancerPort:     DefaultApiserverLBPort,
 				},
 			},
 		},
@@ -600,7 +600,7 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) {
 				ApiserverNodeBalancerConfigID: ptr.To(5678),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
@@ -800,7 +800,7 @@ func TestDeleteNodeFromNB(t *testing.T) {
 				ApiserverNodeBalancerConfigID: ptr.To(5678),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
@@ -886,7 +886,7 @@ func TestDeleteNodeFromNB(t *testing.T) {
 				ApiserverNodeBalancerConfigID: ptr.To(5678),
 				AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
 					{
-						Port:                 8132,
+						Port:                 DefaultKonnectivityLBPort,
 						NodeBalancerConfigID: ptr.To(1234),
 					},
 				},
diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go
index 258c7333d..62fb30884 100644
--- a/controller/linodecluster_controller.go
+++ b/controller/linodecluster_controller.go
@@ -46,10 +46,6 @@ import (
 	"github.com/linode/cluster-api-provider-linode/util/reconciler"
 )

-const (
-	defaultApiserverPort = 6443
-)
-
 // LinodeClusterReconciler reconciles a LinodeCluster object
 type LinodeClusterReconciler struct {
 	client.Client
@@ -181,48 +177,52 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo
 	if clusterScope.LinodeCluster.Spec.Network.LoadBalancerType == "dns" {
 		domainName := clusterScope.LinodeCluster.ObjectMeta.Name + "-" + clusterScope.LinodeCluster.Spec.Network.DNSUniqueIdentifier + "." + clusterScope.LinodeCluster.Spec.Network.DNSRootDomain
+		apiLBPort := services.DefaultApiserverLBPort
+		if clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
+			apiLBPort = clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
+		}
 		clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
 			Host: domainName,
-			Port: int32(defaultApiserverPort),
-		}
-	} else {
-		linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
-		if err != nil {
-			logger.Error(err, "failed to create nodebalancer")
-			setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
-			return err
+			Port: int32(apiLBPort),
 		}
+		return nil
+	}
+	linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
+	if err != nil {
+		logger.Error(err, "failed to create nodebalancer")
+		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		return err
+	}

-		if linodeNB == nil {
-			err = fmt.Errorf("nodeBalancer created was nil")
-			setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
-			return err
-		}
+	if linodeNB == nil {
+		err = fmt.Errorf("nodeBalancer created was nil")
+		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		return err
+	}

-		clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = &linodeNB.ID
+	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = &linodeNB.ID

-		configs, err := services.CreateNodeBalancerConfigs(ctx, clusterScope, logger)
-		if err != nil {
-			logger.Error(err, "failed to create nodebalancer config")
-			setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
-			return err
-		}
+	configs, err := services.CreateNodeBalancerConfigs(ctx, clusterScope, logger)
+	if err != nil {
+		logger.Error(err, "failed to create nodebalancer config")
+		setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
+		return err
+	}

-		clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = util.Pointer(configs[0].ID)
-		additionalPorts := make([]infrav1alpha2.LinodeNBPortConfig, 0)
-		for _, config := range configs[1:] {
-			portConfig := infrav1alpha2.LinodeNBPortConfig{
-				Port:                 config.Port,
-				NodeBalancerConfigID: &config.ID,
-			}
-			additionalPorts = append(additionalPorts, portConfig)
+	clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = util.Pointer(configs[0].ID)
+	additionalPorts := make([]infrav1alpha2.LinodeNBPortConfig, 0)
+	for _, config := range configs[1:] {
+		portConfig := infrav1alpha2.LinodeNBPortConfig{
+			Port:                 config.Port,
+			NodeBalancerConfigID: &config.ID,
 		}
-		clusterScope.LinodeCluster.Spec.Network.AdditionalPorts = additionalPorts
+		additionalPorts = append(additionalPorts, portConfig)
+	}
+	clusterScope.LinodeCluster.Spec.Network.AdditionalPorts = additionalPorts

-		clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
-			Host: *linodeNB.IPv4,
-			Port: int32(configs[0].Port),
-		}
+	clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+		Host: *linodeNB.IPv4,
+		Port: int32(configs[0].Port),
 	}

 	return nil
diff --git a/controller/linodecluster_controller_test.go b/controller/linodecluster_controller_test.go
index b4f06dc82..08f8db7b2 100644
--- a/controller/linodecluster_controller_test.go
+++ b/controller/linodecluster_controller_test.go
@@ -227,7 +227,7 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
 var _ = Describe("cluster-lifecycle-dns", Ordered, Label("cluster", "cluster-lifecycle-dns"), func() {
 	controlPlaneEndpointHost := "cluster-lifecycle-dns-abc123.lkedevs.net"
-	controlPlaneEndpointPort := 6443
+	controlPlaneEndpointPort := 1000
 	clusterName := "cluster-lifecycle-dns"
 	clusterNameSpace := "default"
 	ownerRef := metav1.OwnerReference{
@@ -248,10 +248,11 @@ var _ = Describe("cluster-lifecycle-dns", Ordered, Label("cluster", "cluster-lif
 		Spec: infrav1alpha2.LinodeClusterSpec{
 			Region: "us-ord",
 			Network: infrav1alpha2.NetworkSpec{
-				LoadBalancerType:    "dns",
-				DNSRootDomain:       "lkedevs.net",
-				DNSUniqueIdentifier: "abc123",
-				DNSTTLSec:           30,
+				LoadBalancerType:          "dns",
+				DNSRootDomain:             "lkedevs.net",
+				DNSUniqueIdentifier:       "abc123",
+				DNSTTLSec:                 30,
+				ApiserverLoadBalancerPort: controlPlaneEndpointPort,
 			},
 		},
 	}
diff --git a/docs/src/topics/firewalling.md b/docs/src/topics/firewalling.md
index 4922f01ea..a918a06a4 100644
--- a/docs/src/topics/firewalling.md
+++ b/docs/src/topics/firewalling.md
@@ -7,10 +7,10 @@ By default, the following policies are set to audit mode(without any enforcement
 * [Kubeadm](./flavors/default.md) cluster allow rules

-  | Ports     | Use-case                 | Allowed clients       |
-  |-----------|--------------------------|-----------------------|
-  | 6443      | API Server Traffic       | World                 |
-  | *         | In Cluster Communication | Intra Cluster Traffic |
+  | Ports                   | Use-case                 | Allowed clients       |
+  |-------------------------|--------------------------|-----------------------|
+  | ${APISERVER_PORT:=6443} | API Server Traffic       | World                 |
+  | *                       | In Cluster Communication | Intra Cluster Traffic |

 ```admonish note
 For kubeadm clusters running outside of VPC, ports 2379 and 2380 are also allowed for etcd-traffic.
@@ -55,7 +55,7 @@ spec:
       toPorts:
         - ports:
             - port: "22" # added for SSH Access to the nodes
-            - port: "6443"
+            - port: "${APISERVER_PORT:=6443}"
 ```
 Alternatively, additional rules can be added by creating a new policy
 ```yaml
diff --git a/templates/addons/cilium-network-policies/ciliumNetworkPolicies.yaml b/templates/addons/cilium-network-policies/ciliumNetworkPolicies.yaml
index 2a0519803..8e2e0862f 100644
--- a/templates/addons/cilium-network-policies/ciliumNetworkPolicies.yaml
+++ b/templates/addons/cilium-network-policies/ciliumNetworkPolicies.yaml
@@ -34,7 +34,7 @@ data:
           - all
         toPorts:
           - ports:
-              - port: "6443"
+              - port: "${APISERVER_PORT:=6443}"
 ---
 apiVersion: addons.cluster.x-k8s.io/v1beta1
 kind: ClusterResourceSet
diff --git a/templates/addons/konnectivity/konnectivity.yaml b/templates/addons/konnectivity/konnectivity.yaml
index a60d74a00..d0bbfd005 100644
--- a/templates/addons/konnectivity/konnectivity.yaml
+++ b/templates/addons/konnectivity/konnectivity.yaml
@@ -16,4 +16,5 @@ spec:
   timeout: 5m
   valuesTemplate: |
     proxyServerHost: {{ .InfraCluster.spec.controlPlaneEndpoint.host }}
+    proxyServerPort: ${KONNECTIVITY_PORT:=8132}
     serverCount: ${CONTROL_PLANE_MACHINE_COUNT}
diff --git a/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml b/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml
index 1852cacce..666cbfa39 100644
--- a/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml
+++ b/templates/flavors/kubeadm/default/kubeadmControlPlane.yaml
@@ -27,6 +27,8 @@ spec:
         extraArgs:
           cloud-provider: external
       initConfiguration:
+        localAPIEndpoint:
+          bindPort: ${APISERVER_PORT:=6443}
         skipPhases:
           - addon/kube-proxy
         nodeRegistration:
diff --git a/templates/flavors/kubeadm/default/kustomization.yaml b/templates/flavors/kubeadm/default/kustomization.yaml
index aca59e671..8c608be7f 100644
--- a/templates/flavors/kubeadm/default/kustomization.yaml
+++ b/templates/flavors/kubeadm/default/kustomization.yaml
@@ -41,3 +41,15 @@ patches:
         ccm: ${CLUSTER_NAME}-linode
         csi: ${CLUSTER_NAME}-linode
         crs: ${CLUSTER_NAME}-crs
+  - target:
+      group: infrastructure.cluster.x-k8s.io
+      version: v1alpha2
+      kind: LinodeCluster
+    patch: |-
+      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+      kind: LinodeCluster
+      metadata:
+        name: ${CLUSTER_NAME}
+      spec:
+        network:
+          apiserverLoadBalancerPort: ${APISERVER_PORT:=6443}
diff --git a/templates/flavors/kubeadm/konnectivity/kustomization.yaml b/templates/flavors/kubeadm/konnectivity/kustomization.yaml
index 58d4bc2ca..e5744c655 100644
--- a/templates/flavors/kubeadm/konnectivity/kustomization.yaml
+++ b/templates/flavors/kubeadm/konnectivity/kustomization.yaml
@@ -62,7 +62,7 @@ patches:
       spec:
         network:
           additionalPorts:
-            - port: 8132
+            - port: ${KONNECTIVITY_PORT:=8132}
   - target:
       kind: ConfigMap
      name: .*-cilium-policy
@@ -100,7 +100,7 @@ patches:
               - all
             toPorts:
               - ports:
-                  - port: "6443"
+                  - port: "${APISERVER_PORT:=6443}"
       ---
       apiVersion: "cilium.io/v2"
       kind: CiliumClusterwideNetworkPolicy
@@ -114,4 +114,4 @@ patches:
               - all
             toPorts:
               - ports:
-                  - port: "8132"
+                  - port: "${KONNECTIVITY_PORT:=8132}"
diff --git a/templates/flavors/kubeadm/vpcless/kustomization.yaml b/templates/flavors/kubeadm/vpcless/kustomization.yaml
index 1e1b3afd1..b5df314a7 100644
--- a/templates/flavors/kubeadm/vpcless/kustomization.yaml
+++ b/templates/flavors/kubeadm/vpcless/kustomization.yaml
@@ -78,7 +78,7 @@ patches:
               - all
             toPorts:
               - ports:
-                  - port: "6443"
+                  - port: "${APISERVER_PORT:=6443}"
       ---
      apiVersion: "cilium.io/v2"
      kind: CiliumClusterwideNetworkPolicy
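
For reference, a minimal sketch of how the LinodeCluster networking fields touched by this patch look once the template variables are substituted. The cluster name and the non-default API server port below are hypothetical, and the variables are typically supplied when generating the manifests (for example as environment variables to `clusterctl generate cluster`); only the kubeadm flavors wire up `APISERVER_PORT`, per the last commit bullet.

```yaml
# Hypothetical rendering of the LinodeCluster patch added in
# templates/flavors/kubeadm/default/kustomization.yaml, assuming
# APISERVER_PORT=8443 and KONNECTIVITY_PORT=8132 were set.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LinodeCluster
metadata:
  name: my-cluster                      # hypothetical cluster name
spec:
  network:
    apiserverLoadBalancerPort: 8443     # from ${APISERVER_PORT:=6443}
    additionalPorts:
      - port: 8132                      # from ${KONNECTIVITY_PORT:=8132}, konnectivity flavor only
```

Leaving the variables unset keeps the previous behavior via the `:=` defaults: 6443 for the API server and 8132 for konnectivity.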