[fix] make apiserver port configurable for DNS-based LB (#386)
* make apiserver port configurable for dns LB

* set a non-6443 port in tests and verify

* fix linter issues

* make apiserver port configurable in templates

* make konnectivity port configurable in templates

* only allow kubeadm flavors to have configurable apiserver port for now

---------

Co-authored-by: Amol Deodhar <[email protected]>
AshleyDumaine and amold1 authored Jul 3, 2024
1 parent 9ed266e commit acd2601
Showing 11 changed files with 86 additions and 70 deletions.
8 changes: 4 additions & 4 deletions cloud/services/loadbalancers.go
@@ -16,8 +16,8 @@ import (
)

const (
defaultApiserverLBPort = 6443
defaultKonnectivityLBPort = 8132
DefaultApiserverLBPort = 6443
DefaultKonnectivityLBPort = 8132
)

// CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
@@ -79,7 +79,7 @@ func CreateNodeBalancerConfigs(
logger logr.Logger,
) ([]*linodego.NodeBalancerConfig, error) {
nbConfigs := []*linodego.NodeBalancerConfig{}
apiLBPort := defaultApiserverLBPort
apiLBPort := DefaultApiserverLBPort
if clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
apiLBPort = clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
}
@@ -153,7 +153,7 @@ func AddNodeToNB(
return err
}

apiserverLBPort := defaultApiserverLBPort
apiserverLBPort := DefaultApiserverLBPort
if machineScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
apiserverLBPort = machineScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
}
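
The defaulting these hunks rely on is simple: a zero ApiserverLoadBalancerPort in the spec means "use the exported default". Below is a minimal Go sketch of that pattern; the trimmed-down networkSpec struct and the apiserverLBPort helper are illustrative stand-ins, not code from this commit.

```go
// Minimal sketch of the port-defaulting pattern used by
// CreateNodeBalancerConfigs and AddNodeToNB: an unset (zero) port in the
// cluster spec falls back to the exported default.
package services

const (
	DefaultApiserverLBPort    = 6443
	DefaultKonnectivityLBPort = 8132
)

// networkSpec is an illustrative stand-in for infrav1alpha2.NetworkSpec,
// reduced to the one field this sketch needs.
type networkSpec struct {
	ApiserverLoadBalancerPort int
}

// apiserverLBPort is a hypothetical helper; the real code inlines this check.
func apiserverLBPort(network networkSpec) int {
	if network.ApiserverLoadBalancerPort != 0 {
		return network.ApiserverLoadBalancerPort
	}
	return DefaultApiserverLBPort
}
```

Exporting DefaultApiserverLBPort and DefaultKonnectivityLBPort is what lets the controller and the tests below reuse the same values instead of hard-coding 6443 and 8132 locally.
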
28 changes: 14 additions & 14 deletions cloud/services/loadbalancers_test.go
@@ -41,7 +41,7 @@ func TestCreateNodeBalancer(t *testing.T) {
NodeBalancerID: ptr.To(1234),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
@@ -215,7 +215,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
},
expectedConfigs: []*linodego.NodeBalancerConfig{
{
Port: defaultApiserverLBPort,
Port: DefaultApiserverLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
@@ -224,7 +224,7 @@
},
expects: func(mockClient *mock.MockLinodeClient) {
mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{
Port: defaultApiserverLBPort,
Port: DefaultApiserverLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
@@ -302,7 +302,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
NodeBalancerID: ptr.To(1234),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
@@ -312,14 +312,14 @@
},
expectedConfigs: []*linodego.NodeBalancerConfig{
{
Port: defaultApiserverLBPort,
Port: DefaultApiserverLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
NodeBalancerID: 1234,
},
{
Port: defaultKonnectivityLBPort,
Port: DefaultKonnectivityLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
@@ -345,7 +345,7 @@ func TestCreateNodeBalancerConfigs(t *testing.T) {
NodeBalancerID: ptr.To(1234),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
@@ -355,14 +355,14 @@
},
expectedConfigs: []*linodego.NodeBalancerConfig{
{
Port: defaultApiserverLBPort,
Port: DefaultApiserverLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
NodeBalancerID: 1234,
},
{
Port: defaultKonnectivityLBPort,
Port: DefaultKonnectivityLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
@@ -372,7 +372,7 @@
expectedError: fmt.Errorf("error creating NodeBalancerConfig"),
expects: func(mockClient *mock.MockLinodeClient) {
mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{
Port: defaultApiserverLBPort,
Port: DefaultApiserverLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
@@ -437,7 +437,7 @@ func TestAddNodeToNBConditions(t *testing.T) {
Network: infrav1alpha2.NetworkSpec{
NodeBalancerID: ptr.To(1234),
ApiserverNodeBalancerConfigID: nil,
ApiserverLoadBalancerPort: defaultApiserverLBPort,
ApiserverLoadBalancerPort: DefaultApiserverLBPort,
},
},
},
@@ -600,7 +600,7 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) {
ApiserverNodeBalancerConfigID: ptr.To(5678),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
@@ -800,7 +800,7 @@ func TestDeleteNodeFromNB(t *testing.T) {
ApiserverNodeBalancerConfigID: ptr.To(5678),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
@@ -886,7 +886,7 @@ func TestDeleteNodeFromNB(t *testing.T) {
ApiserverNodeBalancerConfigID: ptr.To(5678),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: 8132,
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
74 changes: 37 additions & 37 deletions controller/linodecluster_controller.go
@@ -46,10 +46,6 @@ import (
"github.com/linode/cluster-api-provider-linode/util/reconciler"
)

const (
defaultApiserverPort = 6443
)

// LinodeClusterReconciler reconciles a LinodeCluster object
type LinodeClusterReconciler struct {
client.Client
@@ -181,48 +177,52 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo

if clusterScope.LinodeCluster.Spec.Network.LoadBalancerType == "dns" {
domainName := clusterScope.LinodeCluster.ObjectMeta.Name + "-" + clusterScope.LinodeCluster.Spec.Network.DNSUniqueIdentifier + "." + clusterScope.LinodeCluster.Spec.Network.DNSRootDomain
apiLBPort := services.DefaultApiserverLBPort
if clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
apiLBPort = clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
}
clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
Host: domainName,
Port: int32(defaultApiserverPort),
}
} else {
linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
if err != nil {
logger.Error(err, "failed to create nodebalancer")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
Port: int32(apiLBPort),
}
return nil
}
linodeNB, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
if err != nil {
logger.Error(err, "failed to create nodebalancer")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
}

if linodeNB == nil {
err = fmt.Errorf("nodeBalancer created was nil")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
}
if linodeNB == nil {
err = fmt.Errorf("nodeBalancer created was nil")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
}

clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = &linodeNB.ID
clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = &linodeNB.ID

configs, err := services.CreateNodeBalancerConfigs(ctx, clusterScope, logger)
if err != nil {
logger.Error(err, "failed to create nodebalancer config")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
}
configs, err := services.CreateNodeBalancerConfigs(ctx, clusterScope, logger)
if err != nil {
logger.Error(err, "failed to create nodebalancer config")
setFailureReason(clusterScope, cerrs.CreateClusterError, err, r)
return err
}

clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = util.Pointer(configs[0].ID)
additionalPorts := make([]infrav1alpha2.LinodeNBPortConfig, 0)
for _, config := range configs[1:] {
portConfig := infrav1alpha2.LinodeNBPortConfig{
Port: config.Port,
NodeBalancerConfigID: &config.ID,
}
additionalPorts = append(additionalPorts, portConfig)
clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = util.Pointer(configs[0].ID)
additionalPorts := make([]infrav1alpha2.LinodeNBPortConfig, 0)
for _, config := range configs[1:] {
portConfig := infrav1alpha2.LinodeNBPortConfig{
Port: config.Port,
NodeBalancerConfigID: &config.ID,
}
clusterScope.LinodeCluster.Spec.Network.AdditionalPorts = additionalPorts
additionalPorts = append(additionalPorts, portConfig)
}
clusterScope.LinodeCluster.Spec.Network.AdditionalPorts = additionalPorts

clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
Host: *linodeNB.IPv4,
Port: int32(configs[0].Port),
}
clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
Host: *linodeNB.IPv4,
Port: int32(configs[0].Port),
}

return nil
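
The net effect of this hunk: for the DNS load balancer type, the control-plane endpoint port is no longer pinned to the removed defaultApiserverPort constant but follows Spec.Network.ApiserverLoadBalancerPort, falling back to services.DefaultApiserverLBPort. A condensed, illustrative sketch of that branch follows; the dnsControlPlaneEndpoint helper is hypothetical, and the real logic lives inline in reconcileCreate.

```go
// Condensed sketch of the DNS branch in reconcileCreate: the endpoint port
// honors the configured value and only falls back to the default when unset.
package controller

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// defaultAPIServerLBPort stands in for services.DefaultApiserverLBPort.
const defaultAPIServerLBPort = 6443

// dnsControlPlaneEndpoint is a hypothetical helper mirroring the inline logic.
func dnsControlPlaneEndpoint(domainName string, configuredPort int) clusterv1.APIEndpoint {
	port := defaultAPIServerLBPort
	if configuredPort != 0 {
		port = configuredPort
	}
	return clusterv1.APIEndpoint{
		Host: domainName,
		Port: int32(port),
	}
}
```
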
11 changes: 6 additions & 5 deletions controller/linodecluster_controller_test.go
@@ -227,7 +227,7 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc

var _ = Describe("cluster-lifecycle-dns", Ordered, Label("cluster", "cluster-lifecycle-dns"), func() {
controlPlaneEndpointHost := "cluster-lifecycle-dns-abc123.lkedevs.net"
controlPlaneEndpointPort := 6443
controlPlaneEndpointPort := 1000
clusterName := "cluster-lifecycle-dns"
clusterNameSpace := "default"
ownerRef := metav1.OwnerReference{
@@ -248,10 +248,11 @@
Spec: infrav1alpha2.LinodeClusterSpec{
Region: "us-ord",
Network: infrav1alpha2.NetworkSpec{
LoadBalancerType: "dns",
DNSRootDomain: "lkedevs.net",
DNSUniqueIdentifier: "abc123",
DNSTTLSec: 30,
LoadBalancerType: "dns",
DNSRootDomain: "lkedevs.net",
DNSUniqueIdentifier: "abc123",
DNSTTLSec: 30,
ApiserverLoadBalancerPort: controlPlaneEndpointPort,
},
},
}
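
Using 1000 instead of 6443 here is what makes the verification meaningful: if the reconciler silently fell back to the default, the endpoint port would not match. Below is a self-contained sketch of the assertion shape, assuming the suite's Gomega matchers; the endpoint value is stubbed rather than produced by a real reconcile.

```go
// Illustrative only: the shape of the post-reconcile check, with the
// control-plane endpoint stubbed instead of reconciled.
package controller_test

import (
	"testing"

	. "github.com/onsi/gomega"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func TestEndpointUsesConfiguredPort(t *testing.T) {
	g := NewWithT(t)

	controlPlaneEndpointHost := "cluster-lifecycle-dns-abc123.lkedevs.net"
	controlPlaneEndpointPort := 1000

	// Stand-in for linodeCluster.Spec.ControlPlaneEndpoint after reconciliation.
	got := clusterv1.APIEndpoint{
		Host: controlPlaneEndpointHost,
		Port: int32(controlPlaneEndpointPort),
	}

	g.Expect(got.Host).To(Equal(controlPlaneEndpointHost))
	g.Expect(got.Port).To(Equal(int32(controlPlaneEndpointPort)))
}
```
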
10 changes: 5 additions & 5 deletions docs/src/topics/firewalling.md
@@ -7,10 +7,10 @@ By default, the following policies are set to audit mode(without any enforcement

* [Kubeadm](./flavors/default.md) cluster allow rules

| Ports | Use-case | Allowed clients |
|-----------|--------------------------|-----------------------|
| 6443 | API Server Traffic | World |
| * | In Cluster Communication | Intra Cluster Traffic |
| Ports | Use-case | Allowed clients |
|-------------------------|--------------------------|-----------------------|
| ${APISERVER_PORT:=6443} | API Server Traffic | World |
| * | In Cluster Communication | Intra Cluster Traffic |

```admonish note
For kubeadm clusters running outside of VPC, ports 2379 and 2380 are also allowed for etcd-traffic.
@@ -55,7 +55,7 @@ spec:
toPorts:
- ports:
- port: "22" # added for SSH Access to the nodes
- port: "6443"
- port: "${APISERVER_PORT:=6443}"
```
Alternatively, additional rules can be added by creating a new policy
```yaml
@@ -34,7 +34,7 @@ data:
- all
toPorts:
- ports:
- port: "6443"
- port: "${APISERVER_PORT:=6443}"
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
1 change: 1 addition & 0 deletions templates/addons/konnectivity/konnectivity.yaml
@@ -16,4 +16,5 @@ spec:
timeout: 5m
valuesTemplate: |
proxyServerHost: {{ .InfraCluster.spec.controlPlaneEndpoint.host }}
proxyServerPort: ${KONNECTIVITY_PORT:=8132}
serverCount: ${CONTROL_PLANE_MACHINE_COUNT}
2 changes: 2 additions & 0 deletions templates/flavors/kubeadm/default/kubeadmControlPlane.yaml
@@ -27,6 +27,8 @@ spec:
extraArgs:
cloud-provider: external
initConfiguration:
localAPIEndpoint:
bindPort: ${APISERVER_PORT:=6443}
skipPhases:
- addon/kube-proxy
nodeRegistration:
12 changes: 12 additions & 0 deletions templates/flavors/kubeadm/default/kustomization.yaml
@@ -41,3 +41,15 @@ patches:
ccm: ${CLUSTER_NAME}-linode
csi: ${CLUSTER_NAME}-linode
crs: ${CLUSTER_NAME}-crs
- target:
group: infrastructure.cluster.x-k8s.io
version: v1alpha2
kind: LinodeCluster
patch: |-
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LinodeCluster
metadata:
name: ${CLUSTER_NAME}
spec:
network:
apiserverLoadBalancerPort: ${APISERVER_PORT:=6443}
6 changes: 3 additions & 3 deletions templates/flavors/kubeadm/konnectivity/kustomization.yaml
@@ -62,7 +62,7 @@ patches:
spec:
network:
additionalPorts:
- port: 8132
- port: ${KONNECTIVITY_PORT:=8132}
- target:
kind: ConfigMap
name: .*-cilium-policy
@@ -100,7 +100,7 @@ patches:
- all
toPorts:
- ports:
- port: "6443"
- port: "${APISERVER_PORT:=6443}"
---
apiVersion: "cilium.io/v2"
kind: CiliumClusterwideNetworkPolicy
@@ -114,4 +114,4 @@
- all
toPorts:
- ports:
- port: "8132"
- port: "${KONNECTIVITY_PORT:=8132}"
2 changes: 1 addition & 1 deletion templates/flavors/kubeadm/vpcless/kustomization.yaml
@@ -78,7 +78,7 @@ patches:
- all
toPorts:
- ports:
- port: "6443"
- port: "${APISERVER_PORT:=6443}"
---
apiVersion: "cilium.io/v2"
kind: CiliumClusterwideNetworkPolicy