From 2ea608d31d1258dbdd156144bf96564134cce800 Mon Sep 17 00:00:00 2001 From: Rahul Sharma Date: Thu, 6 Jun 2024 05:23:31 +0000 Subject: [PATCH] add konnectivity flavor for kubeadm --- .github/filters.yml | 2 + api/v1alpha1/linodecluster_conversion.go | 11 +- api/v1alpha1/linodecluster_conversion_test.go | 12 +- api/v1alpha2/linodecluster_types.go | 14 ++ api/v1alpha2/linodecluster_webhook_test.go | 1 + api/v1alpha2/zz_generated.deepcopy.go | 5 + cloud/services/loadbalancers.go | 77 ++++++- cloud/services/loadbalancers_test.go | 190 +++++++++++++++++- ...cture.cluster.x-k8s.io_linodeclusters.yaml | 16 ++ controller/linodecluster_controller.go | 4 + docs/src/SUMMARY.md | 1 + docs/src/topics/flavors/flavors.md | 1 + docs/src/topics/flavors/konnectivity.md | 28 +++ .../addons/konnectivity/konnectivity.yaml | 19 ++ .../addons/konnectivity/kustomization.yaml | 4 + .../kubeadm/full-vpcless/kustomization.yaml | 94 ++++++--- .../flavors/kubeadm/full/kustomization.yaml | 2 +- .../konnectivity/allow-konnectivity-port.yaml | 32 +++ .../kubeadm/konnectivity/kustomization.yaml | 67 ++++++ 19 files changed, 537 insertions(+), 43 deletions(-) create mode 100644 docs/src/topics/flavors/konnectivity.md create mode 100644 templates/addons/konnectivity/konnectivity.yaml create mode 100644 templates/addons/konnectivity/kustomization.yaml create mode 100644 templates/flavors/kubeadm/konnectivity/allow-konnectivity-port.yaml create mode 100644 templates/flavors/kubeadm/konnectivity/kustomization.yaml diff --git a/.github/filters.yml b/.github/filters.yml index 326b5dede..18dd31996 100644 --- a/.github/filters.yml +++ b/.github/filters.yml @@ -20,6 +20,8 @@ kubeadm_self-healing: - templates/flavors/kubeadm/self-healing/* kubeadm_vpcless: - templates/flavors/kubeadm/vpcless/* +kubeadm_konnectivity: + - templates/flavors/kubeadm/konnectivity/* k3s: - templates/flavors/k3s/default/* diff --git a/api/v1alpha1/linodecluster_conversion.go b/api/v1alpha1/linodecluster_conversion.go index 947c5f4fc..625fb3bb6 100644 --- a/api/v1alpha1/linodecluster_conversion.go +++ b/api/v1alpha1/linodecluster_conversion.go @@ -36,10 +36,13 @@ func (src *LinodeCluster) ConvertTo(dstRaw conversion.Hub) error { // Spec dst.Spec.Network = infrastructurev1alpha2.NetworkSpec{ - LoadBalancerType: src.Spec.Network.LoadBalancerType, - ApiserverLoadBalancerPort: src.Spec.Network.LoadBalancerPort, - NodeBalancerID: src.Spec.Network.NodeBalancerID, - ApiserverNodeBalancerConfigID: src.Spec.Network.NodeBalancerConfigID, + LoadBalancerType: src.Spec.Network.LoadBalancerType, + ApiserverLoadBalancerPort: src.Spec.Network.LoadBalancerPort, + NodeBalancerID: src.Spec.Network.NodeBalancerID, + ApiserverNodeBalancerConfigID: src.Spec.Network.NodeBalancerConfigID, + Konnectivity: false, + KonnectivityLoadBalancerPort: 0, + KonnectivityNodeBalancerConfigID: nil, } dst.Spec.ControlPlaneEndpoint = src.Spec.ControlPlaneEndpoint dst.Spec.Region = src.Spec.Region diff --git a/api/v1alpha1/linodecluster_conversion_test.go b/api/v1alpha1/linodecluster_conversion_test.go index 3c3c4b8d1..199b4d9af 100644 --- a/api/v1alpha1/linodecluster_conversion_test.go +++ b/api/v1alpha1/linodecluster_conversion_test.go @@ -59,6 +59,7 @@ func TestConvertTo(t *testing.T) { NodeBalancerID: ptr.To(1234), ApiserverLoadBalancerPort: 12345, ApiserverNodeBalancerConfigID: ptr.To(2345), + Konnectivity: false, }, ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"}, Region: "test-region", @@ -94,10 +95,13 @@ func TestConvertFrom(t *testing.T) { }, Spec: 
infrav1alpha2.LinodeClusterSpec{ Network: infrav1alpha2.NetworkSpec{ - LoadBalancerType: "test-type", - NodeBalancerID: ptr.To(1234), - ApiserverLoadBalancerPort: 12345, - ApiserverNodeBalancerConfigID: ptr.To(2345), + LoadBalancerType: "test-type", + NodeBalancerID: ptr.To(1234), + ApiserverLoadBalancerPort: 12345, + ApiserverNodeBalancerConfigID: ptr.To(2345), + Konnectivity: true, + KonnectivityLoadBalancerPort: 2222, + KonnectivityNodeBalancerConfigID: ptr.To(1111), }, ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"}, Region: "test-region", diff --git a/api/v1alpha2/linodecluster_types.go b/api/v1alpha2/linodecluster_types.go index 919ed2674..e7a10e21e 100644 --- a/api/v1alpha2/linodecluster_types.go +++ b/api/v1alpha2/linodecluster_types.go @@ -113,6 +113,20 @@ type NetworkSpec struct { // apiserverNodeBalancerConfigID is the config ID of api server NodeBalancer fonfig. // +optional ApiserverNodeBalancerConfigID *int `json:"apiserverNodeBalancerConfigID,omitempty"` + // Konnectivity flag tells whether or not the cluster is configured to use konnectivity. + // If omitted, default value is false. + // +kubebuilder:validation:Type=boolean + // +optional + Konnectivity bool `json:"konnectivity,omitempty"` + // konnectivityLoadBalancerPort used by the konnectivity server. It must be valid ports range (1-65535). + // If omitted, default value is 8132. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + KonnectivityLoadBalancerPort int `json:"konnectivityLoadBalancerPort,omitempty"` + // konnectivityNodeBalancerConfigID is the config ID of konnectivity server NodeBalancer config. + // +optional + KonnectivityNodeBalancerConfigID *int `json:"konnectivityNodeBalancerConfigID,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha2/linodecluster_webhook_test.go b/api/v1alpha2/linodecluster_webhook_test.go index 70d70168d..f0e02d124 100644 --- a/api/v1alpha2/linodecluster_webhook_test.go +++ b/api/v1alpha2/linodecluster_webhook_test.go @@ -43,6 +43,7 @@ func TestValidateLinodeCluster(t *testing.T) { Region: "example", Network: NetworkSpec{ LoadBalancerType: "NodeBalancer", + Konnectivity: true, }, }, } diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go index e17bdc7d9..7639d33a8 100644 --- a/api/v1alpha2/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -158,6 +158,11 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = new(int) **out = **in } + if in.KonnectivityNodeBalancerConfigID != nil { + in, out := &in.KonnectivityNodeBalancerConfigID, &out.KonnectivityNodeBalancerConfigID + *out = new(int) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. 
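
To make the new NetworkSpec knobs concrete, here is a minimal sketch (not part of the diff) of a LinodeCluster manifest that opts into konnectivity. The cluster name and region below are placeholders, and konnectivityNodeBalancerConfigID is omitted because the controller populates it during reconciliation (see the linodecluster_controller.go change later in this patch):

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LinodeCluster
metadata:
  name: demo-cluster            # placeholder name
spec:
  region: us-ord                # placeholder region
  network:
    loadBalancerType: NodeBalancer
    konnectivity: true                  # opt in to the extra konnectivity NodeBalancer config
    konnectivityLoadBalancerPort: 8132  # optional; defaults to 8132 when omitted
    # konnectivityNodeBalancerConfigID is set by the controller once the
    # NodeBalancer config is created; it does not need to be specified here.
```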
diff --git a/cloud/services/loadbalancers.go b/cloud/services/loadbalancers.go index 4e1e47217..36c52a3c3 100644 --- a/cloud/services/loadbalancers.go +++ b/cloud/services/loadbalancers.go @@ -16,7 +16,8 @@ import ( ) const ( - defaultApiserverLBPort = 6443 + defaultApiserverLBPort = 6443 + defaultKonnectivityLBPort = 8132 ) // CreateNodeBalancer creates a new NodeBalancer if one doesn't exist @@ -100,6 +101,33 @@ func CreateNodeBalancerConfigs( } nbConfigs = append(nbConfigs, apiserverLinodeNBConfig) + // return if konnectivity should not be configured + if !clusterScope.LinodeCluster.Spec.Network.Konnectivity { + return nbConfigs, nil + } + + konnLBPort := defaultKonnectivityLBPort + if clusterScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort != 0 { + konnLBPort = clusterScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort + } + konnectivityCreateConfig := linodego.NodeBalancerConfigCreateOptions{ + Port: konnLBPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + } + + konnectivityLinodeNBConfig, err := clusterScope.LinodeClient.CreateNodeBalancerConfig( + ctx, + *clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, + konnectivityCreateConfig, + ) + if err != nil { + logger.Info("Failed to create Linode NodeBalancer config", "error", err.Error()) + return nil, err + } + nbConfigs = append(nbConfigs, konnectivityLinodeNBConfig) + return nbConfigs, nil } @@ -155,6 +183,38 @@ func AddNodeToNB( return err } + // return if konnectivity should not be configured + if !machineScope.LinodeCluster.Spec.Network.Konnectivity { + return nil + } + + konnectivityLBPort := defaultKonnectivityLBPort + if machineScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort != 0 { + konnectivityLBPort = machineScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort + } + + if machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID == nil { + err := errors.New("nil NodeBalancer Config ID") + logger.Error(err, "config ID for NodeBalancer is nil") + + return err + } + + _, err = machineScope.LinodeClient.CreateNodeBalancerNode( + ctx, + *machineScope.LinodeCluster.Spec.Network.NodeBalancerID, + *machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID, + linodego.NodeBalancerNodeCreateOptions{ + Label: machineScope.Cluster.Name, + Address: fmt.Sprintf("%s:%d", addresses.IPv4.Private[0].Address, konnectivityLBPort), + Mode: linodego.ModeAccept, + }, + ) + if err != nil { + logger.Error(err, "Failed to update Node Balancer") + return err + } + return nil } @@ -187,5 +247,20 @@ func DeleteNodeFromNB( return err } + if !machineScope.LinodeCluster.Spec.Network.Konnectivity { + return nil + } + + err = machineScope.LinodeClient.DeleteNodeBalancerNode( + ctx, + *machineScope.LinodeCluster.Spec.Network.NodeBalancerID, + *machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID, + *machineScope.LinodeMachine.Spec.InstanceID, + ) + if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil { + logger.Error(err, "Failed to update Node Balancer") + return err + } + return nil } diff --git a/cloud/services/loadbalancers_test.go b/cloud/services/loadbalancers_test.go index 26a36ac67..070d273a8 100644 --- a/cloud/services/loadbalancers_test.go +++ b/cloud/services/loadbalancers_test.go @@ -39,6 +39,7 @@ func TestCreateNodeBalancer(t *testing.T) { Spec: infrav1alpha2.LinodeClusterSpec{ Network: infrav1alpha2.NetworkSpec{ NodeBalancerID: ptr.To(1234), + Konnectivity: true, }, }, 
}, @@ -237,8 +238,10 @@ func TestCreateNodeBalancerConfigs(t *testing.T) { }, Spec: infrav1alpha2.LinodeClusterSpec{ Network: infrav1alpha2.NetworkSpec{ - NodeBalancerID: ptr.To(1234), - ApiserverLoadBalancerPort: 80, + NodeBalancerID: ptr.To(1234), + Konnectivity: true, + ApiserverLoadBalancerPort: 80, + KonnectivityLoadBalancerPort: 90, }, }, }, @@ -251,6 +254,13 @@ func TestCreateNodeBalancerConfigs(t *testing.T) { Check: linodego.CheckConnection, NodeBalancerID: 1234, }, + { + Port: 90, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, }, expects: func(mockClient *mock.MockLinodeClient) { mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{ @@ -260,6 +270,13 @@ func TestCreateNodeBalancerConfigs(t *testing.T) { Check: linodego.CheckConnection, NodeBalancerID: 1234, }, nil) + mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{ + Port: 90, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, nil) }, }, { @@ -274,6 +291,45 @@ func TestCreateNodeBalancerConfigs(t *testing.T) { Spec: infrav1alpha2.LinodeClusterSpec{ Network: infrav1alpha2.NetworkSpec{ NodeBalancerID: ptr.To(1234), + Konnectivity: true, + }, + }, + }, + }, + expectedConfigs: []*linodego.NodeBalancerConfig{ + { + Port: defaultApiserverLBPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, + { + Port: defaultKonnectivityLBPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, + }, + expectedError: fmt.Errorf("error creating NodeBalancerConfig"), + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error creating NodeBalancerConfig")) + }, + }, + { + name: "Error - CreateNodeBalancerConfig() returns an error when creating nbconfig for konnectivity", + clusterScope: &scope.ClusterScope{ + LinodeClient: nil, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + UID: "test-uid", + }, + Spec: infrav1alpha2.LinodeClusterSpec{ + Network: infrav1alpha2.NetworkSpec{ + NodeBalancerID: ptr.To(1234), + Konnectivity: true, }, }, }, @@ -286,9 +342,23 @@ func TestCreateNodeBalancerConfigs(t *testing.T) { Check: linodego.CheckConnection, NodeBalancerID: 1234, }, + { + Port: defaultKonnectivityLBPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, }, expectedError: fmt.Errorf("error creating NodeBalancerConfig"), expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerConfig{ + Port: defaultApiserverLBPort, + Protocol: linodego.ProtocolTCP, + Algorithm: linodego.AlgorithmRoundRobin, + Check: linodego.CheckConnection, + NodeBalancerID: 1234, + }, nil) mockClient.EXPECT().CreateNodeBalancerConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error creating NodeBalancerConfig")) }, }, @@ -375,6 +445,64 @@ func TestAddNodeToNBConditions(t *testing.T) { }, nil) }, 
}, + { + name: "Error - KonnectivityNodeBalancerConfigID is not set", + machineScope: &scope.MachineScope{ + Machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + UID: "test-uid", + Labels: map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + }, + }, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + UID: "test-uid", + }, + }, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + UID: "test-uid", + }, + Spec: infrav1alpha2.LinodeClusterSpec{ + Network: infrav1alpha2.NetworkSpec{ + NodeBalancerID: ptr.To(1234), + ApiserverNodeBalancerConfigID: ptr.To(5678), + ApiserverLoadBalancerPort: defaultApiserverLBPort, + Konnectivity: true, + KonnectivityNodeBalancerConfigID: nil, + KonnectivityLoadBalancerPort: defaultKonnectivityLBPort, + }, + }, + }, + LinodeMachine: &infrav1alpha1.LinodeMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + UID: "test-uid", + }, + Spec: infrav1alpha1.LinodeMachineSpec{ + InstanceID: ptr.To(123), + }, + }, + }, + expectedError: fmt.Errorf("nil NodeBalancer Config ID"), + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().GetInstanceIPAddresses(gomock.Any(), gomock.Any()).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Private: []*linodego.InstanceIP{ + { + Address: "1.2.3.4", + }, + }, + }, + }, nil) + mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerNode{}, nil) + }, + }, { name: "Error - No private IP addresses were set", machineScope: &scope.MachineScope{ @@ -507,8 +635,10 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) { }, Spec: infrav1alpha2.LinodeClusterSpec{ Network: infrav1alpha2.NetworkSpec{ - NodeBalancerID: ptr.To(1234), - ApiserverNodeBalancerConfigID: ptr.To(5678), + NodeBalancerID: ptr.To(1234), + ApiserverNodeBalancerConfigID: ptr.To(5678), + Konnectivity: true, + KonnectivityNodeBalancerConfigID: ptr.To(1111), }, }, }, @@ -532,7 +662,7 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) { }, }, }, nil) - mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&linodego.NodeBalancerNode{}, nil) + mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(&linodego.NodeBalancerNode{}, nil) }, }, { @@ -701,14 +831,17 @@ func TestDeleteNodeFromNB(t *testing.T) { Spec: infrav1alpha2.LinodeClusterSpec{ ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"}, Network: infrav1alpha2.NetworkSpec{ - NodeBalancerID: ptr.To(1234), - ApiserverNodeBalancerConfigID: ptr.To(5678), + NodeBalancerID: ptr.To(1234), + ApiserverNodeBalancerConfigID: ptr.To(5678), + Konnectivity: true, + KonnectivityNodeBalancerConfigID: ptr.To(4444), }, }, }, }, expects: func(mockClient *mock.MockLinodeClient) { mockClient.EXPECT().DeleteNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + mockClient.EXPECT().DeleteNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) }, }, { @@ -751,6 +884,49 @@ func TestDeleteNodeFromNB(t *testing.T) { mockClient.EXPECT().DeleteNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("error deleting node from NodeBalancer")) }, }, + { + name: "Error - Deleting Konnectivity Node from NodeBalancer", + machineScope: &scope.MachineScope{ + Machine: 
&clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + UID: "test-uid", + Labels: map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + }, + }, + }, + LinodeMachine: &infrav1alpha1.LinodeMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + UID: "test-uid", + }, + Spec: infrav1alpha1.LinodeMachineSpec{ + InstanceID: ptr.To(123), + }, + }, + LinodeCluster: &infrav1alpha2.LinodeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + UID: "test-uid", + }, + Spec: infrav1alpha2.LinodeClusterSpec{ + ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"}, + Network: infrav1alpha2.NetworkSpec{ + NodeBalancerID: ptr.To(1234), + ApiserverNodeBalancerConfigID: ptr.To(5678), + Konnectivity: true, + KonnectivityNodeBalancerConfigID: ptr.To(4444), + }, + }, + }, + }, + expectedError: fmt.Errorf("error deleting node from NodeBalancer"), + expects: func(mockClient *mock.MockLinodeClient) { + mockClient.EXPECT().DeleteNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + mockClient.EXPECT().DeleteNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("error deleting node from NodeBalancer")) + }, + }, } for _, tt := range tests { testcase := tt diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml index 6944bc412..5d2ed85be 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeclusters.yaml @@ -340,6 +340,22 @@ spec: description: apiserverNodeBalancerConfigID is the config ID of api server NodeBalancer fonfig. type: integer + konnectivity: + description: |- + Konnectivity flag tells whether or not the cluster is configured to use konnectivity. + If omitted, default value is false. + type: boolean + konnectivityLoadBalancerPort: + description: |- + konnectivityLoadBalancerPort used by the konnectivity server. It must be valid ports range (1-65535). + If omitted, default value is 8132. + maximum: 65535 + minimum: 1 + type: integer + konnectivityNodeBalancerConfigID: + description: konnectivityNodeBalancerConfigID is the config ID + of konnectivity server NodeBalancer config. 
+              type: integer
             loadBalancerType:
               description: LoadBalancerType is the type of load balancer to
                 use, defaults to NodeBalancer if not otherwise set
diff --git a/controller/linodecluster_controller.go b/controller/linodecluster_controller.go
index 6c325b7ca..a98d857e4 100644
--- a/controller/linodecluster_controller.go
+++ b/controller/linodecluster_controller.go
@@ -198,6 +198,9 @@ func (r *LinodeClusterReconciler) reconcileCreate(ctx context.Context, logger lo
 	}
 	clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = util.Pointer(configs[0].ID)
+	if clusterScope.LinodeCluster.Spec.Network.Konnectivity {
+		clusterScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID = util.Pointer(configs[1].ID)
+	}
 
 	clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
 		Host: *linodeNB.IPv4,
@@ -238,6 +241,7 @@ func (r *LinodeClusterReconciler) reconcileDelete(ctx context.Context, logger lo
 
 	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = nil
 	clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = nil
+	clusterScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID = nil
 
 	if err := clusterScope.RemoveCredentialsRefFinalizer(ctx); err != nil {
 		logger.Error(err, "failed to remove credentials finalizer")
diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md
index 790aa9d15..97e1b025c 100644
--- a/docs/src/SUMMARY.md
+++ b/docs/src/SUMMARY.md
@@ -14,6 +14,7 @@
     - [k3s](./topics/flavors/k3s.md)
     - [rke2](./topics/flavors/rke2.md)
     - [vpcless](./topics/flavors/vpcless.md)
+    - [konnectivity (kubeadm)](./topics/flavors/konnectivity.md)
   - [Etcd](./topics/etcd.md)
   - [Backups](./topics/backups.md)
   - [Multi-Tenancy](./topics/multi-tenancy.md)
diff --git a/docs/src/topics/flavors/flavors.md b/docs/src/topics/flavors/flavors.md
index 8ca3bfb24..aab01684c 100644
--- a/docs/src/topics/flavors/flavors.md
+++ b/docs/src/topics/flavors/flavors.md
@@ -31,6 +31,7 @@ See the [`clusterctl` flavors docs](https://cluster-api.sigs.k8s.io/clusterctl/c
 |                         | kubeadm-dualstack     | Installs vpcless and enables IPv6 along with IPv4   |
 |                         | kubeadm-self-healing  | Installs default along with the machine-health-check|
 |                         |                       | add-on                                              |
+|                         | kubeadm-konnectivity  | Installs and configures konnectivity within cluster |
 |                         | kubeadm-full          | Installs all non-vpcless based flavors combinations |
 |                         | kubeadm-fullvpcless   | Installs all vpcless based flavors combinations     |
 | k3s                     | k3s                   | Installs Linode infra resources, k3s resources and  |
diff --git a/docs/src/topics/flavors/konnectivity.md b/docs/src/topics/flavors/konnectivity.md
new file mode 100644
index 000000000..55e89d9de
--- /dev/null
+++ b/docs/src/topics/flavors/konnectivity.md
@@ -0,0 +1,28 @@
+# Konnectivity
+
+This flavor supports provisioning k8s clusters with konnectivity configured. It uses kubeadm
+for setting up the control plane and uses Cilium with native routing for pod networking.
+
+## Specification
+| Supported Control Plane | CNI    | Default OS   | Installs ClusterClass | IPv4 | IPv6 |
+|-------------------------|--------|--------------|-----------------------|------|------|
+| kubeadm                 | Cilium | Ubuntu 22.04 | No                    | Yes  | No   |
+
+## Prerequisites
+[Quickstart](../getting-started.md) completed
+
+## Notes
+This flavor configures the apiserver with konnectivity. Traffic from the apiserver to the cluster flows
+over the tunnels created between konnectivity-server and konnectivity-agent.
+
+## Usage
+1. Generate cluster yaml
+    ```bash
+    clusterctl generate cluster test-cluster \
+        --infrastructure linode-linode \
+        --flavor kubeadm-konnectivity > test-cluster.yaml
+    ```
+2. Apply cluster yaml
+    ```bash
+    kubectl apply -f test-cluster.yaml
+    ```
diff --git a/templates/addons/konnectivity/konnectivity.yaml b/templates/addons/konnectivity/konnectivity.yaml
new file mode 100644
index 000000000..6cdd4ae06
--- /dev/null
+++ b/templates/addons/konnectivity/konnectivity.yaml
@@ -0,0 +1,19 @@
+apiVersion: addons.cluster.x-k8s.io/v1alpha1
+kind: HelmChartProxy
+metadata:
+  name: ${CLUSTER_NAME}-konnectivity
+spec:
+  clusterSelector:
+    matchLabels:
+      konn: "${CLUSTER_NAME}-konnectivity"
+  repoURL: https://rahulait.github.io/konnectivity/
+  chartName: konnectivity
+  namespace: kube-system
+  version: ${KONNECTIVITY_VERSION:=v0.0.1}
+  options:
+    waitForJobs: true
+    wait: true
+    timeout: 5m
+    valuesTemplate: |
+      proxyServerHost: {{ .InfraCluster.spec.controlPlaneEndpoint.host }}
+      serverCount: ${CONTROL_PLANE_MACHINE_COUNT}
diff --git a/templates/addons/konnectivity/kustomization.yaml b/templates/addons/konnectivity/kustomization.yaml
new file mode 100644
index 000000000..249d51108
--- /dev/null
+++ b/templates/addons/konnectivity/kustomization.yaml
@@ -0,0 +1,4 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - konnectivity.yaml
diff --git a/templates/flavors/kubeadm/full-vpcless/kustomization.yaml b/templates/flavors/kubeadm/full-vpcless/kustomization.yaml
index 2e3ac136d..2780b21f9 100644
--- a/templates/flavors/kubeadm/full-vpcless/kustomization.yaml
+++ b/templates/flavors/kubeadm/full-vpcless/kustomization.yaml
@@ -2,13 +2,80 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - ../vpcless
+  - ../konnectivity
   - ../../../addons/cluster-autoscaler
   - ../../../addons/etcd-backup-restore
   - ../../../addons/machine-health-check
 patches:
+  - target:
+      kind: HelmChartProxy
+      name: .*-cilium
+    patch: |-
+      - op: replace
+        path: /spec/valuesTemplate
+        value: |
+          bgpControlPlane:
+            enabled: true
+          policyAuditMode: 
${FW_AUDIT_ONLY:=true} - hostFirewall: - enabled: true - extraConfig: - allow-localhost: policy - ipv6: - enabled: true - ipam: - mode: kubernetes - k8s: - requireIPv4PodCIDR: true - hubble: - relay: - enabled: true - ui: - enabled: true - target: group: cluster.x-k8s.io version: v1beta1 diff --git a/templates/flavors/kubeadm/full/kustomization.yaml b/templates/flavors/kubeadm/full/kustomization.yaml index 648538dc1..fa7955f37 100644 --- a/templates/flavors/kubeadm/full/kustomization.yaml +++ b/templates/flavors/kubeadm/full/kustomization.yaml @@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../default + - ../konnectivity - ../../../addons/cluster-autoscaler - ../../../addons/etcd-backup-restore - ../../../addons/machine-health-check diff --git a/templates/flavors/kubeadm/konnectivity/allow-konnectivity-port.yaml b/templates/flavors/kubeadm/konnectivity/allow-konnectivity-port.yaml new file mode 100644 index 000000000..db04389fa --- /dev/null +++ b/templates/flavors/kubeadm/konnectivity/allow-konnectivity-port.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ${CLUSTER_NAME}-konnectivity-cilium-policy +data: + cilium-policy.yaml: |- + apiVersion: "cilium.io/v2" + kind: CiliumClusterwideNetworkPolicy + metadata: + name: "allow-konnectivity-policy" + spec: + description: "allow incoming connections to konnectivity-server from everywhere" + nodeSelector: {} + ingress: + - fromEntities: + - world + toPorts: + - ports: + - port: "8132" +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-konnectivity-cilium-policy +spec: + clusterSelector: + matchLabels: + cluster: ${CLUSTER_NAME} + resources: + - kind: ConfigMap + name: ${CLUSTER_NAME}-konnectivity-cilium-policy + strategy: Reconcile diff --git a/templates/flavors/kubeadm/konnectivity/kustomization.yaml b/templates/flavors/kubeadm/konnectivity/kustomization.yaml new file mode 100644 index 000000000..3239f7324 --- /dev/null +++ b/templates/flavors/kubeadm/konnectivity/kustomization.yaml @@ -0,0 +1,67 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../default + - ../../../addons/konnectivity + - allow-konnectivity-port.yaml + +patches: + - target: + group: controlplane.cluster.x-k8s.io + version: v1beta1 + kind: KubeadmControlPlane + patch: |- + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + metadata: + name: ${CLUSTER_NAME}-control-plane + spec: + kubeadmConfigSpec: + preKubeadmCommands: + - /kubeadm-pre-init.sh ${KUBERNETES_VERSION} + - sed -i '/swap/d' /etc/fstab + - swapoff -a + - hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname + - mkdir -p -m 755 /etc/kubernetes/konnectivity + - curl -s -L https://raw.githubusercontent.com/rahulait/konnectivity/main/config/egress-selector-configuration.yaml > /etc/kubernetes/konnectivity/egress-selector-configuration.yaml + postKubeadmCommands: + - curl -s -L https://raw.githubusercontent.com/rahulait/konnectivity/main/scripts/gen-konnectivity-kubeconfig.sh | bash + clusterConfiguration: + apiServer: + extraArgs: + egress-selector-config-file: /etc/kubernetes/konnectivity/egress-selector-configuration.yaml + extraVolumes: + - hostPath: /etc/kubernetes/konnectivity-server + mountPath: /etc/kubernetes/konnectivity-server + name: konnectivity-uds + pathType: DirectoryOrCreate + readOnly: false + - hostPath: /etc/kubernetes/konnectivity + mountPath: /etc/kubernetes/konnectivity + 
name: konnectivity + pathType: DirectoryOrCreate + readOnly: true + - target: + group: cluster.x-k8s.io + version: v1beta1 + kind: Cluster + patch: |- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + name: ${CLUSTER_NAME} + labels: + konn: ${CLUSTER_NAME}-konnectivity + - target: + group: infrastructure.cluster.x-k8s.io + version: v1alpha2 + kind: LinodeCluster + patch: |- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + kind: LinodeCluster + metadata: + name: ${CLUSTER_NAME} + spec: + network: + konnectivity: true
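
The konnectivity flavor above downloads egress-selector-configuration.yaml from the referenced repository and points the apiserver at it via the egress-selector-config-file flag; that file's contents are not part of this patch, but an upstream-style EgressSelectorConfiguration using UDS transport generally looks like the sketch below. The socket path assumes konnectivity-server publishes its UDS socket under the /etc/kubernetes/konnectivity-server hostPath mounted above; the file actually served from the repo may differ.

```yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
  # Route "cluster" egress traffic (apiserver -> nodes/pods) through the konnectivity server.
  - name: cluster
    connection:
      proxyProtocol: GRPC
      transport:
        uds:
          # Unix socket shared with konnectivity-server via the konnectivity-uds hostPath volume.
          udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket
```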