Skip to content

Commit

Permalink
add konnectivity flavor for kubeadm
Browse files Browse the repository at this point in the history
  • Loading branch information
rahulait committed Jun 6, 2024
1 parent 9cd0d2d commit 2ea608d
Show file tree
Hide file tree
Showing 19 changed files with 537 additions and 43 deletions.
2 changes: 2 additions & 0 deletions .github/filters.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ kubeadm_self-healing:
- templates/flavors/kubeadm/self-healing/*
kubeadm_vpcless:
- templates/flavors/kubeadm/vpcless/*
kubeadm_konnectivity:
- templates/flavors/kubeadm/konnectivity/*

k3s:
- templates/flavors/k3s/default/*
Expand Down
11 changes: 7 additions & 4 deletions api/v1alpha1/linodecluster_conversion.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,13 @@ func (src *LinodeCluster) ConvertTo(dstRaw conversion.Hub) error {

// Spec
dst.Spec.Network = infrastructurev1alpha2.NetworkSpec{
LoadBalancerType: src.Spec.Network.LoadBalancerType,
ApiserverLoadBalancerPort: src.Spec.Network.LoadBalancerPort,
NodeBalancerID: src.Spec.Network.NodeBalancerID,
ApiserverNodeBalancerConfigID: src.Spec.Network.NodeBalancerConfigID,
LoadBalancerType: src.Spec.Network.LoadBalancerType,
ApiserverLoadBalancerPort: src.Spec.Network.LoadBalancerPort,
NodeBalancerID: src.Spec.Network.NodeBalancerID,
ApiserverNodeBalancerConfigID: src.Spec.Network.NodeBalancerConfigID,
Konnectivity: false,
KonnectivityLoadBalancerPort: 0,
KonnectivityNodeBalancerConfigID: nil,
}
dst.Spec.ControlPlaneEndpoint = src.Spec.ControlPlaneEndpoint
dst.Spec.Region = src.Spec.Region
Expand Down
12 changes: 8 additions & 4 deletions api/v1alpha1/linodecluster_conversion_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ func TestConvertTo(t *testing.T) {
NodeBalancerID: ptr.To(1234),
ApiserverLoadBalancerPort: 12345,
ApiserverNodeBalancerConfigID: ptr.To(2345),
Konnectivity: false,
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"},
Region: "test-region",
Expand Down Expand Up @@ -94,10 +95,13 @@ func TestConvertFrom(t *testing.T) {
},
Spec: infrav1alpha2.LinodeClusterSpec{
Network: infrav1alpha2.NetworkSpec{
LoadBalancerType: "test-type",
NodeBalancerID: ptr.To(1234),
ApiserverLoadBalancerPort: 12345,
ApiserverNodeBalancerConfigID: ptr.To(2345),
LoadBalancerType: "test-type",
NodeBalancerID: ptr.To(1234),
ApiserverLoadBalancerPort: 12345,
ApiserverNodeBalancerConfigID: ptr.To(2345),
Konnectivity: true,
KonnectivityLoadBalancerPort: 2222,
KonnectivityNodeBalancerConfigID: ptr.To(1111),
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "1.2.3.4"},
Region: "test-region",
Expand Down
14 changes: 14 additions & 0 deletions api/v1alpha2/linodecluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,20 @@ type NetworkSpec struct {
// apiserverNodeBalancerConfigID is the config ID of api server NodeBalancer config.
// +optional
ApiserverNodeBalancerConfigID *int `json:"apiserverNodeBalancerConfigID,omitempty"`
// Konnectivity flag tells whether or not the cluster is configured to use konnectivity.
// If omitted, default value is false.
// +kubebuilder:validation:Type=boolean
// +optional
Konnectivity bool `json:"konnectivity,omitempty"`
// konnectivityLoadBalancerPort used by the konnectivity server. It must be within the valid port range (1-65535).
// If omitted, default value is 8132.
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
// +optional
KonnectivityLoadBalancerPort int `json:"konnectivityLoadBalancerPort,omitempty"`
// konnectivityNodeBalancerConfigID is the config ID of konnectivity server NodeBalancer config.
// +optional
KonnectivityNodeBalancerConfigID *int `json:"konnectivityNodeBalancerConfigID,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down
1 change: 1 addition & 0 deletions api/v1alpha2/linodecluster_webhook_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ func TestValidateLinodeCluster(t *testing.T) {
Region: "example",
Network: NetworkSpec{
LoadBalancerType: "NodeBalancer",
Konnectivity: true,
},
},
}
Expand Down
5 changes: 5 additions & 0 deletions api/v1alpha2/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

77 changes: 76 additions & 1 deletion cloud/services/loadbalancers.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ import (
)

const (
defaultApiserverLBPort = 6443
defaultApiserverLBPort = 6443
defaultKonnectivityLBPort = 8132
)

// CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
Expand Down Expand Up @@ -100,6 +101,33 @@ func CreateNodeBalancerConfigs(
}
nbConfigs = append(nbConfigs, apiserverLinodeNBConfig)

// return if konnectivity should not be configured
if !clusterScope.LinodeCluster.Spec.Network.Konnectivity {
return nbConfigs, nil
}

konnLBPort := defaultKonnectivityLBPort
if clusterScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort != 0 {
konnLBPort = clusterScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort
}
konnectivityCreateConfig := linodego.NodeBalancerConfigCreateOptions{
Port: konnLBPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
}

konnectivityLinodeNBConfig, err := clusterScope.LinodeClient.CreateNodeBalancerConfig(
ctx,
*clusterScope.LinodeCluster.Spec.Network.NodeBalancerID,
konnectivityCreateConfig,
)
if err != nil {
logger.Info("Failed to create Linode NodeBalancer config", "error", err.Error())
return nil, err
}
nbConfigs = append(nbConfigs, konnectivityLinodeNBConfig)

return nbConfigs, nil
}

Expand Down Expand Up @@ -155,6 +183,38 @@ func AddNodeToNB(
return err
}

// return if konnectivity should not be configured
if !machineScope.LinodeCluster.Spec.Network.Konnectivity {
return nil

Check warning on line 188 in cloud/services/loadbalancers.go

View check run for this annotation

Codecov / codecov/patch

cloud/services/loadbalancers.go#L188

Added line #L188 was not covered by tests
}

konnectivityLBPort := defaultKonnectivityLBPort
if machineScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort != 0 {
konnectivityLBPort = machineScope.LinodeCluster.Spec.Network.KonnectivityLoadBalancerPort
}

if machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID == nil {
err := errors.New("nil NodeBalancer Config ID")
logger.Error(err, "config ID for NodeBalancer is nil")

return err
}

_, err = machineScope.LinodeClient.CreateNodeBalancerNode(
ctx,
*machineScope.LinodeCluster.Spec.Network.NodeBalancerID,
*machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID,
linodego.NodeBalancerNodeCreateOptions{
Label: machineScope.Cluster.Name,
Address: fmt.Sprintf("%s:%d", addresses.IPv4.Private[0].Address, konnectivityLBPort),
Mode: linodego.ModeAccept,
},
)
if err != nil {
logger.Error(err, "Failed to update Node Balancer")
return err

Check warning on line 215 in cloud/services/loadbalancers.go

View check run for this annotation

Codecov / codecov/patch

cloud/services/loadbalancers.go#L214-L215

Added lines #L214 - L215 were not covered by tests
}

return nil
}

Expand Down Expand Up @@ -187,5 +247,20 @@ func DeleteNodeFromNB(
return err
}

if !machineScope.LinodeCluster.Spec.Network.Konnectivity {
return nil

Check warning on line 251 in cloud/services/loadbalancers.go

View check run for this annotation

Codecov / codecov/patch

cloud/services/loadbalancers.go#L251

Added line #L251 was not covered by tests
}

err = machineScope.LinodeClient.DeleteNodeBalancerNode(
ctx,
*machineScope.LinodeCluster.Spec.Network.NodeBalancerID,
*machineScope.LinodeCluster.Spec.Network.KonnectivityNodeBalancerConfigID,
*machineScope.LinodeMachine.Spec.InstanceID,
)
if util.IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
logger.Error(err, "Failed to update Node Balancer")
return err
}

return nil
}
Loading

0 comments on commit 2ea608d

Please sign in to comment.