
Commit

add NB backend nodes only if they don't already exist
amold1 committed Aug 21, 2024
1 parent 59f9edc commit c9a154a
Showing 7 changed files with 126 additions and 123 deletions.
1 change: 1 addition & 0 deletions clients/clients.go
@@ -64,6 +64,7 @@ type LinodeVPCClient interface {
type LinodeNodeBalancerClient interface {
CreateNodeBalancer(ctx context.Context, opts linodego.NodeBalancerCreateOptions) (*linodego.NodeBalancer, error)
GetNodeBalancer(ctx context.Context, nodebalancerID int) (*linodego.NodeBalancer, error)
ListNodeBalancerNodes(ctx context.Context, nodebalancerID int, configID int, opts *linodego.ListOptions) ([]linodego.NodeBalancerNode, error)
GetNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int) (*linodego.NodeBalancerConfig, error)
CreateNodeBalancerConfig(ctx context.Context, nodebalancerID int, opts linodego.NodeBalancerConfigCreateOptions) (*linodego.NodeBalancerConfig, error)
DeleteNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int) error
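The interface change above adds ListNodeBalancerNodes to LinodeNodeBalancerClient so callers can inspect the backends already attached to a config before creating new ones. As a rough illustration only (the nbNodeLister interface and backendExists helper below are hypothetical stand-ins, not code from this commit), such a lookup could be wrapped like this:

package sketch

import (
    "context"
    "fmt"

    "github.com/linode/linodego"
)

// nbNodeLister is a hypothetical, narrowed stand-in for the provider's
// LinodeNodeBalancerClient, limited to the method added in this commit.
type nbNodeLister interface {
    ListNodeBalancerNodes(ctx context.Context, nodebalancerID int, configID int, opts *linodego.ListOptions) ([]linodego.NodeBalancerNode, error)
}

// backendExists reports whether a backend with the given "IP:port" address
// is already attached to the given NodeBalancer config.
func backendExists(ctx context.Context, client nbNodeLister, nodebalancerID, configID int, address string) (bool, error) {
    nodes, err := client.ListNodeBalancerNodes(ctx, nodebalancerID, configID, &linodego.ListOptions{})
    if err != nil {
        return false, fmt.Errorf("listing NodeBalancer nodes: %w", err)
    }
    for _, node := range nodes {
        if node.Address == address {
            return true, nil
        }
    }
    return false, nil
}

linodego reports each backend's Address as an "IP:port" string, which is why the comparison in loadbalancers.go below is done against fmt.Sprintf("%s:%d", ...).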
78 changes: 46 additions & 32 deletions cloud/services/loadbalancers.go
@@ -5,11 +5,13 @@ import (
"errors"
"fmt"
"net/http"
"strings"

"github.com/go-logr/logr"
"github.com/linode/linodego"
"sigs.k8s.io/cluster-api/api/v1beta1"

"github.com/linode/cluster-api-provider-linode/api/v1alpha2"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
"github.com/linode/cluster-api-provider-linode/util"
)
@@ -112,61 +114,73 @@ func EnsureNodeBalancerConfigs(
}

// AddNodesToNB adds backend Nodes on the Node Balancer configuration
func AddNodesToNB(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope) error {
func AddNodesToNB(ctx context.Context, logger logr.Logger, clusterScope *scope.ClusterScope, eachMachine v1alpha2.LinodeMachine) error {
apiserverLBPort := DefaultApiserverLBPort
if clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort != 0 {
apiserverLBPort = clusterScope.LinodeCluster.Spec.Network.ApiserverLoadBalancerPort
}

if clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID == nil {
err := errors.New("nil NodeBalancer Config ID")
logger.Error(err, "config ID for NodeBalancer is nil")
return errors.New("nil NodeBalancer Config ID")
}

nodeBalancerNodes, err := clusterScope.LinodeClient.ListNodeBalancerNodes(
ctx,
*clusterScope.LinodeCluster.Spec.Network.NodeBalancerID,
*clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID,
&linodego.ListOptions{},
)
if err != nil {
return err
}
internalIPFound := false
for _, IPs := range eachMachine.Status.Addresses {
if IPs.Type != v1beta1.MachineInternalIP || !strings.Contains(IPs.Address, "192.168") {
continue

}
internalIPFound = true

for _, eachMachine := range clusterScope.LinodeMachines.Items {
internalIPFound := false
for _, IPs := range eachMachine.Status.Addresses {
if IPs.Type != v1beta1.MachineInternalIP {
continue
}
internalIPFound = true
_, err := clusterScope.LinodeClient.CreateNodeBalancerNode(
ctx,
*clusterScope.LinodeCluster.Spec.Network.NodeBalancerID,
*clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID,
linodego.NodeBalancerNodeCreateOptions{
Label: clusterScope.Cluster.Name,
Address: fmt.Sprintf("%s:%d", IPs.Address, apiserverLBPort),
Mode: linodego.ModeAccept,
},
)
if err != nil {
logger.Error(err, "Failed to update Node Balancer")
return err
}
// Set the port number and NB config ID for standard ports
portsToBeAdded := make([]map[string]int, 0)
standardPort := map[string]int{"configID": *clusterScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID, "port": apiserverLBPort}
portsToBeAdded = append(portsToBeAdded, standardPort)

// Set the port number and NB config ID for any additional ports
for _, portConfig := range clusterScope.LinodeCluster.Spec.Network.AdditionalPorts {
portsToBeAdded = append(portsToBeAdded, map[string]int{"configID": *portConfig.NodeBalancerConfigID, "port": portConfig.Port})

}

logger.Info("abir", "portsToBeAdded", portsToBeAdded)

for _, portConfig := range clusterScope.LinodeCluster.Spec.Network.AdditionalPorts {
_, err = clusterScope.LinodeClient.CreateNodeBalancerNode(
// Cycle through all ports to be added
for _, ports := range portsToBeAdded {
ipPortComboExists := false
for _, nodes := range nodeBalancerNodes {
// Create the node if the IP:Port combination does not exist
if nodes.Address == fmt.Sprintf("%s:%d", IPs.Address, ports["port"]) {
ipPortComboExists = true
break

}
}
if !ipPortComboExists {
_, err := clusterScope.LinodeClient.CreateNodeBalancerNode(
ctx,
*clusterScope.LinodeCluster.Spec.Network.NodeBalancerID,
*portConfig.NodeBalancerConfigID,
ports["configID"],
linodego.NodeBalancerNodeCreateOptions{
Label: clusterScope.Cluster.Name,
Address: fmt.Sprintf("%s:%d", IPs.Address, portConfig.Port),
Address: fmt.Sprintf("%s:%d", IPs.Address, ports["port"]),
Mode: linodego.ModeAccept,
},
)
if err != nil {
logger.Error(err, "Failed to update Node Balancer")
return err

}
}
}
if !internalIPFound {
return errors.New("no private IP address")
}
}
if !internalIPFound {
return errors.New("no private IP address")
}

return nil
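Taken together, the reworked AddNodesToNB now handles one LinodeMachine at a time: it finds the machine's internal IP, pairs the apiserver NodeBalancer config ID with its port and appends any AdditionalPorts, lists the existing backend nodes, and only calls CreateNodeBalancerNode for IP:port combinations that are not already registered. A condensed sketch of that idempotency check follows; the nbClient interface, portTarget type, and ensureBackends helper are simplified placeholders rather than the provider's actual code:

package sketch

import (
    "context"
    "fmt"

    "github.com/linode/linodego"
)

// nbClient is a hypothetical subset of the provider's NodeBalancer client,
// limited to the two calls this flow depends on.
type nbClient interface {
    ListNodeBalancerNodes(ctx context.Context, nodebalancerID int, configID int, opts *linodego.ListOptions) ([]linodego.NodeBalancerNode, error)
    CreateNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, opts linodego.NodeBalancerNodeCreateOptions) (*linodego.NodeBalancerNode, error)
}

// portTarget pairs a NodeBalancer config ID with the backend port a machine
// should be registered on (the apiserver port plus any AdditionalPorts).
type portTarget struct {
    ConfigID int
    Port     int
}

// ensureBackends registers the machine's internal IP on every target port,
// skipping any IP:port combination that is already attached. Existing nodes
// are listed once per machine, mirroring the commit.
func ensureBackends(ctx context.Context, client nbClient, nodebalancerID, listConfigID int, label, internalIP string, targets []portTarget) error {
    existing, err := client.ListNodeBalancerNodes(ctx, nodebalancerID, listConfigID, &linodego.ListOptions{})
    if err != nil {
        return err
    }
    for _, target := range targets {
        address := fmt.Sprintf("%s:%d", internalIP, target.Port)
        alreadyRegistered := false
        for _, node := range existing {
            if node.Address == address {
                alreadyRegistered = true
                break
            }
        }
        if alreadyRegistered {
            continue // backend already attached; nothing to create
        }
        if _, err := client.CreateNodeBalancerNode(ctx, nodebalancerID, target.ConfigID, linodego.NodeBalancerNodeCreateOptions{
            Label:   label,
            Address: address,
            Mode:    linodego.ModeAccept,
        }); err != nil {
            return err
        }
    }
    return nil
}

Comparing by address before creating is what keeps repeated reconciles from piling duplicate backends onto the NodeBalancer, which is the behaviour the commit title describes.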
106 changes: 17 additions & 89 deletions cloud/services/loadbalancers_test.go
@@ -488,7 +488,9 @@ func TestAddNodeToNBConditions(t *testing.T) {
},
},
expectedError: fmt.Errorf("no private IP address"),
expects: func(mockClient *mock.MockLinodeClient) {},
expects: func(mockClient *mock.MockLinodeClient) {
mockClient.EXPECT().ListNodeBalancerNodes(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]linodego.NodeBalancerNode{}, nil)
},
expectK8sClient: func(mockK8sClient *mock.MockK8sClient) {
mockK8sClient.EXPECT().Scheme().Return(nil).AnyTimes()
},
@@ -509,9 +511,11 @@ func TestAddNodeToNBConditions(t *testing.T) {
testcase.clusterScope.Client = MockK8sClient
testcase.expectK8sClient(MockK8sClient)

err := AddNodesToNB(context.Background(), logr.Discard(), testcase.clusterScope)
if testcase.expectedError != nil {
assert.ErrorContains(t, err, testcase.expectedError.Error())
for _, eachMachine := range testcase.clusterScope.LinodeMachines.Items {
err := AddNodesToNB(context.Background(), logr.Discard(), testcase.clusterScope, eachMachine)
if testcase.expectedError != nil {
assert.ErrorContains(t, err, testcase.expectedError.Error())
}
}
})
}
@@ -527,53 +531,6 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) {
expects func(*mock.MockLinodeClient)
expectK8sClient func(*mock.MockK8sClient)
}{
{
name: "If the machine is not a control plane node, do nothing",
clusterScope: &scope.ClusterScope{
LinodeCluster: &infrav1alpha2.LinodeCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
UID: "test-uid",
},
Spec: infrav1alpha2.LinodeClusterSpec{
Network: infrav1alpha2.NetworkSpec{
NodeBalancerID: ptr.To(1234),
ApiserverNodeBalancerConfigID: ptr.To(5678),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
},
},
},
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
UID: "test-uid",
},
},
LinodeMachines: infrav1alpha2.LinodeMachineList{
Items: []infrav1alpha2.LinodeMachine{
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-machine",
UID: "test-uid",
},
Spec: infrav1alpha2.LinodeMachineSpec{
ProviderID: ptr.To("linode://123"),
InstanceID: ptr.To(123),
},
},
},
},
},
expects: func(*mock.MockLinodeClient) {},
expectK8sClient: func(mockK8sClient *mock.MockK8sClient) {
mockK8sClient.EXPECT().Scheme().Return(nil).AnyTimes()
},
},
{
name: "Success - If the machine is a control plane node, add the node to the NodeBalancer",
clusterScope: &scope.ClusterScope{
@@ -617,6 +574,7 @@
},
},
expects: func(mockClient *mock.MockLinodeClient) {
mockClient.EXPECT().ListNodeBalancerNodes(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]linodego.NodeBalancerNode{}, nil)
mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(&linodego.NodeBalancerNode{}, nil)
},
expectK8sClient: func(mockK8sClient *mock.MockK8sClient) {
@@ -667,9 +625,10 @@
},
},
},
expectedError: fmt.Errorf("could not create node balancer node"),
expectedError: nil,
expects: func(mockClient *mock.MockLinodeClient) {
mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("could not create node balancer node"))
mockClient.EXPECT().ListNodeBalancerNodes(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]linodego.NodeBalancerNode{}, nil)
mockClient.EXPECT().CreateNodeBalancerNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes()
},
expectK8sClient: func(mockK8sClient *mock.MockK8sClient) {
mockK8sClient.EXPECT().Scheme().Return(nil).AnyTimes()
@@ -691,9 +650,11 @@ func TestAddNodeToNBFullWorkflow(t *testing.T) {
testcase.clusterScope.Client = MockK8sClient
testcase.expectK8sClient(MockK8sClient)

err := AddNodesToNB(context.Background(), logr.Discard(), testcase.clusterScope)
if testcase.expectedError != nil {
assert.ErrorContains(t, err, testcase.expectedError.Error())
for _, eachMachine := range testcase.clusterScope.LinodeMachines.Items {
err := AddNodesToNB(context.Background(), logr.Discard(), testcase.clusterScope, eachMachine)
if testcase.expectedError != nil {
assert.ErrorContains(t, err, testcase.expectedError.Error())
}
}
})
}
@@ -710,39 +671,6 @@ func TestDeleteNodeFromNB(t *testing.T) {
expectK8sClient func(*mock.MockK8sClient)
}{
// TODO: Add test cases.
{
name: "If the machine is not a control plane node, do nothing",
clusterScope: &scope.ClusterScope{
LinodeCluster: &infrav1alpha2.LinodeCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
UID: "test-uid",
},
Spec: infrav1alpha2.LinodeClusterSpec{
Network: infrav1alpha2.NetworkSpec{
NodeBalancerID: ptr.To(1234),
ApiserverNodeBalancerConfigID: ptr.To(5678),
AdditionalPorts: []infrav1alpha2.LinodeNBPortConfig{
{
Port: DefaultKonnectivityLBPort,
NodeBalancerConfigID: ptr.To(1234),
},
},
},
},
},
Cluster: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
UID: "test-uid",
},
},
},
expects: func(*mock.MockLinodeClient) {},
expectK8sClient: func(mockK8sClient *mock.MockK8sClient) {
mockK8sClient.EXPECT().Scheme().Return(nil).AnyTimes()
},
},
{
name: "NodeBalancer is already deleted",
clusterScope: &scope.ClusterScope{
6 changes: 4 additions & 2 deletions controller/linodecluster_controller_helpers.go
@@ -12,8 +12,10 @@ import (
func (r *LinodeClusterReconciler) addMachineToLB(ctx context.Context, clusterScope *scope.ClusterScope) error {
logger := logr.FromContextOrDiscard(ctx)
if clusterScope.LinodeCluster.Spec.Network.LoadBalancerType != "dns" {
if err := services.AddNodesToNB(ctx, logger, clusterScope); err != nil {
return err
for _, eachMachine := range clusterScope.LinodeMachines.Items {
if err := services.AddNodesToNB(ctx, logger, clusterScope, eachMachine); err != nil {
return err

}
}
} else {
if err := services.EnsureDNSEntries(ctx, clusterScope, "create"); err != nil {
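With AddNodesToNB taking a single machine, addMachineToLB now fans out over clusterScope.LinodeMachines.Items and returns on the first failure, leaving retries to the caller. A minimal sketch of that shape; the machine struct and addNodesFn callback are placeholders standing in for the repository's LinodeMachine type and services.AddNodesToNB:

package sketch

import (
    "context"

    "github.com/go-logr/logr"
)

// machine and addNodesFn are stand-ins so the sketch stays self-contained;
// the real helper ranges over clusterScope.LinodeMachines.Items and calls
// services.AddNodesToNB with the cluster scope.
type machine struct {
    Name string
}

type addNodesFn func(ctx context.Context, logger logr.Logger, m machine) error

// addMachinesToLB mirrors the reshaped helper: the NodeBalancer path runs
// once per machine, and the first error is returned immediately.
func addMachinesToLB(ctx context.Context, logger logr.Logger, machines []machine, add addNodesFn) error {
    for _, m := range machines {
        if err := add(ctx, logger, m); err != nil {
            return err
        }
    }
    return nil
}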
1 change: 1 addition & 0 deletions controller/linodecluster_controller_test.go
@@ -227,6 +227,7 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
Call("cluster is created", func(ctx context.Context, mck Mock) {
cScope.LinodeClient = mck.LinodeClient
cScope.LinodeCluster.Spec.Network.ApiserverNodeBalancerConfigID = nil
mck.LinodeClient.EXPECT().ListNodeBalancerNodes(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]linodego.NodeBalancerNode{}, nil).AnyTimes()
getNB := mck.LinodeClient.EXPECT().GetNodeBalancer(gomock.Any(), gomock.Any()).
Return(&linodego.NodeBalancer{
ID: nodebalancerID,
30 changes: 30 additions & 0 deletions mock/client.go

Some generated files are not rendered by default.

27 changes: 27 additions & 0 deletions observability/wrappers/linodeclient/linodeclient.gen.go

Some generated files are not rendered by default.
