add initial LinodeCluster controller logic
eljohnson92 committed Jan 25, 2024
1 parent ad28192 commit e10305c
Showing 8 changed files with 454 additions and 31 deletions.
17 changes: 13 additions & 4 deletions api/v1alpha1/linodecluster_types.go
@@ -47,13 +47,13 @@ type LinodeClusterStatus struct {
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
// +optional
FailureReason *errors.MachineStatusError `json:"failureReason"`
FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"`

// FailureMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
// +optional
FailureMessage *string `json:"failureMessage"`
FailureMessage *string `json:"failureMessage,omitempty"`

// Conditions defines current service state of the LinodeCluster.
// +optional
@@ -85,9 +85,18 @@ func (lm *LinodeCluster) SetConditions(conditions clusterv1.Conditions) {

// NetworkSpec encapsulates Linode networking resources.
type NetworkSpec struct {
// NodebalancerID is the id of apiserver Nodebalancer.
// LoadBalancerType is the type of load balancer to use; it defaults to NodeBalancer if not otherwise set
// +kubebuilder:validation:Enum=NodeBalancer
// +optional
NodebalancerID int `json:"nodebalancerID,omitempty"`
LoadBalancerType string `json:"loadBalancerType,omitempty"`
// LoadBalancerPort is the port used by the API server. It must be within the valid port range (1-65535). If omitted, the default value is 6443.
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
// +optional
LoadBalancerPort int `json:"loadBalancerPort,omitempty"`
// NodeBalancerID is the ID of the API server NodeBalancer.
// +optional
NodeBalancerID int `json:"nodeBalancerID,omitempty"`
}

// +kubebuilder:object:root=true
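For a concrete picture of the new fields, here is a minimal sketch (not part of this commit) of a LinodeCluster object populated with the new NetworkSpec options. The LinodeClusterSpec type name is assumed; the Region and Network field names are taken from the controller code below.

package main

import (
	"fmt"

	infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
)

func main() {
	// Sketch only: LinodeClusterSpec is assumed from the surrounding API package.
	cluster := infrav1.LinodeCluster{
		Spec: infrav1.LinodeClusterSpec{
			Region: "us-east",
			Network: infrav1.NetworkSpec{
				LoadBalancerType: "NodeBalancer", // only value allowed by the enum
				LoadBalancerPort: 6443,           // optional; 6443 is also the default
				// NodeBalancerID is left unset; the controller fills it in once
				// the NodeBalancer exists.
			},
		},
	}
	fmt.Printf("%s:%d\n", cluster.Spec.Network.LoadBalancerType, cluster.Spec.Network.LoadBalancerPort)
}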
2 changes: 1 addition & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

137 changes: 137 additions & 0 deletions cloud/services/loadbalancers.go
@@ -0,0 +1,137 @@
package services

import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/go-logr/logr"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
"github.com/linode/cluster-api-provider-linode/util"
"github.com/linode/linodego"
"net/http"
"strconv"
"strings"
)

var (
defaultLBPort = 6443
)

// CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
func CreateNodeBalancer(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancer, error) {
var linodeNBs []linodego.NodeBalancer
var linodeNB *linodego.NodeBalancer

tags := []string{string(clusterScope.LinodeCluster.UID)}
filter := map[string]string{
"tags": strings.Join(tags, ","),
}

rawFilter, err := json.Marshal(filter)
if err != nil {

return nil, err
}
logger.Info("Creating NodeBalancer")
if linodeNBs, err = clusterScope.LinodeClient.ListNodeBalancers(ctx, linodego.NewListOptions(1, string(rawFilter))); err != nil {
logger.Info("Failed to list NodeBalancers", "error", err.Error())

return nil, err
}

switch len(linodeNBs) {
case 1:
logger.Info(fmt.Sprintf("NodeBalancer %s already exists", *linodeNBs[0].Label))

linodeNB = &linodeNBs[0]
case 0:
logger.Info(fmt.Sprintf("Creating NodeBalancer %s-api-server", clusterScope.LinodeCluster.Name))
createConfig := linodego.NodeBalancerCreateOptions{
Label: util.Pointer(fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name)),
Region: clusterScope.LinodeCluster.Spec.Region,
ClientConnThrottle: nil,
Tags: tags,
}

if linodeNB, err = clusterScope.LinodeClient.CreateNodeBalancer(ctx, createConfig); err != nil {
logger.Info("Failed to create Linode NodeBalancer", "error", err.Error())

// Already exists is not an error
apiErr := linodego.Error{}
if errors.As(err, &apiErr) && apiErr.Code != http.StatusFound {
return nil, err
}

err = nil

if linodeNB != nil {
logger.Info("Linode NodeBalancer already exists", "existing", linodeNB.Label)
}
}

default:
err = errors.New("multiple NodeBalancers")

logger.Error(err, "Panic! Multiple NodeBalancers found. This might be a concurrency issue in the controller!!!", "filters", string(rawFilter))

return nil, err
}

if linodeNB == nil {
err = errors.New("missing NodeBalancer")

logger.Error(err, "Panic! Failed to create NodeBalancer")

return nil, err
}

return linodeNB, nil
}
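To show how this helper might be consumed, here is a rough sketch; the reconcileNodeBalancer wrapper and its controller package placement are assumptions for illustration, not code from this commit.

package controller

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"

	"github.com/linode/cluster-api-provider-linode/cloud/scope"
	"github.com/linode/cluster-api-provider-linode/cloud/services"
)

// reconcileNodeBalancer is a hypothetical wrapper: it ensures the NodeBalancer
// exists and persists its ID so later reconciles (and CreateNodeBalancerConfig)
// reuse the same NodeBalancer instead of creating another one.
func reconcileNodeBalancer(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) error {
	nb, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
	if err != nil {
		return fmt.Errorf("failed to ensure NodeBalancer: %w", err)
	}

	// NodeBalancerID is the NetworkSpec field added in this commit.
	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = nb.ID

	return nil
}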

// CreateNodeBalancerConfig creates NodeBalancer config if it does not exist
func CreateNodeBalancerConfig(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancerConfig, error) {

var linodeNBConfigs []linodego.NodeBalancerConfig
var linodeNBConfig *linodego.NodeBalancerConfig
var err error

if linodeNBConfigs, err = clusterScope.LinodeClient.ListNodeBalancerConfigs(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, linodego.NewListOptions(1, "")); err != nil {
logger.Info("Failed to list NodeBalancer Configs", "error", err.Error())

return nil, err
}
lbPort := defaultLBPort
if clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 {
lbPort = clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort
}
switch len(linodeNBConfigs) {
case 1:
logger.Info("NodeBalancer ", strconv.Itoa(linodeNBConfigs[0].ID), " already exists")
linodeNBConfig = &linodeNBConfigs[0]

case 0:
createConfig := linodego.NodeBalancerConfigCreateOptions{
Port: lbPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
}

if linodeNBConfig, err = clusterScope.LinodeClient.CreateNodeBalancerConfig(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, createConfig); err != nil {
logger.Info("Failed to create Linode NodeBalancer config", "error", err.Error())

return nil, err

}

default:
err = errors.New("multiple NodeBalancer Configs")

logger.Error(err, "Panic! Multiple NodeBalancer Configs found. This might be a concurrency issue in the controller!!!")

return nil, err
}

return linodeNBConfig, nil
}
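A similar sketch for the config side: once a config exists for the chosen LoadBalancerPort, the cluster's control-plane endpoint can be derived from the NodeBalancer address and the config port. The ControlPlaneEndpoint field and the host argument are assumptions here; only CreateNodeBalancerConfig itself is defined in this commit.

package controller

import (
	"context"

	"github.com/go-logr/logr"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	"github.com/linode/cluster-api-provider-linode/cloud/scope"
	"github.com/linode/cluster-api-provider-linode/cloud/services"
)

// reconcileNodeBalancerConfig is a hypothetical wrapper around the new helper.
func reconcileNodeBalancerConfig(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger, host string) error {
	nbConfig, err := services.CreateNodeBalancerConfig(ctx, clusterScope, logger)
	if err != nil {
		return err
	}

	// ControlPlaneEndpoint is assumed to exist on LinodeClusterSpec per the
	// Cluster API provider contract; host would typically be the NodeBalancer's
	// public IPv4.
	clusterScope.LinodeCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
		Host: host,
		Port: int32(nbConfig.Port),
	}

	return nil
}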
15 changes: 10 additions & 5 deletions cmd/main.go
@@ -39,7 +39,7 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

infrastructurev1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
//+kubebuilder:scaffold:imports
// +kubebuilder:scaffold:imports
)

var (
@@ -51,7 +51,7 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(capi.AddToScheme(scheme))
utilruntime.Must(infrastructurev1alpha1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
// +kubebuilder:scaffold:scheme
}

func main() {
@@ -62,10 +62,12 @@
}

var machineWatchFilter string
var clusterWatchFilter string
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&machineWatchFilter, "machine-watch-filter", "", "The machines to watch by label.")
flag.StringVar(&clusterWatchFilter, "cluster-watch-filter", "", "The clusters to watch by label.")
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
@@ -103,8 +105,11 @@
}

if err = (&controller2.LinodeClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("LinodeClusterReconciler"),
WatchFilterValue: clusterWatchFilter,
LinodeApiKey: linodeToken,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "LinodeCluster")
os.Exit(1)
@@ -119,7 +124,7 @@
setupLog.Error(err, "unable to create controller", "controller", "LinodeMachine")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
// +kubebuilder:scaffold:builder

if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
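The wiring above implies the following shape for the reconciler struct; this is a sketch for orientation only, since the controller package itself is not part of this excerpt and the real definition may carry additional fields.

package controller

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// LinodeClusterReconciler sketches the fields that main.go populates: a client
// and scheme, an event recorder, the label filter from --cluster-watch-filter,
// and the Linode API token.
type LinodeClusterReconciler struct {
	Client           client.Client
	Scheme           *runtime.Scheme
	Recorder         record.EventRecorder
	WatchFilterValue string
	LinodeApiKey     string
}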
@@ -68,8 +68,21 @@
description: NetworkSpec encapsulates all things related to Linode
network.
properties:
nodebalancerID:
description: NodebalancerID is the id of apiserver Nodebalancer.
loadBalancerPort:
description: LoadBalancerPort is the port used by the API server.
It must be within the valid port range (1-65535). If omitted, the
default value is 6443.
maximum: 65535
minimum: 1
type: integer
loadBalancerType:
description: LoadBalancerType is the type of load balancer to
use; it defaults to NodeBalancer if not otherwise set
enum:
- NodeBalancer
type: string
nodeBalancerID:
description: NodeBalancerID is the ID of the API server NodeBalancer.
type: integer
type: object
region:
5 changes: 5 additions & 0 deletions config/crd/kustomization.yaml
@@ -1,3 +1,8 @@
# common labels for CRD resources as required by
# https://cluster-api.sigs.k8s.io/developer/providers/contracts.html#api-version-labels
commonLabels:
cluster.x-k8s.io/v1beta1: v1alpha1

# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default