
add initial LinodeCluster controller logic #64

Merged
merged 13 commits into from
Jan 30, 2024
21 changes: 15 additions & 6 deletions api/v1alpha1/linodecluster_types.go
@@ -44,16 +44,16 @@ type LinodeClusterStatus struct {
Ready bool `json:"ready"`

// FailureReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// reconciling the LinodeCluster and will contain a succinct value suitable
// for machine interpretation.
// +optional
FailureReason *errors.MachineStatusError `json:"failureReason"`
FailureReason *errors.ClusterStatusError `json:"failureReason,omitempty"`

// FailureMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// reconciling the LinodeCluster and will contain a more verbose string suitable
// for logging and human consumption.
// +optional
FailureMessage *string `json:"failureMessage"`
FailureMessage *string `json:"failureMessage,omitempty"`

// Conditions defines current service state of the LinodeCluster.
// +optional
@@ -86,9 +86,18 @@ func (lm *LinodeCluster) SetConditions(conditions clusterv1.Conditions) {

// NetworkSpec encapsulates Linode networking resources.
type NetworkSpec struct {
// NodebalancerID is the id of apiserver Nodebalancer.
// LoadBalancerType is the type of load balancer to use; defaults to NodeBalancer if not otherwise set.
// +kubebuilder:validation:Enum=NodeBalancer
// +optional
NodebalancerID int `json:"nodebalancerID,omitempty"`
LoadBalancerType string `json:"loadBalancerType,omitempty"`
// LoadBalancerPort is the port used by the api server. It must be within the valid port range (1-65535). If omitted, the default value is 6443.
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
// +optional
LoadBalancerPort int `json:"loadBalancerPort,omitempty"`
// NodeBalancerID is the id of api server NodeBalancer.
// +optional
NodeBalancerID int `json:"nodeBalancerID,omitempty"`
}

// +kubebuilder:object:root=true
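For reference, a minimal Go sketch of how the new NetworkSpec fields might be set on a LinodeCluster object. This is illustrative only: the cluster name and region are placeholders, and it assumes the usual kubebuilder layout (a LinodeClusterSpec with Region and Network fields, as referenced elsewhere in this PR).

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
)

func main() {
	// Hypothetical LinodeCluster using the new load balancer fields.
	cluster := infrav1alpha1.LinodeCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-cluster"},
		Spec: infrav1alpha1.LinodeClusterSpec{
			Region: "us-east", // placeholder region
			Network: infrav1alpha1.NetworkSpec{
				LoadBalancerType: "NodeBalancer", // optional; the only allowed value and the default
				LoadBalancerPort: 6443,           // optional; defaults to 6443 when omitted
				// NodeBalancerID is left unset; the controller records it
				// once the NodeBalancer exists.
			},
		},
	}
	fmt.Println(cluster.Spec.Network.LoadBalancerPort)
}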
2 changes: 1 addition & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

26 changes: 23 additions & 3 deletions cloud/scope/cluster.go
@@ -17,11 +17,14 @@ limitations under the License.
package scope

import (
"context"
"errors"
"fmt"
"net/http"

infrav1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
"github.com/linode/linodego"
"golang.org/x/oauth2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -33,7 +36,7 @@ import (
type ClusterScopeParams struct {
Client client.Client
Cluster *clusterv1.Cluster
LinodeCluster *infrav1.LinodeCluster
LinodeCluster *infrav1alpha1.LinodeCluster
}

func validateClusterScopeParams(params ClusterScopeParams) error {
@@ -90,5 +93,22 @@ type ClusterScope struct {
PatchHelper *patch.Helper
LinodeClient *linodego.Client
Cluster *clusterv1.Cluster
LinodeCluster *infrav1.LinodeCluster
LinodeCluster *infrav1alpha1.LinodeCluster
}

// PatchObject persists the cluster configuration and status.
func (s *ClusterScope) PatchObject(ctx context.Context) error {
return s.PatchHelper.Patch(ctx, s.LinodeCluster)
}

// Close closes the current scope persisting the cluster configuration and status.
func (s *ClusterScope) Close(ctx context.Context) error {
return s.PatchObject(ctx)
}

// AddFinalizer adds a finalizer and immediately patches the object to avoid any race conditions
func (s *ClusterScope) AddFinalizer(ctx context.Context) error {
controllerutil.AddFinalizer(s.LinodeCluster, infrav1alpha1.GroupVersion.String())

return s.Close(ctx)
}
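For orientation, a rough sketch (not part of this PR) of how a reconciler could use the new ClusterScope helpers: add the finalizer up front, then persist changes at the end of the pass. The function name reconcileExample is hypothetical.

package example

import (
	"context"

	"github.com/linode/cluster-api-provider-linode/cloud/scope"
)

// reconcileExample illustrates the intended usage of AddFinalizer and Close;
// it is not the controller's actual reconcile logic.
func reconcileExample(ctx context.Context, clusterScope *scope.ClusterScope) error {
	// Patch the finalizer onto the LinodeCluster immediately so a delete
	// racing with resource creation cannot orphan cloud resources.
	if err := clusterScope.AddFinalizer(ctx); err != nil {
		return err
	}

	// ... create or verify Linode resources here ...

	// Persist any spec/status changes made during this pass.
	return clusterScope.Close(ctx)
}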
112 changes: 112 additions & 0 deletions cloud/services/loadbalancers.go
@@ -0,0 +1,112 @@
package services

import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"slices"
"strconv"

"github.com/go-logr/logr"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
"github.com/linode/cluster-api-provider-linode/util"
"github.com/linode/linodego"
)

var (
defaultLBPort = 6443
)

// CreateNodeBalancer creates a new NodeBalancer if one doesn't exist
func CreateNodeBalancer(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancer, error) {
var linodeNBs []linodego.NodeBalancer
var linodeNB *linodego.NodeBalancer
NBLabel := fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name)
clusterUID := string(clusterScope.LinodeCluster.UID)
tags := []string{string(clusterScope.LinodeCluster.UID)}
filter := map[string]string{
"label": NBLabel,
}

rawFilter, err := json.Marshal(filter)
if err != nil {
return nil, err
}
if linodeNBs, err = clusterScope.LinodeClient.ListNodeBalancers(ctx, linodego.NewListOptions(1, string(rawFilter))); err != nil {
logger.Info("Failed to list NodeBalancers", "error", err.Error())

return nil, err
}
if len(linodeNBs) == 1 {
logger.Info(fmt.Sprintf("NodeBalancer %s already exists", *linodeNBs[0].Label))
if !slices.Contains(linodeNBs[0].Tags, clusterUID) {
err = errors.New("NodeBalancer conflict")
logger.Error(err, fmt.Sprintf("NodeBalancer %s is not associated with cluster UID %s. Owner cluster is %s", *linodeNBs[0].Label, clusterUID, linodeNBs[0].Tags[0]))

return nil, err
}

return &linodeNBs[0], nil
}

logger.Info(fmt.Sprintf("Creating NodeBalancer %s-api-server", clusterScope.LinodeCluster.Name))
createConfig := linodego.NodeBalancerCreateOptions{
Label: util.Pointer(fmt.Sprintf("%s-api-server", clusterScope.LinodeCluster.Name)),
Region: clusterScope.LinodeCluster.Spec.Region,
Tags: tags,
}

if linodeNB, err = clusterScope.LinodeClient.CreateNodeBalancer(ctx, createConfig); err != nil {
logger.Info("Failed to create Linode NodeBalancer", "error", err.Error())

// Already exists is not an error
apiErr := linodego.Error{}
if errors.As(err, &apiErr) && apiErr.Code != http.StatusFound {
return nil, err
}

if linodeNB != nil {
logger.Info("Linode NodeBalancer already exists", "existing", linodeNB.Label)
}
}

return linodeNB, nil
}

// CreateNodeBalancerConfig creates NodeBalancer config if it does not exist
func CreateNodeBalancerConfig(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) (*linodego.NodeBalancerConfig, error) {
var linodeNBConfigs []linodego.NodeBalancerConfig
var linodeNBConfig *linodego.NodeBalancerConfig
var err error

if linodeNBConfigs, err = clusterScope.LinodeClient.ListNodeBalancerConfigs(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, linodego.NewListOptions(1, "")); err != nil {
logger.Info("Failed to list NodeBalancer Configs", "error", err.Error())

return nil, err
}
if len(linodeNBConfigs) == 1 {
logger.Info("NodeBalancer ", strconv.Itoa(linodeNBConfigs[0].ID), " already exists")

return &linodeNBConfigs[0], err
}
lbPort := defaultLBPort
if clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort != 0 {
lbPort = clusterScope.LinodeCluster.Spec.Network.LoadBalancerPort
}
createConfig := linodego.NodeBalancerConfigCreateOptions{
Port: lbPort,
Protocol: linodego.ProtocolTCP,
Algorithm: linodego.AlgorithmRoundRobin,
Check: linodego.CheckConnection,
}

if linodeNBConfig, err = clusterScope.LinodeClient.CreateNodeBalancerConfig(ctx, clusterScope.LinodeCluster.Spec.Network.NodeBalancerID, createConfig); err != nil {
logger.Info("Failed to create Linode NodeBalancer config", "error", err.Error())

return nil, err
}

return linodeNBConfig, nil
}
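A hedged sketch of how the cluster controller might chain these two helpers during reconcile: create or look up the NodeBalancer, record its ID on the spec, then ensure a config on the API server port. Only CreateNodeBalancer and CreateNodeBalancerConfig come from the file above; the wrapper function and its control flow are illustrative.

package example

import (
	"context"
	"errors"

	"github.com/go-logr/logr"

	"github.com/linode/cluster-api-provider-linode/cloud/scope"
	"github.com/linode/cluster-api-provider-linode/cloud/services"
)

// reconcileNodeBalancer is a hypothetical wrapper, not code from this PR.
func reconcileNodeBalancer(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) error {
	// Create the NodeBalancer, or reuse an existing one tagged with the cluster UID.
	nb, err := services.CreateNodeBalancer(ctx, clusterScope, logger)
	if err != nil {
		return err
	}
	if nb == nil {
		return errors.New("no NodeBalancer available")
	}

	// Record the ID so CreateNodeBalancerConfig (and any control-plane
	// endpoint handling) can refer to it.
	clusterScope.LinodeCluster.Spec.Network.NodeBalancerID = nb.ID

	// Ensure a TCP config exists on the load balancer port (6443 by default).
	_, err = services.CreateNodeBalancerConfig(ctx, clusterScope, logger)

	return err
}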
14 changes: 9 additions & 5 deletions cmd/main.go
@@ -39,7 +39,7 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

infrastructurev1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
//+kubebuilder:scaffold:imports
// +kubebuilder:scaffold:imports
)

var (
@@ -51,7 +51,7 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(capi.AddToScheme(scheme))
utilruntime.Must(infrastructurev1alpha1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
// +kubebuilder:scaffold:scheme
}

func main() {
@@ -62,10 +62,12 @@ func main() {
}

var machineWatchFilter string
var clusterWatchFilter string
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&machineWatchFilter, "machine-watch-filter", "", "The machines to watch by label.")
flag.StringVar(&clusterWatchFilter, "cluster-watch-filter", "", "The clusters to watch by label.")
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
@@ -103,8 +105,10 @@ func main() {
}

if err = (&controller2.LinodeClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Client: mgr.GetClient(),
Recorder: mgr.GetEventRecorderFor("LinodeClusterReconciler"),
WatchFilterValue: clusterWatchFilter,
LinodeApiKey: linodeToken,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "LinodeCluster")
os.Exit(1)
@@ -119,7 +123,7 @@
setupLog.Error(err, "unable to create controller", "controller", "LinodeMachine")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
// +kubebuilder:scaffold:builder

if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
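The new --cluster-watch-filter flag reaches the reconciler as WatchFilterValue. Below is a minimal, assumption-laden sketch of one way SetupWithManager could honor it with a plain controller-runtime predicate; the actual controller may instead use Cluster API's ready-made label-filter predicates, and hasWatchLabel/setupExample are hypothetical names.

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
)

// hasWatchLabel reconciles everything when watchFilterValue is empty;
// otherwise it only lets through objects whose cluster.x-k8s.io/watch-filter
// label matches the flag value.
func hasWatchLabel(watchFilterValue string) predicate.Funcs {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		if watchFilterValue == "" {
			return true
		}
		return obj.GetLabels()[clusterv1.WatchLabel] == watchFilterValue
	})
}

// setupExample shows where such a predicate could be plugged in.
func setupExample(mgr ctrl.Manager, r reconcile.Reconciler, watchFilterValue string) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1alpha1.LinodeCluster{}).
		WithEventFilter(hasWatchLabel(watchFilterValue)).
		Complete(r)
}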
@@ -72,8 +72,21 @@ spec:
description: NetworkSpec encapsulates all things related to Linode
network.
properties:
nodebalancerID:
description: NodebalancerID is the id of apiserver Nodebalancer.
loadBalancerPort:
description: LoadBalancerPort is the port used by the api server.
It must be within the valid port range (1-65535). If omitted,
the default value is 6443.
maximum: 65535
minimum: 1
type: integer
loadBalancerType:
description: LoadBalancerType is the type of load balancer to
use; defaults to NodeBalancer if not otherwise set.
enum:
- NodeBalancer
type: string
nodeBalancerID:
description: NodeBalancerID is the id of api server NodeBalancer.
type: integer
type: object
region:
@@ -132,13 +145,13 @@
type: array
failureMessage:
description: FailureMessage will be set in the event that there is
a terminal problem reconciling the Machine and will contain a more
verbose string suitable for logging and human consumption.
a terminal problem reconciling the LinodeCluster and will contain
a more verbose string suitable for logging and human consumption.
type: string
failureReason:
description: FailureReason will be set in the event that there is
a terminal problem reconciling the Machine and will contain a succinct
value suitable for machine interpretation.
a terminal problem reconciling the LinodeCluster and will contain
a succinct value suitable for machine interpretation.
type: string
ready:
description: Ready denotes that the cluster (infrastructure) is ready.
@@ -67,8 +67,21 @@ spec:
description: NetworkSpec encapsulates all things related to
Linode network.
properties:
nodebalancerID:
description: NodebalancerID is the id of apiserver Nodebalancer.
loadBalancerPort:
description: LoadBalancerPort is the port used by the api
server. It must be within the valid port range (1-65535).
If omitted, the default value is 6443.
maximum: 65535
minimum: 1
type: integer
loadBalancerType:
description: LoadBalancerType is the type of load balancer
to use; defaults to NodeBalancer if not otherwise set.
enum:
- NodeBalancer
type: string
nodeBalancerID:
description: NodeBalancerID is the id of api server NodeBalancer.
type: integer
type: object
region:
5 changes: 5 additions & 0 deletions config/crd/kustomization.yaml
@@ -1,3 +1,8 @@
# common labels for CRD resources as required by
# https://cluster-api.sigs.k8s.io/developer/providers/contracts.html#api-version-labels
commonLabels:
cluster.x-k8s.io/v1beta1: v1alpha1

# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default