Skip to content

Commit

Permalink
Basic E2E test for machine controller (#80)
Browse files Browse the repository at this point in the history
* Basic E2E test for machine controller

* Fix linode machine update

---------

Co-authored-by: Richard Kovacs <[email protected]>
  • Loading branch information
mhmxs and Richard Kovacs authored Feb 8, 2024
1 parent 73d6c28 commit 78df137
Show file tree
Hide file tree
Showing 27 changed files with 393 additions and 74 deletions.
7 changes: 4 additions & 3 deletions .github/workflows/build_test_ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ jobs:
- name: Docker cache
uses: ScribeMD/[email protected]
with:
key: docker-${{ runner.os }}-${{ hashFiles('Makefile') }}}
key: docker-${{ runner.os }}-${{ hashFiles('go.sum') }}

- name: Lint
run: make lint
Expand All @@ -100,6 +100,7 @@ jobs:
disable-sudo: true
egress-policy: block
allowed-endpoints: >
api.linode.com:443
api.github.com:443
github.com:443
gcr.io:443
Expand Down Expand Up @@ -130,7 +131,7 @@ jobs:
- name: Docker cache
uses: ScribeMD/[email protected]
with:
key: docker-${{ runner.os }}-${{ hashFiles('Makefile') }}}
key: docker-${{ runner.os }}-${{ hashFiles('go.sum') }}

- name: E2E test
run: make e2etest
Expand Down Expand Up @@ -173,7 +174,7 @@ jobs:
- name: Docker cache
uses: ScribeMD/[email protected]
with:
key: docker-${{ runner.os }}-${{ hashFiles('Makefile') }}
key: docker-${{ runner.os }}-${{ hashFiles('go.sum') }}

- name: Build the Docker image
run: make docker-build
17 changes: 14 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -89,11 +89,14 @@ test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -race -timeout 60s ./... -coverprofile cover.out

.PHONY: e2etest
e2etest: kind ctlptl tilt kuttl kustomize clusterctl manifests generate
e2etest:
make --no-print-directory _e2etest # Workaround to force the flag on Github Action

_e2etest: kind ctlptl tilt kuttl kustomize clusterctl envsubst manifests generate
@echo -n "LINODE_TOKEN=$(LINODE_TOKEN)" > config/default/.env.linode
$(CTLPTL) apply -f .tilt/ctlptl-config.yaml
$(TILT) ci --timeout 180s -f Tiltfile
$(KUTTL) test --config e2e/kuttl-config.yaml
$(TILT) ci --timeout 240s -f Tiltfile
ROOT_DIR="$(PWD)" $(KUTTL) test --config e2e/kuttl-config.yaml

##@ Build

Expand Down Expand Up @@ -180,6 +183,7 @@ TILT ?= $(LOCALBIN)/tilt
KIND ?= $(LOCALBIN)/kind
KUTTL ?= $(LOCALBIN)/kuttl
ENVTEST ?= $(LOCALBIN)/setup-envtest
ENVSUBST ?= $(LOCALBIN)/envsubst
HUSKY ?= $(LOCALBIN)/husky
NILAWAY ?= $(LOCALBIN)/nilaway
GOVULNC ?= $(LOCALBIN)/govulncheck
Expand All @@ -192,6 +196,7 @@ CONTROLLER_TOOLS_VERSION ?= v0.13.0
TILT_VERSION ?= 0.33.6
KIND_VERSION ?= 0.20.0
KUTTL_VERSION ?= 0.15.0
ENVSUBST_VERSION ?= v1.4.2
HUSKY_VERSION ?= v0.2.16
NILAWAY_VERSION ?= latest
GOVULNC_VERSION ?= v1.0.1
Expand Down Expand Up @@ -251,6 +256,12 @@ envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

.PHONY: envsubst
envsubst: $(ENVSUBST) ## Download envsubst locally if necessary. If wrong version is installed, it will be overwritten.
$(ENVSUBST): $(LOCALBIN)
test -s $(ENVSUBST) || \
(cd $(LOCALBIN); curl -Lso ./envsubst https://github.com/a8m/envsubst/releases/download/$(ENVSUBST_VERSION)/envsubst-$(shell uname -s)-$(ARCH) && chmod +x envsubst)

.PHONY: husky
husky: $(HUSKY) ## Download husky locally if necessary.
@echo Execute install command to enable git hooks: ./bin/husky install
Expand Down
6 changes: 3 additions & 3 deletions cloud/scope/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@ import (
"errors"
"fmt"

"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
"github.com/linode/linodego"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

infrav1alpha1 "github.com/linode/cluster-api-provider-linode/api/v1alpha1"
)

// ClusterScopeParams defines the input parameters used to create a new Scope.
Expand Down
2 changes: 2 additions & 0 deletions config/manager/manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,8 @@ spec:
secretKeyRef:
name: token
key: LINODE_TOKEN
- name: LINODE_API_VERSION
value: v4beta
name: manager
securityContext:
allowPrivilegeEscalation: false
Expand Down
104 changes: 53 additions & 51 deletions controller/linodemachine_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ package controller

import (
"context"
b64 "encoding/base64"
"errors"
"fmt"
"net/http"
Expand All @@ -31,6 +30,7 @@ import (
"github.com/linode/linodego"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
cerrs "sigs.k8s.io/cluster-api/errors"
Expand Down Expand Up @@ -90,6 +90,8 @@ type LinodeMachineReconciler struct {
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
//
//nolint:gocyclo,cyclop // As simple as possible.
func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout))
defer cancel()
Expand All @@ -98,17 +100,21 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques

linodeMachine := &infrav1alpha1.LinodeMachine{}
if err := r.Client.Get(ctx, req.NamespacedName, linodeMachine); err != nil {
log.Error(err, "Failed to fetch Linode machine")
if err = client.IgnoreNotFound(err); err != nil {
log.Error(err, "Failed to fetch Linode machine")
}

return ctrl.Result{}, client.IgnoreNotFound(err)
return ctrl.Result{}, err
}

machine, err := kutil.GetOwnerMachine(ctx, r.Client, linodeMachine.ObjectMeta)
switch {
case err != nil:
log.Error(err, "Failed to fetch owner machine")
if err = client.IgnoreNotFound(err); err != nil {
log.Error(err, "Failed to fetch owner machine")
}

return ctrl.Result{}, client.IgnoreNotFound(err)
return ctrl.Result{}, err
case machine == nil:
log.Info("Machine Controller has not yet set OwnerRef, skipping reconciliation")

Expand All @@ -134,14 +140,25 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
log = log.WithValues("Linode machine: ", machine.Name)

cluster, err := kutil.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
log.Info("Failed to fetch cluster by label")
switch {
case err != nil:
if err = client.IgnoreNotFound(err); err != nil {
log.Error(err, "Failed to fetch cluster by label")
}

return ctrl.Result{}, client.IgnoreNotFound(err)
} else if cluster == nil {
log.Info("Failed to find cluster by label")
return ctrl.Result{}, err
case cluster == nil:
err = errors.New("missing cluster")

return ctrl.Result{}, client.IgnoreNotFound(err)
log.Error(err, "Missing cluster")

return ctrl.Result{}, err
case cluster.Spec.InfrastructureRef == nil:
err = errors.New("missing infrastructure reference")

log.Error(err, "Missing infrastructure reference")

return ctrl.Result{}, err
}

linodeCluster := &infrav1alpha1.LinodeCluster{}
Expand All @@ -151,9 +168,11 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}

if err = r.Client.Get(ctx, linodeClusterKey, linodeCluster); err != nil {
log.Error(err, "Failed to fetch Linode cluster")
if err = client.IgnoreNotFound(err); err != nil {
log.Error(err, "Failed to fetch Linode cluster")
}

return ctrl.Result{}, client.IgnoreNotFound(err)
return ctrl.Result{}, err
}

machineScope, err := scope.NewMachineScope(
Expand Down Expand Up @@ -214,7 +233,7 @@ func (r *LinodeMachineReconciler) reconcile(
}

// Always close the scope when exiting this function so we can persist any LinodeMachine changes.
if patchErr := machineScope.Close(ctx); patchErr != nil && client.IgnoreNotFound(patchErr) != nil {
if patchErr := machineScope.Close(ctx); patchErr != nil && utilerrors.FilterOut(patchErr) != nil {
logger.Error(patchErr, "failed to patch LinodeMachine")

err = errors.Join(err, patchErr)
Expand Down Expand Up @@ -252,7 +271,7 @@ func (r *LinodeMachineReconciler) reconcile(

logger = logger.WithValues("ID", *machineScope.LinodeMachine.Spec.InstanceID)

res, err = r.reconcileUpdate(ctx, logger, machineScope)
res, linodeInstance, err = r.reconcileUpdate(ctx, logger, machineScope)

return
}
Expand All @@ -265,7 +284,7 @@ func (r *LinodeMachineReconciler) reconcile(

return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForBootstrapDelay}, nil
}
err = r.reconcileCreate(ctx, logger, machineScope, clusterScope)
linodeInstance, err = r.reconcileCreate(ctx, logger, machineScope, clusterScope)

return
}
Expand All @@ -275,7 +294,7 @@ func (r *LinodeMachineReconciler) reconcileCreate(
logger logr.Logger,
machineScope *scope.MachineScope,
clusterScope *scope.ClusterScope,
) error {
) (*linodego.Instance, error) {
logger.Info("creating machine")

tags := []string{string(machineScope.LinodeCluster.UID), string(machineScope.LinodeMachine.UID)}
Expand All @@ -284,7 +303,7 @@ func (r *LinodeMachineReconciler) reconcileCreate(
if err != nil {
logger.Error(err, "Failed to list Linode machine instances")

return err
return nil, err
}

var linodeInstance *linodego.Instance
Expand All @@ -294,36 +313,20 @@ func (r *LinodeMachineReconciler) reconcileCreate(

linodeInstance = &linodeInstances[0]
case 0:
createConfig := linodeMachineSpecToInstanceCreateConfig(machineScope.LinodeMachine.Spec)
if createConfig == nil {
err = errors.New("failed to convert machine spec to create instance config")

logger.Error(err, "Panic! Struct of LinodeMachineSpec is different than InstanceCreateOptions")

return err
}
createConfig.Label = machineScope.LinodeMachine.Spec.Label
createConfig.Tags = tags
createConfig.SwapSize = util.Pointer(0)
createConfig.PrivateIP = true

// get the bootstrap data for the Linode instance and set it for create config
bootstrapData, err := machineScope.GetBootstrapData(ctx)
createConfig, err := r.newCreateConfig(ctx, machineScope, tags, logger)
if err != nil {
logger.Info("Failed to get bootstrap data", "error", err.Error())
logger.Error(err, "Failed to create Linode machine create config")

return err
}
createConfig.Metadata = &linodego.InstanceMetadataOptions{
UserData: b64.StdEncoding.EncodeToString(bootstrapData),
return nil, err
}

if machineScope.LinodeCluster.Spec.VPCRef != nil {
iface, err := r.getVPCInterfaceConfig(ctx, machineScope, createConfig.Interfaces, logger)
if err != nil {
logger.Error(err, "Failed to get VPC interface config")

return err
return nil, err
}

createConfig.Interfaces = append(createConfig.Interfaces, *iface)
Expand All @@ -333,22 +336,22 @@ func (r *LinodeMachineReconciler) reconcileCreate(
if err != nil {
logger.Error(err, "Failed to create Linode machine instance")

return err
return nil, err
}
default:
err = errors.New("multiple instances")

logger.Error(err, "Panic! Multiple instances found. This might be a concurrency issue in the controller!!!", "tags", tags)

return err
return nil, err
}

if linodeInstance == nil {
err = errors.New("missing instance")

logger.Error(err, "Panic! Failed to create instance")

return err
return nil, err
}

machineScope.LinodeMachine.Status.Ready = true
Expand All @@ -366,26 +369,25 @@ func (r *LinodeMachineReconciler) reconcileCreate(
if err = services.AddNodeToNB(ctx, logger, machineScope, clusterScope); err != nil {
logger.Error(err, "Failed to add instance to Node Balancer backend")

return err
return linodeInstance, err
}

return nil
return linodeInstance, nil
}

func (r *LinodeMachineReconciler) reconcileUpdate(
ctx context.Context,
logger logr.Logger,
machineScope *scope.MachineScope,
) (res reconcile.Result, err error) {
) (res reconcile.Result, linodeInstance *linodego.Instance, err error) {
logger.Info("updating machine")

res = ctrl.Result{}

if machineScope.LinodeMachine.Spec.InstanceID == nil {
return res, errors.New("missing instance ID")
return res, nil, errors.New("missing instance ID")
}

var linodeInstance *linodego.Instance
if linodeInstance, err = machineScope.LinodeClient.GetInstance(ctx, *machineScope.LinodeMachine.Spec.InstanceID); err != nil {
err = util.IgnoreLinodeAPIError(err, http.StatusNotFound)
if err != nil {
Expand All @@ -400,27 +402,27 @@ func (r *LinodeMachineReconciler) reconcileUpdate(
conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, "missing", clusterv1.ConditionSeverityWarning, "instance not found")
}

return res, err
return res, nil, err
}

if _, ok := requeueInstanceStatuses[linodeInstance.Status]; ok {
if linodeInstance.Updated.Add(reconciler.DefaultMachineControllerWaitForRunningTimeout).After(time.Now()) {
logger.Info("Instance has one operation running, re-queuing reconciliation", "status", linodeInstance.Status)

return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, nil
return ctrl.Result{RequeueAfter: reconciler.DefaultMachineControllerWaitForRunningDelay}, linodeInstance, nil
}

logger.Info("Instance has one operation long running, skipping reconciliation", "status", linodeInstance.Status)

conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, string(linodeInstance.Status), clusterv1.ConditionSeverityInfo, "skipped due to long running operation")

return res, nil
return res, linodeInstance, nil
} else if linodeInstance.Status != linodego.InstanceRunning {
logger.Info("Instance has incompatible status, skipping reconciliation", "status", linodeInstance.Status)

conditions.MarkFalse(machineScope.LinodeMachine, clusterv1.ReadyCondition, string(linodeInstance.Status), clusterv1.ConditionSeverityInfo, "incompatible status")

return res, nil
return res, linodeInstance, nil
}

machineScope.LinodeMachine.Status.Ready = true
Expand All @@ -429,7 +431,7 @@ func (r *LinodeMachineReconciler) reconcileUpdate(

r.Recorder.Event(machineScope.LinodeMachine, corev1.EventTypeNormal, string(clusterv1.ReadyCondition), "instance is running")

return res, nil
return res, linodeInstance, nil
}

func (r *LinodeMachineReconciler) reconcileDelete(
Expand Down
Loading

0 comments on commit 78df137

Please sign in to comment.