From c12515ac757b638abea43a09aece787bc5cf9471 Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Fri, 30 Aug 2024 08:20:50 -0700 Subject: [PATCH] Add CAPI status/conditions to deployment status --- api/v1alpha1/deployment_types.go | 1 + cmd/main.go | 14 +++- internal/controller/deployment_controller.go | 82 ++++++++++++++++++- .../controller/deployment_controller_test.go | 2 + templates/hmc/templates/rbac/roles.yaml | 7 ++ 5 files changed, 100 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/deployment_types.go b/api/v1alpha1/deployment_types.go index 099bb0b4f..93a69fe14 100644 --- a/api/v1alpha1/deployment_types.go +++ b/api/v1alpha1/deployment_types.go @@ -24,6 +24,7 @@ import ( const ( DeploymentFinalizer = "hmc.mirantis.com/deployment" + FluxHelmChartNameKey = "helm.toolkit.fluxcd.io/name" HMCManagedLabelKey = "hmc.mirantis.com/managed" HMCManagedLabelValue = "true" ) diff --git a/cmd/main.go b/cmd/main.go index 7bdab0a6a..d1bc29449 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -27,6 +27,7 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -152,6 +153,12 @@ func main() { os.Exit(1) } + dc, err := dynamic.NewForConfig(mgr.GetConfig()) + if err != nil { + setupLog.Error(err, "failed to create dynamic client") + os.Exit(1) + } + if err = (&controller.TemplateReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), @@ -160,9 +167,10 @@ func main() { os.Exit(1) } if err = (&controller.DeploymentReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Config: mgr.GetConfig(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + DynamicClient: dc, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", 
"Deployment") os.Exit(1) diff --git a/internal/controller/deployment_controller.go b/internal/controller/deployment_controller.go index dbeeaffc9..1767a5bf3 100644 --- a/internal/controller/deployment_controller.go +++ b/internal/controller/deployment_controller.go @@ -20,6 +20,8 @@ import ( "fmt" "time" + "k8s.io/apimachinery/pkg/labels" + hcv2 "github.com/fluxcd/helm-controller/api/v2" fluxmeta "github.com/fluxcd/pkg/apis/meta" fluxconditions "github.com/fluxcd/pkg/runtime/conditions" @@ -30,8 +32,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,8 +53,9 @@ import ( // DeploymentReconciler reconciles a Deployment object type DeploymentReconciler struct { client.Client - Scheme *runtime.Scheme - Config *rest.Config + Scheme *runtime.Scheme + Config *rest.Config + DynamicClient *dynamic.DynamicClient } // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -57,7 +63,6 @@ type DeploymentReconciler struct { func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := log.FromContext(ctx).WithValues("DeploymentController", req.NamespacedName) l.Info("Reconciling Deployment") - deployment := &hmc.Deployment{} if err := r.Get(ctx, req.NamespacedName, deployment); err != nil { if apierrors.IsNotFound(err) { @@ -87,6 +92,63 @@ func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.Update(ctx, l, deployment) } +func (r *DeploymentReconciler) setStatusFromClusterStatus(ctx context.Context, l logr.Logger, deployment *hmc.Deployment) (bool, error) { + resourceId := 
schema.GroupVersionResource{ + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Resource: "clusters", + } + + list, err := r.DynamicClient.Resource(resourceId).Namespace(deployment.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: deployment.Name}).String(), + }) + + if err != nil && !apierrors.IsNotFound(err) { + return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: %w", + deployment.Name, deployment.Namespace, err) + } + + if apierrors.IsNotFound(err) || len(list.Items) == 0 { + l.Info("Clusters not found, ignoring since object must be deleted or not yet created") + return true, nil + } + conditions, found, err := unstructured.NestedSlice(list.Items[0].Object, "status", "conditions") + if err != nil { + return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: %w", + deployment.Name, deployment.Namespace, err) + } + if !found { + return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: status.conditions not found", + deployment.Name, deployment.Namespace) + } + + allConditionsComplete := true + for _, condition := range conditions { + conditionMap, ok := condition.(map[string]interface{}) + if !ok { + return true, fmt.Errorf("failed to cast condition to map[string]interface{} for deployment: %s in namespace: %s", + deployment.Name, deployment.Namespace) + } + + var metaCondition metav1.Condition + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(conditionMap, &metaCondition); err != nil { + return true, fmt.Errorf("failed to convert unstructured conditions to metav1.Condition for deployment %s in namespace: %s: %w", + deployment.Name, deployment.Namespace, err) + } + + if metaCondition.Status != "True" { + allConditionsComplete = false + } + + if metaCondition.Reason == "" && metaCondition.Status == "True" { + metaCondition.Reason = "Succeeded" + } + 
apimeta.SetStatusCondition(deployment.GetConditions(), metaCondition) + } + + return !allConditionsComplete, nil +} + func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deployment *hmc.Deployment) (result ctrl.Result, err error) { finalizersUpdated := controllerutil.AddFinalizer(deployment, hmc.DeploymentFinalizer) if finalizersUpdated { @@ -224,6 +286,20 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy Message: hrReadyCondition.Message, }) } + + requeue, err := r.setStatusFromClusterStatus(ctx, l, deployment) + if err != nil { + if requeue { + return ctrl.Result{RequeueAfter: 10 * time.Second}, err + } else { + return ctrl.Result{}, err + } + } + + if requeue { + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + if !fluxconditions.IsReady(hr) { return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } diff --git a/internal/controller/deployment_controller_test.go b/internal/controller/deployment_controller_test.go index 236ff5311..cd0cc1dba 100644 --- a/internal/controller/deployment_controller_test.go +++ b/internal/controller/deployment_controller_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/reconcile" hmc "github.com/Mirantis/hmc/api/v1alpha1" @@ -128,6 +129,7 @@ var _ = Describe("Deployment Controller", func() { controllerReconciler := &DeploymentReconciler{ Client: k8sClient, Scheme: k8sClient.Scheme(), + Config: &rest.Config{}, } _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ diff --git a/templates/hmc/templates/rbac/roles.yaml b/templates/hmc/templates/rbac/roles.yaml index a498dfb9d..182595578 100644 --- a/templates/hmc/templates/rbac/roles.yaml +++ b/templates/hmc/templates/rbac/roles.yaml @@ -5,6 +5,13 @@ metadata: labels: {{- include "hmc.labels" . 
| nindent 4 }} rules: +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + verbs: + - get + - list - apiGroups: - helm.toolkit.fluxcd.io resources: