diff --git a/.golangci.yml b/.golangci.yml index f47fe05ba..e6232ebd2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,6 +15,9 @@ issues: - text: "struct-tag: unknown option 'inline' in JSON tag" linters: - revive + - text: "Unhandled error in call to function fmt.Print*" + linters: + - revive linters: disable-all: true enable: @@ -60,6 +63,10 @@ linters: - whitespace linters-settings: + dupl: + # Tokens count to trigger issue. + # Default: 150 + threshold: 200 gofmt: # Apply the rewrite rules to the source before reformatting. # https://pkg.go.dev/cmd/gofmt diff --git a/api/v1alpha1/clustertemplate_types.go b/api/v1alpha1/clustertemplate_types.go index 0d4127ffa..a10368b7b 100644 --- a/api/v1alpha1/clustertemplate_types.go +++ b/api/v1alpha1/clustertemplate_types.go @@ -21,7 +21,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ClusterTemplateKind = "ClusterTemplate" +const ( + // Denotes the clustertemplate resource Kind. + ClusterTemplateKind = "ClusterTemplate" + // ChartAnnotationKubernetesVersion is an annotation containing the Kubernetes exact version in the SemVer format associated with a ClusterTemplate. + ChartAnnotationKubernetesVersion = "hmc.mirantis.com/k8s-version" +) // ClusterTemplateSpec defines the desired state of ClusterTemplate type ClusterTemplateSpec struct { @@ -61,6 +66,20 @@ func (t *ClusterTemplate) FillStatusWithProviders(annotations map[string]string) return fmt.Errorf("failed to parse ClusterTemplate infrastructure providers: %v", err) } + kversion := annotations[ChartAnnotationKubernetesVersion] + if t.Spec.KubertenesVersion != "" { + kversion = t.Spec.KubertenesVersion + } + if kversion == "" { + return nil + } + + if _, err := semver.NewVersion(kversion); err != nil { + return fmt.Errorf("failed to parse kubernetes version %s: %w", kversion, err) + } + + t.Status.KubertenesVersion = kversion + return nil } @@ -69,11 +88,6 @@ func (t *ClusterTemplate) GetSpecProviders() ProvidersTupled { return t.Spec.Providers } -// GetStatusProviders returns .status.providers of the Template. -func (t *ClusterTemplate) GetStatusProviders() ProvidersTupled { - return t.Status.Providers -} - // GetHelmSpec returns .spec.helm of the Template. func (t *ClusterTemplate) GetHelmSpec() *HelmSpec { return &t.Spec.Helm diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 936b442b7..f100a4bfc 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -34,7 +34,7 @@ type ( // Holds different types of CAPI providers with either // an exact or constrainted version in the SemVer format. The requirement - // is determined by a consumer this type. + // is determined by a consumer of this type. ProvidersTupled struct { // List of CAPI infrastructure providers with either an exact or constrainted version in the SemVer format. InfrastructureProviders []ProviderTuple `json:"infrastructure,omitempty"` @@ -49,7 +49,7 @@ type ( // Name of the provider. 
Name string `json:"name,omitempty"` // Compatibility restriction in the SemVer format (exact or constrainted version) - VersionOrContraint string `json:"versionOrContraint,omitempty"` + VersionOrConstraint string `json:"versionOrConstraint,omitempty"` } ) diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 7a4915488..225ce290a 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -74,7 +74,8 @@ type ManagedClusterSpec struct { // +kubebuilder:validation:MinLength=1 // Template is a reference to a Template object located in the same namespace. - Template string `json:"template"` + Template string `json:"template"` + // Name reference to the related Credentials object. Credential string `json:"credential,omitempty"` // Services is a list of services created via ServiceTemplates // that could be installed on the target cluster. @@ -101,8 +102,11 @@ type ManagedClusterSpec struct { // ManagedClusterStatus defines the observed state of ManagedCluster type ManagedClusterStatus struct { // Currently compatible K8S version of the cluster. Being set only if - // the corresponding ClusterTemplate provided it in the spec. + // provided by the corresponding ClusterTemplate. KubertenesVersion string `json:"k8sVersion,omitempty"` + // Providers represent exposed CAPI providers with constrainted compatibility versions set. + // Propagated from the corresponding ClusterTemplate. + Providers ProvidersTupled `json:"providers,omitempty"` // Conditions contains details for the current state of the ManagedCluster Conditions []metav1.Condition `json:"conditions,omitempty"` // ObservedGeneration is the last observed generation. diff --git a/api/v1alpha1/providertemplate_types.go b/api/v1alpha1/providertemplate_types.go index e35cbd7ff..c1326b71c 100644 --- a/api/v1alpha1/providertemplate_types.go +++ b/api/v1alpha1/providertemplate_types.go @@ -21,6 +21,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// ChartAnnotationCAPIVersion is an annotation containing the CAPI exact version in the SemVer format associated with a ProviderTemplate. +const ChartAnnotationCAPIVersion = "hmc.mirantis.com/capi-version" + // ProviderTemplateSpec defines the desired state of ProviderTemplate type ProviderTemplateSpec struct { Helm HelmSpec `json:"helm"` @@ -59,6 +62,20 @@ func (t *ProviderTemplate) FillStatusWithProviders(annotations map[string]string return fmt.Errorf("failed to parse ProviderTemplate infrastructure providers: %v", err) } + capiVersion := annotations[ChartAnnotationCAPIVersion] + if t.Spec.CAPIVersion != "" { + capiVersion = t.Spec.CAPIVersion + } + if capiVersion == "" { + return nil + } + + if _, err := semver.NewVersion(capiVersion); err != nil { + return fmt.Errorf("failed to parse CAPI version %s: %w", capiVersion, err) + } + + t.Status.CAPIVersion = capiVersion + return nil } @@ -67,11 +84,6 @@ func (t *ProviderTemplate) GetSpecProviders() ProvidersTupled { return t.Spec.Providers } -// GetStatusProviders returns .status.providers of the Template. -func (t *ProviderTemplate) GetStatusProviders() ProvidersTupled { - return t.Status.Providers -} - // GetHelmSpec returns .spec.helm of the Template. 
func (t *ProviderTemplate) GetHelmSpec() *HelmSpec { return &t.Spec.Helm diff --git a/api/v1alpha1/servicetemplate_types.go b/api/v1alpha1/servicetemplate_types.go index 10c3152fc..bf2890ccd 100644 --- a/api/v1alpha1/servicetemplate_types.go +++ b/api/v1alpha1/servicetemplate_types.go @@ -15,12 +15,19 @@ package v1alpha1 import ( + "fmt" "strings" + "github.com/Masterminds/semver/v3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ServiceTemplateKind = "ServiceTemplate" +const ( + // Denotes the servicetemplate resource Kind. + ServiceTemplateKind = "ServiceTemplate" + // ChartAnnotationKubernetesConstraint is an annotation containing the Kubernetes constrainted version in the SemVer format associated with a ServiceTemplate. + ChartAnnotationKubernetesConstraint = "hmc.mirantis.com/k8s-version-constraint" +) // ServiceTemplateSpec defines the desired state of ServiceTemplate type ServiceTemplateSpec struct { @@ -43,28 +50,23 @@ type ServiceTemplateStatus struct { // FillStatusWithProviders sets the status of the template with providers // either from the spec or from the given annotations. -// -// The return parameter is noop and is always nil. func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) error { parseProviders := func(typ providersType) []string { var ( - pspec, pstatus []string - anno string + pspec []string + anno string ) switch typ { case bootstrapProvidersType: - pspec, pstatus = t.Spec.Providers.BootstrapProviders, t.Status.Providers.BootstrapProviders - anno = ChartAnnotationBootstrapProviders + pspec, anno = t.Spec.Providers.BootstrapProviders, ChartAnnotationBootstrapProviders case controlPlaneProvidersType: - pspec, pstatus = t.Spec.Providers.ControlPlaneProviders, t.Status.Providers.ControlPlaneProviders - anno = ChartAnnotationControlPlaneProviders + pspec, anno = t.Spec.Providers.ControlPlaneProviders, ChartAnnotationControlPlaneProviders case infrastructureProvidersType: - pspec, pstatus = t.Spec.Providers.InfrastructureProviders, t.Status.Providers.InfrastructureProviders - anno = ChartAnnotationInfraProviders + pspec, anno = t.Spec.Providers.InfrastructureProviders, ChartAnnotationInfraProviders } if len(pspec) > 0 { - return pstatus + return pspec } providers := annotations[anno] @@ -72,13 +74,35 @@ func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) return []string{} } - return strings.Split(providers, ",") + splitted := strings.Split(providers, ",") + result := make([]string, 0, len(splitted)) + for _, v := range splitted { + if c := strings.TrimSpace(v); c != "" { + result = append(result, c) + } + } + + return result } t.Status.Providers.BootstrapProviders = parseProviders(bootstrapProvidersType) t.Status.Providers.ControlPlaneProviders = parseProviders(controlPlaneProvidersType) t.Status.Providers.InfrastructureProviders = parseProviders(infrastructureProvidersType) + kconstraint := annotations[ChartAnnotationKubernetesConstraint] + if t.Spec.KubertenesConstraint != "" { + kconstraint = t.Spec.KubertenesConstraint + } + if kconstraint == "" { + return nil + } + + if _, err := semver.NewConstraint(kconstraint); err != nil { + return fmt.Errorf("failed to parse kubernetes constraint %s: %w", kconstraint, err) + } + + t.Status.KubertenesConstraint = kconstraint + return nil } diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go index 5ea92ccaa..36fd1f167 100644 --- a/api/v1alpha1/templates_common.go +++ b/api/v1alpha1/templates_common.go @@ -85,14 +85,10 @@ const ( 
infrastructureProvidersType ) -func parseProviders[T any](providersGetter interface { - GetSpecProviders() ProvidersTupled - GetStatusProviders() ProvidersTupled -}, typ providersType, annotations map[string]string, validationFn func(string) (T, error), -) ([]ProviderTuple, error) { - pspec, pstatus, anno := getProvidersSpecStatusAnno(providersGetter, typ) +func parseProviders[T any](providersGetter interface{ GetSpecProviders() ProvidersTupled }, typ providersType, annotations map[string]string, validationFn func(string) (T, error)) ([]ProviderTuple, error) { + pspec, anno := getProvidersSpecAnno(providersGetter, typ) if len(pspec) > 0 { - return pstatus, nil + return pspec, nil } providers := annotations[anno] @@ -102,13 +98,12 @@ func parseProviders[T any](providersGetter interface { var ( splitted = strings.Split(providers, ",") + pstatus = make([]ProviderTuple, 0, len(splitted)) merr error ) - - pstatus = make([]ProviderTuple, 0, len(splitted)) - for _, v := range splitted { - nVerOrC := strings.SplitN(v, " ", 1) + v = strings.TrimSpace(v) + nVerOrC := strings.SplitN(v, " ", 2) if len(nVerOrC) == 0 { // BCE (bound check elimination) continue } @@ -121,30 +116,26 @@ func parseProviders[T any](providersGetter interface { ver := strings.TrimSpace(nVerOrC[1]) if _, err := validationFn(ver); err != nil { // validation - merr = errors.Join(merr, fmt.Errorf("failed to parse version %s in the %s: %v", ver, v, err)) + merr = errors.Join(merr, fmt.Errorf("failed to parse %s in the %s: %v", ver, v, err)) continue } - n.VersionOrContraint = ver + n.VersionOrConstraint = ver pstatus = append(pstatus, n) } return pstatus, merr } -func getProvidersSpecStatusAnno(providersGetter interface { - GetSpecProviders() ProvidersTupled - GetStatusProviders() ProvidersTupled -}, typ providersType, -) (spec, status []ProviderTuple, annotation string) { +func getProvidersSpecAnno(providersGetter interface{ GetSpecProviders() ProvidersTupled }, typ providersType) (spec []ProviderTuple, annotation string) { switch typ { case bootstrapProvidersType: - return providersGetter.GetSpecProviders().BootstrapProviders, providersGetter.GetStatusProviders().BootstrapProviders, ChartAnnotationBootstrapProviders + return providersGetter.GetSpecProviders().BootstrapProviders, ChartAnnotationBootstrapProviders case controlPlaneProvidersType: - return providersGetter.GetSpecProviders().ControlPlaneProviders, providersGetter.GetStatusProviders().ControlPlaneProviders, ChartAnnotationControlPlaneProviders + return providersGetter.GetSpecProviders().ControlPlaneProviders, ChartAnnotationControlPlaneProviders case infrastructureProvidersType: - return providersGetter.GetSpecProviders().InfrastructureProviders, providersGetter.GetStatusProviders().InfrastructureProviders, ChartAnnotationInfraProviders + return providersGetter.GetSpecProviders().InfrastructureProviders, ChartAnnotationInfraProviders default: - return []ProviderTuple{}, []ProviderTuple{}, "" + return []ProviderTuple{}, "" } } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a0d3c518a..0060cf256 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -488,6 +488,7 @@ func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { *out = *in + in.Providers.DeepCopyInto(&out.Providers) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/go.mod b/go.mod index 7e6c43301..4bdaa60c8 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,6 @@ require ( github.com/fluxcd/pkg/apis/meta v1.6.1 github.com/fluxcd/pkg/runtime v0.49.1 github.com/fluxcd/source-controller/api v1.4.1 - github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/onsi/ginkgo/v2 v2.20.2 @@ -72,6 +71,7 @@ require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index 24476c1ff..86a05fb4c 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -26,7 +26,6 @@ import ( fluxmeta "github.com/fluxcd/pkg/apis/meta" fluxconditions "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1" - "github.com/go-logr/logr" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" corev1 "k8s.io/api/core/v1" @@ -38,7 +37,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" @@ -107,7 +105,7 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque if !managedCluster.DeletionTimestamp.IsZero() { l.Info("Deleting ManagedCluster") - return r.Delete(ctx, l, managedCluster) + return r.Delete(ctx, managedCluster) } if managedCluster.Status.ObservedGeneration == 0 { @@ -121,10 +119,13 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque l.Error(err, "Failed to track ManagedCluster creation") } } - return r.Update(ctx, l, managedCluster) + + return r.Update(ctx, managedCluster) } -func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (bool, error) { +func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Context, managedCluster *hmc.ManagedCluster) (requeue bool, _ error) { + l := ctrl.LoggerFrom(ctx) + resourceID := schema.GroupVersionResource{ Group: "cluster.x-k8s.io", Version: "v1beta1", @@ -181,7 +182,9 @@ func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Contex return !allConditionsComplete, nil } -func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (result ctrl.Result, err error) { +func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *hmc.ManagedCluster) (result ctrl.Result, err error) { + l := ctrl.LoggerFrom(ctx) + finalizersUpdated := controllerutil.AddFinalizer(managedCluster, hmc.ManagedClusterFinalizer) if finalizersUpdated { if err := r.Client.Update(ctx, managedCluster); err != nil { @@ -214,6 +217,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) return ctrl.Result{}, err } + if 
!template.Status.Valid { errMsg := "provided template is not marked as valid" apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ @@ -224,12 +228,17 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) return ctrl.Result{}, errors.New(errMsg) } + // template is ok, propagate data from it + managedCluster.Status.KubertenesVersion = template.Status.KubertenesVersion + managedCluster.Status.Providers = template.Status.Providers + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionTrue, Reason: hmc.SucceededReason, Message: "Template is valid", }) + source, err := r.getSource(ctx, template.Status.ChartRef) if err != nil { apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ @@ -347,7 +356,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) } - requeue, err := r.setStatusFromClusterStatus(ctx, l, managedCluster) + requeue, err := r.setStatusFromClusterStatus(ctx, managedCluster) if err != nil { if requeue { return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err @@ -387,7 +396,7 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M } tmpl := &hmc.ServiceTemplate{} - tmplRef := types.NamespacedName{Name: svc.Template, Namespace: mc.Namespace} + tmplRef := client.ObjectKey{Name: svc.Template, Namespace: mc.Namespace} if err := r.Get(ctx, tmplRef, tmpl); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err) } @@ -427,7 +436,7 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M }) } - if _, err := sveltos.ReconcileProfile(ctx, r.Client, l, mc.Namespace, mc.Name, + if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name, map[string]string{ hmc.FluxHelmChartNamespaceKey: mc.Namespace, hmc.FluxHelmChartNameKey: mc.Name, @@ -457,14 +466,14 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M // getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate. // It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository. func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) { - tmplRef := types.NamespacedName{Namespace: tmpl.Namespace, Name: tmpl.Name} + tmplRef := client.ObjectKey{Namespace: tmpl.Namespace, Name: tmpl.Name} if tmpl.Status.ChartRef == nil { return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String()) } hc := &sourcev1.HelmChart{} - if err := r.Get(ctx, types.NamespacedName{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: tmpl.Status.ChartRef.Namespace, Name: tmpl.Status.ChartRef.Name, }, hc); err != nil { @@ -472,7 +481,7 @@ func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, } repo := &sourcev1.HelmRepository{} - if err := r.Get(ctx, types.NamespacedName{ + if err := r.Get(ctx, client.ObjectKey{ // Using chart's namespace because it's source // (helm repository in this case) should be within the same namespace. 
Namespace: hc.Namespace, @@ -552,7 +561,9 @@ func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.Cros return &hc, nil } -func (r *ManagedClusterReconciler) Delete(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { +func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + hr := &hcv2.HelmRelease{} err := r.Get(ctx, client.ObjectKey{ Name: managedCluster.Name, @@ -632,7 +643,7 @@ func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNam template := &hmc.ClusterTemplate{} templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "namespace", templateNamespace, "name", templateName) + ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "template namespace", templateNamespace, "template name", templateName) return nil, err } diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index 8b1cf9799..e92b8eeab 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -143,7 +143,7 @@ func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template tem } err := helm.ReconcileHelmRepository(ctx, r.Client, defaultRepoName, namespace, r.DefaultRegistryConfig.HelmRepositorySpec()) if err != nil { - l.Error(err, "Failed to reconcile default HelmRepository", "namespace", template.GetNamespace()) + l.Error(err, "Failed to reconcile default HelmRepository") return ctrl.Result{}, err } } diff --git a/internal/controller/templatemanagement_controller.go b/internal/controller/templatemanagement_controller.go index 40f69a2b8..571418563 100644 --- a/internal/controller/templatemanagement_controller.go +++ b/internal/controller/templatemanagement_controller.go @@ -248,7 +248,7 @@ func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, } return err } - l.Info(fmt.Sprintf("%s was successfully created", source.Kind()), "namespace", targetNamespace, "name", source.GetName()) + l.Info(fmt.Sprintf("%s was successfully created", source.Kind()), "target namespace", targetNamespace, "source name", source.GetName()) return nil } @@ -262,7 +262,7 @@ func (r *TemplateManagementReconciler) deleteTemplateChain(ctx context.Context, } return err } - l.Info(fmt.Sprintf("%s was successfully deleted", chain.Kind()), "namespace", chain.GetNamespace(), "name", chain.GetName()) + l.Info(fmt.Sprintf("%s was successfully deleted", chain.Kind()), "chain namespace", chain.GetNamespace(), "chain name", chain.GetName()) return nil } diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go index 500b40dab..7c4f60ccf 100644 --- a/internal/sveltos/profile.go +++ b/internal/sveltos/profile.go @@ -20,7 +20,6 @@ import ( "math" hmc "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/go-logr/logr" sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -53,12 +52,13 @@ type HelmChartOpts struct { // ReconcileProfile reconciles a Sveltos Profile object. 
func ReconcileProfile(ctx context.Context, cl client.Client, - l logr.Logger, namespace string, name string, matchLabels map[string]string, opts ReconcileProfileOpts, ) (*sveltosv1beta1.Profile, error) { + l := ctrl.LoggerFrom(ctx) + cp := &sveltosv1beta1.Profile{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go index 113a89f60..c4dd0ac21 100644 --- a/internal/webhook/managedcluster_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -18,8 +18,11 @@ import ( "context" "errors" "fmt" + "slices" "sort" + "github.com/Masterminds/semver/v3" + admissionv1 "k8s.io/api/admission/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -28,19 +31,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/Mirantis/hmc/api/v1alpha1" + hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" ) type ManagedClusterValidator struct { client.Client } -var errInvalidManagedCluster = errors.New("the ManagedCluster is invalid") +const invalidManagedClusterMsg = "the ManagedCluster is invalid" func (v *ManagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { v.Client = mgr.GetClient() return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.ManagedCluster{}). + For(&hmcv1alpha1.ManagedCluster{}). WithValidator(v). WithDefaulter(v). Complete() @@ -53,38 +56,95 @@ var ( // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - managedCluster, ok := obj.(*v1alpha1.ManagedCluster) + managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj)) } + template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } + return nil, nil } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, _ runtime.Object, newObj runtime.Object) (admission.Warnings, error) { - newManagedCluster, ok := newObj.(*v1alpha1.ManagedCluster) + newManagedCluster, ok := newObj.(*hmcv1alpha1.ManagedCluster) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", newObj)) } + template, err := v.getManagedClusterTemplate(ctx, newManagedCluster.Namespace, newManagedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } + + if err := validateK8sCompatibility(ctx, v.Client, newManagedCluster); err != nil { + return admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %v", err) + } + return nil, nil } +func validateK8sCompatibility(ctx context.Context, cl client.Client, mc *hmcv1alpha1.ManagedCluster) error { + if len(mc.Spec.Services) == 0 || mc.Status.KubertenesVersion == "" { + return nil + } + + svcTpls := new(hmcv1alpha1.ServiceTemplateList) + if err := cl.List(ctx, svcTpls, client.InNamespace(mc.Namespace)); err != nil { + return fmt.Errorf("failed to list ServiceTemplates in %s namespace: %w", mc.Namespace, err) + } + + svcTplName2KConstraint := make(map[string]string, len(svcTpls.Items)) + for _, v := range svcTpls.Items { + svcTplName2KConstraint[v.Name] = v.Status.KubertenesConstraint + } + + mcVersion, err := semver.NewVersion(mc.Status.KubertenesVersion) + if err != nil { // should never happen + return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", mc.Status.KubertenesVersion, mc.Namespace, mc.Name, err) + } + + for _, v := range mc.Spec.Services { + if v.Disable { + continue + } + + kc, ok := svcTplName2KConstraint[v.Template] + if !ok { + return fmt.Errorf("specified ServiceTemplate %s/%s is missing in the cluster", mc.Namespace, v.Template) + } + + if kc == "" { + continue + } + + tplConstraint, err := semver.NewConstraint(kc) + if err != nil { // should never happen + return fmt.Errorf("failed to parse k8s constrainted version %s of the ServiceTemplate %s/%s: %w", kc, mc.Namespace, v.Template, err) + } + + if !tplConstraint.Check(mcVersion) { + return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrainted version %s from the ServiceTemplate %s/%s", + mc.Status.KubertenesVersion, mc.Namespace, mc.Name, + kc, mc.Namespace, v.Template) + } + } + + return nil +} + // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { return nil, nil @@ -92,94 +152,168 @@ func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Obje // Default implements webhook.Defaulter so a webhook will be registered for the type. 
func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Object) error { - managedCluster, ok := obj.(*v1alpha1.ManagedCluster) + managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj)) } - // Only apply defaults when there's no configuration provided - if managedCluster.Spec.Config != nil { + // Only apply defaults when there's no configuration provided; + // if template ref is empty, then nothing to default + if managedCluster.Spec.Config != nil || managedCluster.Spec.Template == "" { return nil } + template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template) if err != nil { - return fmt.Errorf("could not get template for the managedcluster: %s", err) + return fmt.Errorf("could not get template for the managedcluster: %v", err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return fmt.Errorf("template is invalid: %s", err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return fmt.Errorf("template is invalid: %v", err) } + if template.Status.Config == nil { return nil } + managedCluster.Spec.DryRun = true managedCluster.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw} + return nil } -func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (*v1alpha1.ClusterTemplate, error) { - template := &v1alpha1.ClusterTemplate{} - templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} - if err := v.Get(ctx, templateRef, template); err != nil { - return nil, err - } - return template, nil +func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (tpl *hmcv1alpha1.ClusterTemplate, err error) { + tpl = new(hmcv1alpha1.ClusterTemplate) + return tpl, v.Get(ctx, client.ObjectKey{Namespace: templateNamespace, Name: templateName}, tpl) } -func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *v1alpha1.ClusterTemplate) error { +func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { if !template.Status.Valid { return fmt.Errorf("the template is not valid: %s", template.Status.ValidationError) } - err := v.verifyProviders(ctx, template) - if err != nil { - return fmt.Errorf("providers verification failed: %v", err) + + if err := v.verifyProviders(ctx, template); err != nil { + return fmt.Errorf("failed to verify providers: %v", err) } + return nil } -func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *v1alpha1.ClusterTemplate) error { - requiredProviders := template.Status.Providers - management := &v1alpha1.Management{} - managementRef := client.ObjectKey{Name: v1alpha1.ManagementName} - if err := v.Get(ctx, managementRef, management); err != nil { +func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { + management := new(hmcv1alpha1.Management) + if err := v.Get(ctx, client.ObjectKey{Name: hmcv1alpha1.ManagementName}, management); err != nil { return err } - exposedProviders := management.Status.AvailableProviders - missingProviders := make(map[string][]string) - missingProviders["bootstrap"] = getMissingProviders(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) - missingProviders["control plane"] = 
getMissingProviders(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders)
-	missingProviders["infrastructure"] = getMissingProviders(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders)
+	const (
+		bootstrapProviderType    = "bootstrap"
+		controlPlaneProviderType = "control plane"
+		infraProviderType        = "infrastructure"
+	)
 
-	var errs []error
-	for providerType, missing := range missingProviders {
-		if len(missing) > 0 {
-			sort.Slice(missing, func(i, j int) bool {
-				return missing[i] < missing[j]
-			})
-			errs = append(errs, fmt.Errorf("one or more required %s providers are not deployed yet: %v", providerType, missing))
+	var (
+		exposedProviders  = management.Status.AvailableProviders
+		requiredProviders = template.Status.Providers
+
+		missingBootstrap, missingCP, missingInfra []string
+		wrongVersionProviders                     map[string][]string
+	)
+
+	// on update, validate the exact versions from the provider templates against the constraints from the cluster template
+	if req, _ := admission.RequestFromContext(ctx); req.Operation == admissionv1.Update {
+		wrongVersionProviders = make(map[string][]string, 3)
+		missing, wrongVers, err := getMissingProvidersWithWrongVersions(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders)
+		if err != nil {
+			return err
 		}
+		wrongVersionProviders[bootstrapProviderType], missingBootstrap = wrongVers, missing
+
+		missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders)
+		if err != nil {
+			return err
+		}
+		wrongVersionProviders[controlPlaneProviderType], missingCP = wrongVers, missing
+
+		missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders)
+		if err != nil {
+			return err
+		}
+		wrongVersionProviders[infraProviderType], missingInfra = wrongVers, missing
+	} else {
+		missingBootstrap = getMissingProviders(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders)
+		missingCP = getMissingProviders(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders)
+		missingInfra = getMissingProviders(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders)
 	}
+
+	missingProviders := map[string][]string{
+		bootstrapProviderType:    missingBootstrap,
+		controlPlaneProviderType: missingCP,
+		infraProviderType:        missingInfra,
+	}
+
+	errs := collectErrors(missingProviders, "one or more required %s providers are not deployed yet: %v")
+	errs = append(errs, collectErrors(wrongVersionProviders, "one or more required %s providers does not satisfy constraints: %v")...)
 	if len(errs) > 0 {
 		sort.Slice(errs, func(i, j int) bool {
 			return errs[i].Error() < errs[j].Error()
 		})
+
 		return errors.Join(errs...)
 	}
+
 	return nil
 }
 
-func getMissingProviders(exposedProviders, requiredProviders []v1alpha1.ProviderTuple) (missing []string) {
-	exposedSet := make(map[string]struct{}, len(requiredProviders))
-	for _, v := range exposedProviders {
-		exposedSet[v.Name] = struct{}{}
+func collectErrors(m map[string][]string, msgFormat string) (errs []error) {
+	for providerType, missing := range m {
+		if len(missing) > 0 {
+			slices.Sort(missing)
+			errs = append(errs, fmt.Errorf(msgFormat, providerType, missing))
+		}
+	}
+
+	return errs
+}
+
+func getMissingProviders(exposed, required []hmcv1alpha1.ProviderTuple) (missing []string) {
+	missing, _, _ = getMissingProvidersWithWrongVersions(exposed, required)
+	return missing
+}
+
+func getMissingProvidersWithWrongVersions(exposed, required []hmcv1alpha1.ProviderTuple) (missing, nonSatisfying []string, _ error) {
+	exposedSet := make(map[string]hmcv1alpha1.ProviderTuple, len(exposed))
+	for _, v := range exposed {
+		exposedSet[v.Name] = v
 	}
 
-	for _, v := range requiredProviders {
-		if _, ok := exposedSet[v.Name]; !ok {
-			missing = append(missing, v.Name)
+	var merr error
+	for _, reqWithConstraint := range required {
+		exposedWithExactVer, ok := exposedSet[reqWithConstraint.Name]
+		if !ok {
+			missing = append(missing, reqWithConstraint.Name)
+			continue
+		}
+
+		if exposedWithExactVer.VersionOrConstraint == "" || reqWithConstraint.VersionOrConstraint == "" {
+			continue
+		}
+
+		exactVer, err := semver.NewVersion(exposedWithExactVer.VersionOrConstraint)
+		if err != nil {
+			merr = errors.Join(merr, fmt.Errorf("failed to parse version %s of the provider %s: %w", exposedWithExactVer.VersionOrConstraint, exposedWithExactVer.Name, err))
+			continue
+		}
+
+		requiredC, err := semver.NewConstraint(reqWithConstraint.VersionOrConstraint)
+		if err != nil {
+			merr = errors.Join(merr, fmt.Errorf("failed to parse constraint %s of the provider %s: %w", reqWithConstraint.VersionOrConstraint, reqWithConstraint.Name, err))
+			continue
+		}
+
+		if !requiredC.Check(exactVer) {
+			nonSatisfying = append(nonSatisfying, fmt.Sprintf("%s %s !~ %s", reqWithConstraint.Name, exposedWithExactVer.VersionOrConstraint, reqWithConstraint.VersionOrConstraint))
 		}
 	}
-	return missing
+
+	return missing, nonSatisfying, merr
 }
diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go
index 32557cfea..e5945223e 100644
--- a/internal/webhook/managedcluster_webhook_test.go
+++ b/internal/webhook/managedcluster_webhook_test.go
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	.
"github.com/onsi/gomega" + admissionv1 "k8s.io/api/admission/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -57,7 +58,7 @@ var ( }, { name: "should fail if the ClusterTemplate is not found in the ManagedCluster's namespace", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -69,7 +70,7 @@ var ( }, { name: "should fail if the cluster template was found but is invalid (some validation error)", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -84,7 +85,7 @@ var ( }, { name: "should fail if one or more requested providers are not available yet", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ management.NewManagement( management.WithAvailableProviders(v1alpha1.ProvidersTupled{ @@ -102,11 +103,11 @@ var ( template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), }, - err: "the ManagedCluster is invalid: providers verification failed: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]", + err: "the ManagedCluster is invalid: failed to verify providers: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]", }, { name: "should succeed", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -126,7 +127,11 @@ var ( func TestManagedClusterValidateCreate(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }) for _, tt := range createAndUpdateTests { t.Run(tt.name, func(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() @@ -140,11 +145,8 @@ func TestManagedClusterValidateCreate(t *testing.T) { } else { g.Expect(err).To(Succeed()) } - if len(tt.warnings) > 0 { - g.Expect(warn).To(Equal(tt.warnings)) - } else { - g.Expect(warn).To(BeEmpty()) - } + + g.Expect(warn).To(Equal(tt.warnings)) }) } } @@ -152,8 +154,81 @@ func TestManagedClusterValidateCreate(t *testing.T) { func TestManagedClusterValidateUpdate(t *testing.T) { g := NewWithT(t) - ctx := context.Background() - for _, tt := range createAndUpdateTests { + updateTests := append(createAndUpdateTests[:0:0], createAndUpdateTests...) 
+ updateTests = append(updateTests, []struct { + name string + managedCluster *v1alpha1.ManagedCluster + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "provider template versions does not satisfy cluster template constraints", + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + existingObjects: []runtime.Object{ + management.NewManagement(management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: "v1.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + })), + template.NewClusterTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: ">=999.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, + }), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + }, + err: `the ManagedCluster is invalid: failed to verify providers: one or more required bootstrap providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] +one or more required control plane providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] +one or more required infrastructure providers does not satisfy constraints: [aws v1.0.0 !~ >=999.0.0]`, + }, + { + name: "cluster template k8s version does not satisfy service template constraints", + managedCluster: managedcluster.NewManagedCluster( + managedcluster.WithClusterTemplate(testTemplateName), + managedcluster.WithK8sVersionStatus("v1.30.0"), + managedcluster.WithServiceTemplate(testTemplateName), + ), + existingObjects: []runtime.Object{ + management.NewManagement(management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: "v1.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + })), + template.NewClusterTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + }), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + template.NewServiceTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.Providers{ + InfrastructureProviders: []string{"aws"}, + BootstrapProviders: []string{"k0s"}, + ControlPlaneProviders: []string{"k0s"}, + }), + template.WithServiceK8sConstraint("<1.30"), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + }, + err: fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ManagedCluster default/managedcluster does not satisfy constrainted version <1.30 from the ServiceTemplate default/%s`, testTemplateName), + warnings: admission.Warnings{"Failed to validate k8s version compatibility with 
ServiceTemplates"}, + }, + }...) + + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + }, + }) + for _, tt := range updateTests { t.Run(tt.name, func(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() validator := &ManagedClusterValidator{Client: c} @@ -166,11 +241,8 @@ func TestManagedClusterValidateUpdate(t *testing.T) { } else { g.Expect(err).To(Succeed()) } - if len(tt.warnings) > 0 { - g.Expect(warn).To(Equal(tt.warnings)) - } else { - g.Expect(warn).To(BeEmpty()) - } + + g.Expect(warn).To(Equal(tt.warnings)) }) } } @@ -196,8 +268,8 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should not set defaults: template is invalid", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), - output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -212,8 +284,8 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should not set defaults: config in template status is unset", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), - output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -224,9 +296,9 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should set defaults", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), output: managedcluster.NewManagedCluster( - managedcluster.WithTemplate(testTemplateName), + managedcluster.WithClusterTemplate(testTemplateName), managedcluster.WithConfig(managedClusterConfig), managedcluster.WithDryRun(true), ), diff --git a/internal/webhook/template_webhook.go b/internal/webhook/template_webhook.go index 298aee065..a57343ef7 100644 --- a/internal/webhook/template_webhook.go +++ b/internal/webhook/template_webhook.go @@ -20,7 +20,6 @@ import ( "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -68,13 +67,10 @@ func (v *ClusterTemplateValidator) ValidateDelete(ctx context.Context, obj runti } managedClusters := &v1alpha1.ManagedClusterList{} - listOptions := client.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{v1alpha1.TemplateKey: template.Name}), - Limit: 1, - Namespace: template.Namespace, - } - err := v.Client.List(ctx, managedClusters, &listOptions) - if err != nil { + if err := v.Client.List(ctx, managedClusters, + client.InNamespace(template.Namespace), + client.MatchingFields{v1alpha1.TemplateKey: template.Name}, + client.Limit(1)); err != nil { return nil, err } @@ -126,11 +122,10 @@ func (v *ServiceTemplateValidator) ValidateDelete(ctx context.Context, obj runti } 
managedClusters := &v1alpha1.ManagedClusterList{} - if err := v.Client.List(ctx, managedClusters, &client.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{v1alpha1.ServicesTemplateKey: tmpl.Name}), - Limit: 1, - Namespace: tmpl.Namespace, - }); err != nil { + if err := v.Client.List(ctx, managedClusters, + client.InNamespace(tmpl.Namespace), + client.MatchingFields{v1alpha1.ServicesTemplateKey: tmpl.Name}, + client.Limit(1)); err != nil { return nil, err } diff --git a/internal/webhook/template_webhook_test.go b/internal/webhook/template_webhook_test.go index 9db759593..44938a4cc 100644 --- a/internal/webhook/template_webhook_test.go +++ b/internal/webhook/template_webhook_test.go @@ -47,7 +47,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { template: tpl, existingObjects: []runtime.Object{managedcluster.NewManagedCluster( managedcluster.WithNamespace(namespace), - managedcluster.WithTemplate(tpl.Name), + managedcluster.WithClusterTemplate(tpl.Name), )}, warnings: admission.Warnings{"The ClusterTemplate object can't be removed if ManagedCluster objects referencing it still exist"}, err: "template deletion is forbidden", @@ -57,7 +57,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { template: tpl, existingObjects: []runtime.Object{managedcluster.NewManagedCluster( managedcluster.WithNamespace("new"), - managedcluster.WithTemplate(tpl.Name), + managedcluster.WithClusterTemplate(tpl.Name), )}, }, { @@ -68,7 +68,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { { name: "should succeed", template: template.NewClusterTemplate(), - existingObjects: []runtime.Object{managedcluster.NewManagedCluster(managedcluster.WithTemplate(tplTest.Name))}, + existingObjects: []runtime.Object{managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(tplTest.Name))}, }, } diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index 4dc8aadd6..91bcd8de5 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -122,7 +122,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -138,7 +138,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -154,7 +154,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -230,7 +230,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -246,7 +246,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -262,7 +262,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index 37d55cab1..e1fa118ea 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -63,6 +63,7 @@ spec: the template and DryRun will be enabled. x-kubernetes-preserve-unknown-fields: true credential: + description: Name reference to the related Credentials object. type: string dryRun: description: DryRun specifies whether the template should be applied @@ -191,12 +192,66 @@ spec: k8sVersion: description: |- Currently compatible K8S version of the cluster. Being set only if - the corresponding ClusterTemplate provided it in the spec. + provided by the corresponding ClusterTemplate. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer + providers: + description: |- + Providers represent exposed CAPI providers with constrainted compatibility versions set. + Propagated from the corresponding ClusterTemplate. + properties: + bootstrap: + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + controlPlane: + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + infrastructure: + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + type: object type: object type: object served: true diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml index 04c98a061..5f9d66007 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml @@ -126,7 +126,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -142,7 +142,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -158,7 +158,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index 8442a2e15..e00635e13 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -120,7 +120,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -136,7 +136,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -152,7 +152,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -227,7 +227,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -243,7 +243,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -259,7 +259,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/test/managedcluster/vsphere/vsphere.go b/test/managedcluster/vsphere/vsphere.go index 1d9b3f4eb..620a42cfb 100644 --- a/test/managedcluster/vsphere/vsphere.go +++ b/test/managedcluster/vsphere/vsphere.go @@ -89,7 +89,7 @@ func CreateClusterIdentity(kc *kubeclient.KubeClient, secretName string, identit result, err := client.Resource(gvr).Create(ctx, clusterIdentity, metav1.CreateOptions{}) if err != nil { - fmt.Printf("%+v", result) //nolint:revive // false-positive + fmt.Printf("%+v", result) return fmt.Errorf("failed to create vsphereclusteridentity: %w", err) } diff --git a/test/objects/managedcluster/managedcluster.go b/test/objects/managedcluster/managedcluster.go index 377d20ec6..15a7d1525 100644 --- a/test/objects/managedcluster/managedcluster.go +++ b/test/objects/managedcluster/managedcluster.go @@ -60,12 +60,18 @@ func WithDryRun(dryRun bool) Opt { } } -func WithTemplate(templateName string) Opt { +func WithClusterTemplate(templateName string) Opt { return func(p *v1alpha1.ManagedCluster) { p.Spec.Template = templateName } } +func WithK8sVersionStatus(v string) Opt { + return func(managedCluster *v1alpha1.ManagedCluster) { + managedCluster.Status.KubertenesVersion = v + } +} + func WithConfig(config string) Opt { return func(p *v1alpha1.ManagedCluster) { p.Spec.Config = &apiextensionsv1.JSON{ diff --git a/test/objects/template/template.go b/test/objects/template/template.go index 6fa1e5bd8..52fcf56fd 100644 --- a/test/objects/template/template.go +++ b/test/objects/template/template.go @@ -123,6 +123,17 @@ func WithHelmSpec(helmSpec v1alpha1.HelmSpec) Opt { } } +func WithServiceK8sConstraint(v string) Opt { + return func(template Template) { + switch tt := template.(type) { + case *v1alpha1.ServiceTemplate: + tt.Status.KubertenesConstraint = v + default: + panic(fmt.Sprintf("unexpected obj typed %T, expected *ServiceTemplate", tt)) + } + } +} + func WithValidationStatus(validationStatus v1alpha1.TemplateValidationStatus) Opt { return func(t Template) { status := t.GetCommonStatus()
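
The compatibility checks added throughout this diff (`verifyProviders` and `validateK8sCompatibility` in the webhook, `FillStatusWithProviders` on the template types) all reduce to the same `github.com/Masterminds/semver/v3` pattern: one side carries an exact version, the other a constraint. A minimal self-contained sketch of that pattern (illustrative only, not part of the patch; the `satisfies` helper is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// satisfies reports whether an exact version (e.g. an exposed provider version
// or a cluster's k8s version) matches a SemVer constraint (e.g. one declared
// by a ClusterTemplate or ServiceTemplate).
func satisfies(exactVersion, constraint string) (bool, error) {
	v, err := semver.NewVersion(exactVersion) // tolerates a leading "v"
	if err != nil {
		return false, fmt.Errorf("failed to parse version %s: %w", exactVersion, err)
	}

	c, err := semver.NewConstraint(constraint) // e.g. "<1.30" or ">=999.0.0"
	if err != nil {
		return false, fmt.Errorf("failed to parse constraint %s: %w", constraint, err)
	}

	return c.Check(v), nil
}

func main() {
	for _, pair := range [][2]string{
		{"v1.30.0", "<1.30"},    // false: mirrors the failing ServiceTemplate test case
		{"v1.29.4", "<1.30"},    // true
		{"v1.0.0", ">=999.0.0"}, // false: mirrors the failing provider test case
	} {
		ok, err := satisfies(pair[0], pair[1])
		fmt.Println(pair[0], pair[1], ok, err)
	}
}
```

A constraint with a missing patch element such as `<1.30` behaves as `<1.30.0`, which is why `v1.30.0` fails the ServiceTemplate constraint in the new `TestManagedClusterValidateUpdate` case while `v1.29.4` would pass.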