From 13eb7ecd7288b957172f28046eede3b132d91d22 Mon Sep 17 00:00:00 2001
From: Somtochi Onyekwere
Date: Wed, 6 Dec 2023 11:08:19 +0100
Subject: [PATCH] create distribution method for cluster info

Signed-off-by: Somtochi Onyekwere
---
 cmd/flux/check.go        | 38 +++++++++++++-------------------------
 cmd/flux/cluster_info.go | 23 ++++++++---------------
 cmd/flux/version.go      | 15 ++++++++-------
 pkg/status/status.go     | 19 ++++++++++++-------
 4 files changed, 41 insertions(+), 54 deletions(-)

diff --git a/cmd/flux/check.go b/cmd/flux/check.go
index 75fa773674..2b8eb5bb44 100644
--- a/cmd/flux/check.go
+++ b/cmd/flux/check.go
@@ -82,7 +82,8 @@ func runCheckCmd(cmd *cobra.Command, args []string) error {
 	fluxCheck()
 
-	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout)
+	defer cancel()
 	kubeClient, err := utils.KubeClient(kubeconfigArgs, kubeclientOptions)
 	if err != nil {
 		return err
 	}
@@ -105,13 +106,13 @@ func runCheckCmd(cmd *cobra.Command, args []string) error {
 		return nil
 	}
 
-	logger.Actionf("checking cluster version")
+	logger.Actionf("checking version in cluster")
 	if !fluxClusterVersionCheck(ctx, kubeClient) {
 		checkFailed = true
 	}
 
 	logger.Actionf("checking controllers")
-	if !componentsCheck(ctx, cfg, kubeClient) {
+	if !componentsCheck(ctx, kubeClient) {
 		checkFailed = true
 	}
 
@@ -190,11 +191,8 @@ func kubernetesCheck(cfg *rest.Config, constraints []string) bool {
 	return true
 }
 
-func componentsCheck(ctx context.Context, kubeConfig *rest.Config, kubeClient client.Client) bool {
-	timeoutCtx, cancel := context.WithTimeout(ctx, rootArgs.timeout)
-	defer cancel()
-
-	statusChecker, err := status.NewStatusChecker(kubeConfig, checkArgs.pollInterval, rootArgs.timeout, logger)
+func componentsCheck(ctx context.Context, kubeClient client.Client) bool {
+	statusChecker, err := status.NewStatusCheckerWithClient(kubeClient, checkArgs.pollInterval, rootArgs.timeout, logger)
 	if err != nil {
 		return false
 	}
@@ -203,7 +201,7 @@ func componentsCheck(ctx context.Context, kubeConfig *rest.Config, kubeClient cl
 	selector := client.MatchingLabels{manifestgen.PartOfLabelKey: manifestgen.PartOfLabelValue}
 	var list v1.DeploymentList
 	ns := *kubeconfigArgs.Namespace
-	if err := kubeClient.List(timeoutCtx, &list, client.InNamespace(ns), selector); err == nil {
+	if err := kubeClient.List(ctx, &list, client.InNamespace(ns), selector); err == nil {
 		if len(list.Items) == 0 {
 			logger.Failuref("no controllers found in the '%s' namespace with the label selector '%s=%s'",
 				ns, manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue)
@@ -225,18 +223,10 @@ func componentsCheck(ctx context.Context, kubeConfig *rest.Config, kubeClient cl
 }
 
 func crdsCheck(ctx context.Context, kubeClient client.Client) bool {
-	timeoutCtx, cancel := context.WithTimeout(ctx, rootArgs.timeout)
-	defer cancel()
-
-	kubeClient, err := utils.KubeClient(kubeconfigArgs, kubeclientOptions)
-	if err != nil {
-		return false
-	}
-
 	ok := true
 	selector := client.MatchingLabels{manifestgen.PartOfLabelKey: manifestgen.PartOfLabelValue}
 	var list apiextensionsv1.CustomResourceDefinitionList
-	if err := kubeClient.List(timeoutCtx, &list, client.InNamespace(*kubeconfigArgs.Namespace), selector); err == nil {
+	if err := kubeClient.List(ctx, &list, client.InNamespace(*kubeconfigArgs.Namespace), selector); err == nil {
 		if len(list.Items) == 0 {
 			logger.Failuref("no crds found with the label selector '%s=%s'",
 				manifestgen.PartOfLabelKey, manifestgen.PartOfLabelValue)
@@ -257,17 +247,15 @@ func crdsCheck(ctx context.Context, kubeClient client.Client) bool {
 }
 
 func fluxClusterVersionCheck(ctx context.Context, kubeClient client.Client) bool {
-	timeoutCtx, cancel := context.WithTimeout(ctx, rootArgs.timeout)
-	defer cancel()
-
-	distribution, bootstrapped, err := getFluxDistribution(timeoutCtx, kubeClient)
+	clusterInfo, err := getFluxClusterInfo(ctx, kubeClient)
 	if err != nil {
+		logger.Failuref("checking failed: %s", err.Error())
 		return false
 	}
 
-	if distribution != "" {
-		logger.Successf("distribution: %s", distribution)
+	if clusterInfo.distribution() != "" {
+		logger.Successf("distribution: %s", clusterInfo.distribution())
 	}
-	logger.Successf("bootstrapped: %t", bootstrapped)
+	logger.Successf("bootstrapped: %t", clusterInfo.bootstrapped)
 	return true
 }
diff --git a/cmd/flux/cluster_info.go b/cmd/flux/cluster_info.go
index b9458db522..2f2f79a339 100644
--- a/cmd/flux/cluster_info.go
+++ b/cmd/flux/cluster_info.go
@@ -22,13 +22,13 @@ import (
 
 	"github.com/manifoldco/promptui"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/fluxcd/flux2/v2/pkg/manifestgen"
 	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1"
 	sourcev1 "github.com/fluxcd/source-controller/api/v1"
+
+	"github.com/fluxcd/flux2/v2/pkg/manifestgen"
 )
 
 // bootstrapLabels are labels put on a resource by kustomize-controller. These labels on the CRD indicates
@@ -82,7 +82,7 @@ func getFluxClusterInfo(ctx context.Context, c client.Client) (fluxClusterInfo,
 		info.bootstrapped = true
 	}
 
-	// the `app.kubernetes.io` label is not set by flux but might be set by other
+	// the `app.kubernetes.io/managed-by` label is not set by flux but might be set by other
 	// tools used to install Flux e.g Helm.
 	if manager, ok := crdMetadata.Labels["app.kubernetes.io/managed-by"]; ok {
 		info.managedBy = manager
@@ -113,19 +113,12 @@ func confirmFluxInstallOverride(info fluxClusterInfo) error {
 	return err
 }
 
-func getFluxDistribution(ctx context.Context, kubeClient client.Client) (string, bool, error) {
-	clusterInfo, err := getFluxClusterInfo(ctx, kubeClient)
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			return "", false, fmt.Errorf("cluster info unavailable: %w", err)
-		}
-	}
-
-	distribution := clusterInfo.version
-	if clusterInfo.partOf != "" {
-		distribution = fmt.Sprintf("%s-%s", clusterInfo.partOf, clusterInfo.version)
+func (info fluxClusterInfo) distribution() string {
+	distribution := info.version
+	if info.partOf != "" {
+		distribution = fmt.Sprintf("%s-%s", info.partOf, info.version)
 	}
-	return distribution, clusterInfo.bootstrapped, nil
+	return distribution
 }
 
 func installManagedByFlux(manager string) bool {
diff --git a/cmd/flux/version.go b/cmd/flux/version.go
index 197b2a9d5f..9068425f24 100644
--- a/cmd/flux/version.go
+++ b/cmd/flux/version.go
@@ -25,6 +25,7 @@ import (
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/yaml/goyaml.v2"
 
@@ -55,7 +56,7 @@ type versionFlags struct {
 
 var versionArgs versionFlags
 
-type VersionInfo struct {
+type versionInfo struct {
 	Flux         string            `yaml:"flux"`
 	Distribution string            `yaml:"distribution,omitempty"`
 	Controller   map[string]string `yaml:"controller,inline"`
@@ -80,7 +81,7 @@ func versionCmdRun(cmd *cobra.Command, args []string) error {
 	// VersionInfo struct is used for yaml because we care about the order.
 	// Without this `distribution` is printed before `flux`.
 	// Unfortunately, encoding/json doesn't support the inline tag, so the struct can't be used for json.
-	yamlInfo := &VersionInfo{
+	yamlInfo := &versionInfo{
 		Controller: map[string]string{},
 	}
 	info := map[string]string{}
@@ -93,13 +94,13 @@ func versionCmdRun(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
-	distribution, _, err := getFluxDistribution(ctx, kubeClient)
-	if err != nil {
+	clusterInfo, err := getFluxClusterInfo(ctx, kubeClient)
+	if err != nil && !errors.IsNotFound(err) {
 		return err
 	}
-	if distribution != "" {
-		info["distribution"] = distribution
-		yamlInfo.Distribution = distribution
+	if clusterInfo.distribution() != "" {
+		info["distribution"] = clusterInfo.distribution()
+		yamlInfo.Distribution = clusterInfo.distribution()
 	}
 
 	selector := client.MatchingLabels{manifestgen.PartOfLabelKey: manifestgen.PartOfLabelValue}
diff --git a/pkg/status/status.go b/pkg/status/status.go
index 8b61881c47..991afe0753 100644
--- a/pkg/status/status.go
+++ b/pkg/status/status.go
@@ -45,6 +45,17 @@ type StatusChecker struct {
 	logger       log.Logger
 }
 
+func NewStatusCheckerWithClient(c client.Client, pollInterval time.Duration, timeout time.Duration, log log.Logger) (*StatusChecker, error) {
+	return &StatusChecker{
+		pollInterval: pollInterval,
+		timeout:      timeout,
+		client:       c,
+		statusPoller: polling.NewStatusPoller(c, c.RESTMapper(), polling.Options{}),
+		logger:       log,
+	}, nil
+}
+
+
 func NewStatusChecker(kubeConfig *rest.Config, pollInterval time.Duration, timeout time.Duration, log log.Logger) (*StatusChecker, error) {
 	restMapper, err := runtimeclient.NewDynamicRESTMapper(kubeConfig)
 	if err != nil {
@@ -55,13 +66,7 @@ func NewStatusChecker(kubeConfig *rest.Config, pollInterval time.Duration, timeo
 		return nil, err
 	}
 
-	return &StatusChecker{
-		pollInterval: pollInterval,
-		timeout:      timeout,
-		client:       c,
-		statusPoller: polling.NewStatusPoller(c, restMapper, polling.Options{}),
-		logger:       log,
-	}, nil
+	return NewStatusCheckerWithClient(c, pollInterval, timeout, log)
 }
 
 func (sc *StatusChecker) Assess(identifiers ...object.ObjMetadata) error {
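
Reviewer note, not part of the patch (kept below the last hunk so the patch still applies): the new distribution() method centralizes the formatting that check.go and version.go previously obtained from getFluxDistribution. A minimal, runnable sketch of its behaviour; the struct below mirrors only the unexported fluxClusterInfo fields the method reads, and the example values are made up:

package main

import "fmt"

// fluxClusterInfo mirrors a subset of the unexported struct in
// cmd/flux/cluster_info.go; distribution() is copied from the patch.
type fluxClusterInfo struct {
	partOf  string
	version string
}

// distribution prefixes the version with the part-of label when one is set.
func (info fluxClusterInfo) distribution() string {
	distribution := info.version
	if info.partOf != "" {
		distribution = fmt.Sprintf("%s-%s", info.partOf, info.version)
	}
	return distribution
}

func main() {
	fmt.Println(fluxClusterInfo{version: "v2.2.0"}.distribution())                 // v2.2.0
	fmt.Println(fluxClusterInfo{partOf: "flux", version: "v2.2.0"}.distribution()) // flux-v2.2.0
}

Callers that previously unpacked getFluxDistribution's (string, bool, error) now fetch a fluxClusterInfo once and call the method; this also lets each caller handle errors itself, so version.go can tolerate errors.IsNotFound while check.go reports every failure.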
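
A second sketch, also not part of the patch, for the new constructor in pkg/status: NewStatusCheckerWithClient lets a caller that already holds a controller-runtime client (as componentsCheck now does) reuse the client and its RESTMapper instead of rebuilding a dynamic REST mapper from a *rest.Config, and NewStatusChecker becomes a thin wrapper over it. The helper name, the poll interval and timeout values, and the cli-utils import path below are assumptions for illustration only:

package example

import (
	"time"

	"sigs.k8s.io/cli-utils/pkg/object"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/fluxcd/flux2/v2/pkg/log"
	"github.com/fluxcd/flux2/v2/pkg/status"
)

// waitForObjects is a hypothetical helper: it builds a StatusChecker from an
// existing client and blocks until the given objects are ready or time out.
func waitForObjects(kubeClient client.Client, logger log.Logger, ids []object.ObjMetadata) error {
	checker, err := status.NewStatusCheckerWithClient(kubeClient, 2*time.Second, 2*time.Minute, logger)
	if err != nil {
		return err
	}
	// Assess polls each identified object until it reports a ready status
	// or the checker's timeout expires (signature visible in the last hunk).
	return checker.Assess(ids...)
}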