diff --git a/cmd/metalctl/app/app.go b/cmd/metalctl/app/app.go index 68a6e04..0e3d89c 100644 --- a/cmd/metalctl/app/app.go +++ b/cmd/metalctl/app/app.go @@ -1,13 +1,8 @@ package app import ( - "path/filepath" - "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/homedir" metalv1alphav1 "github.com/ironcore-dev/metal-operator/api/v1alpha1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -34,7 +29,3 @@ func NewCommand() *cobra.Command { root.AddCommand(NewMoveCommand()) return root } - -func GetKubeconfig() (*rest.Config, error) { - return clientcmd.BuildConfigFromFlags("", filepath.Join(homedir.HomeDir(), ".kube", "config")) -} diff --git a/cmd/metalctl/app/move.go b/cmd/metalctl/app/move.go index 032cdeb..f2d0f5b 100644 --- a/cmd/metalctl/app/move.go +++ b/cmd/metalctl/app/move.go @@ -4,18 +4,31 @@ import ( "context" "errors" "fmt" + "log/slog" + "reflect" + "slices" + "time" metalv1alphav1 "github.com/ironcore-dev/metal-operator/api/v1alpha1" "github.com/spf13/cobra" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( + sourceKubeconfig string targetKubeconfig string - errCrdCreate error = errors.New("failed to create metal CRDs") + crdsOnly bool + crsOnly bool + namespace string + dryRun bool + verbose bool ) func NewMoveCommand() *cobra.Command { @@ -24,88 +37,168 @@ func NewMoveCommand() *cobra.Command { Short: "Move metal-operator CRDs and CRs from one cluster to another", RunE: runMove, } + move.Flags().StringVar(&sourceKubeconfig, "source-kubeconfig", "", "Kubeconfig pointing to the source cluster") 
move.Flags().StringVar(&targetKubeconfig, "target-kubeconfig", "", "Kubeconfig pointing to the target cluster") + move.Flags().BoolVar(&crdsOnly, "crds-only", false, "migrate only the CRDs without CRs") + move.Flags().BoolVar(&crsOnly, "crs-only", false, "migrate only the CRs without CRDs") + move.Flags().StringVar(&namespace, "namespace", "", "namespace to filter CRDs and CRs to migrate. Defaults to all namespaces if not specified") + move.Flags().BoolVar(&dryRun, "dry-run", false, "show what would be moved without executing the migration") + move.Flags().BoolVar(&verbose, "verbose", false, "enable verbose logging for detailed output during migration") + move.MarkFlagRequired("source-kubeconfig") move.MarkFlagRequired("target-kubeconfig") + + move.PreRun = func(*cobra.Command, []string) { + if verbose { slog.SetLogLoggerLevel(slog.LevelDebug) } + } return move } -type clients struct { +type Clients struct { source client.Client target client.Client } -func makeClients() (clients, error) { - var clients clients - sourceCfg, err := GetKubeconfig() +func makeClient(kubeconfig string) (client.Client, error) { + cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { - return clients, fmt.Errorf("failed to load source cluster kubeconfig: %w", err) + return nil, fmt.Errorf("failed to load cluster kubeconfig: %w", err) } - clients.source, err = client.New(sourceCfg, client.Options{Scheme: scheme}) + return client.New(cfg, client.Options{Scheme: scheme}) +} + +func makeClients() (Clients, error) { + var clients Clients + var err error + + clients.source, err = makeClient(sourceKubeconfig) if err != nil { return clients, fmt.Errorf("failed to construct source cluster client: %w", err) } - targetCfg, err := clientcmd.BuildConfigFromFlags("", targetKubeconfig) - if err != nil { - return clients, fmt.Errorf("failed to load target cluster kubeconfig: %w", err) - } - clients.target, err = client.New(targetCfg, client.Options{Scheme: scheme}) + clients.target, err = makeClient(targetKubeconfig) if err != nil 
{ return clients, fmt.Errorf("failed to construct target cluster client: %w", err) } return clients, nil } -func moveCRDs(ctx context.Context, clients clients) error { - var crds apiextensionsv1.CustomResourceDefinitionList - if err := clients.source.List(ctx, &crds); err != nil { - return err +func getMetalObjects(ctx context.Context, cl client.Client) ([]client.Object, error) { + crds := &apiextensionsv1.CustomResourceDefinitionList{} + if err := cl.List(ctx, crds); err != nil { + return nil, fmt.Errorf("couldn't list CRDs: %w", err) } - metalCrds := make([]apiextensionsv1.CustomResourceDefinition, 0) + + metalObjects := make([]client.Object, 0) for _, crd := range crds.Items { - if crd.Spec.Group == metalv1alphav1.GroupVersion.Group { - metalCrds = append(metalCrds, crd) + if crd.Spec.Group != metalv1alphav1.GroupVersion.Group { + continue + } + if !crsOnly { + metalObjects = append(metalObjects, &crd) + } + + if !crdsOnly { + crs := &unstructured.UnstructuredList{} + crs.SetGroupVersionKind(schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Versions[0].Name, Kind: crd.Spec.Names.Kind}) + + if err := cl.List(ctx, crs, &client.ListOptions{Namespace: namespace}); err != nil { + return nil, fmt.Errorf("couldn't list CRs: %w", err) + } + for _, cr := range crs.Items { // won't work with go version <1.22 + metalObjects = append(metalObjects, &cr) + } } } + + return metalObjects, nil +} + +func validateTargetCluster(ctx context.Context, sourceObjs []client.Object, targetClient client.Client) error { // it may be better to compare on semantics instead of CRD name - for _, sourceCrd := range metalCrds { - var targetCrd apiextensionsv1.CustomResourceDefinition - err := clients.target.Get(ctx, client.ObjectKeyFromObject(&sourceCrd), &targetCrd) - if apierrors.IsNotFound(err) { + for _, sourceObj := range sourceObjs { + targetObj := sourceObj.DeepCopyObject().(client.Object) + err := targetClient.Get(ctx, client.ObjectKeyFromObject(sourceObj), targetObj) + if 
apierrors.IsNotFound(err) || meta.IsNoMatchError(err) { continue } + sourceSpec := reflect.ValueOf(sourceObj).Elem().FieldByName("Spec") + targetSpec := reflect.ValueOf(targetObj).Elem().FieldByName("Spec") + if sourceSpec.IsValid() && targetSpec.IsValid() && reflect.DeepEqual(sourceSpec.Interface(), targetSpec.Interface()) { + slog.Debug("source and target CRD or CR have the same specification", slog.String("object", metalObjectToString(sourceObj))) + } if err != nil { - return fmt.Errorf("failed to check CRD existence in target cluster: %w", err) + return fmt.Errorf("failed to check CRD and CR existence in target cluster: %w", err) } - return fmt.Errorf("CRD for %s/%s already exists in the target cluster", sourceCrd.Spec.Group, sourceCrd.Spec.Names.Plural) + return fmt.Errorf("%s already exists in the target cluster", client.ObjectKeyFromObject(sourceObj)) } - for _, crd := range metalCrds { - crd.ResourceVersion = "" - if err := clients.target.Create(ctx, &crd); err != nil { - return errCrdCreate + return nil +} + +func moveMetalObjects(ctx context.Context, sourceObjs []client.Object, cl client.Client) error { + movedObjects := make([]client.Object, 0) + + for _, sourceObj := range sourceObjs { + var err error + sourceObj.SetResourceVersion("") + if err = cl.Create(ctx, sourceObj); err == nil { + if crd, isCrd := sourceObj.(*apiextensionsv1.CustomResourceDefinition); isCrd && + slices.IndexFunc(sourceObjs, func(obj client.Object) bool { + return obj.GetObjectKind().GroupVersionKind().Kind == crd.Spec.Names.Kind + }) != -1 { + err = waitForMetalCRD(ctx, crd, cl) + } + } + if err != nil { + cleanupErrs := make([]error, 0) + for _, obj := range movedObjects { + if err := cl.Delete(ctx, obj); err != nil { + cleanupErrs = append(cleanupErrs, err) + } + } + + return errors.Join( + fmt.Errorf("%s couldn't be created in the target cluster: %w", metalObjectToString(sourceObj), err), + fmt.Errorf("clean up was performed to restore a target cluster's state with error result: %w", errors.Join(cleanupErrs...))) } + movedObjects = 
append(movedObjects, sourceObj) } return nil } -func cleanupCRDs(ctx context.Context, clients clients) error { - var crds apiextensionsv1.CustomResourceDefinitionList - if err := clients.target.List(ctx, &crds); err != nil { +func waitForMetalCRD(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, cl client.Client) error { + return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) { + targetCrd := apiextensionsv1.CustomResourceDefinition{} + if err := cl.Get(ctx, client.ObjectKeyFromObject(crd), &targetCrd); apierrors.IsNotFound(err) { + return false, nil + } + for _, condition := range targetCrd.Status.Conditions { + if condition.Type == apiextensionsv1.Established && condition.Status == apiextensionsv1.ConditionTrue { + return true, nil + } + } + return false, nil + }) +} + +func move(ctx context.Context, clients Clients) error { + sourceObjs, err := getMetalObjects(ctx, clients.source) + if err != nil { return err } - metalCrds := make([]apiextensionsv1.CustomResourceDefinition, 0) - for _, crd := range crds.Items { - if crd.Spec.Group == metalv1alphav1.GroupVersion.Group { - metalCrds = append(metalCrds, crd) - } + slog.Debug(fmt.Sprintf("found %s CRDs and CRs in the source cluster", metalv1alphav1.GroupVersion.Group), slog.Any("Objects", transform(sourceObjs, metalObjectToString))) + + if err = validateTargetCluster(ctx, sourceObjs, clients.target); err != nil { + return err } - errs := make([]error, 0) - for _, crd := range metalCrds { - crd.ResourceVersion = "" - if err := clients.target.Delete(ctx, &crd); err != nil { - errs = append(errs, err) + slog.Debug(fmt.Sprintf("all %s CRDs and CRs from the source cluster are absent in the target cluster", metalv1alphav1.GroupVersion.Group)) + + if !dryRun { + err = moveMetalObjects(ctx, sourceObjs, clients.target) + if err == nil { + slog.Debug(fmt.Sprintf("all %s CRDs and CRs from the source cluster were moved to the target cluster", 
metalv1alphav1.GroupVersion.Group)) } } - return errors.Join(errs...) + + return err } func runMove(cmd *cobra.Command, args []string) error { @@ -114,12 +207,5 @@ func runMove(cmd *cobra.Command, args []string) error { return err } ctx := cmd.Context() - err = moveCRDs(ctx, clients) - switch { - case errors.Is(err, errCrdCreate): - return cleanupCRDs(ctx, clients) - case err != nil: - return err - } - return nil + return move(ctx, clients) } diff --git a/cmd/metalctl/app/move_test.go b/cmd/metalctl/app/move_test.go new file mode 100644 index 0000000..99ff91b --- /dev/null +++ b/cmd/metalctl/app/move_test.go @@ -0,0 +1,30 @@ +package app + +import ( + "context" + "log/slog" + + metalv1alpha1 "github.com/ironcore-dev/metal-operator/api/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("metalctl move", func() { + _ = SetupTest() + + It("AAAAA", func(ctx SpecContext) { + sourceCr := &metalv1alpha1.BMC{ObjectMeta: metav1.ObjectMeta{Name: "test"}} + Expect(clients.source.Create(ctx, sourceCr)).To(Succeed()) + slog.SetLogLoggerLevel(slog.LevelDebug) + err := move(context.TODO(), clients) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) error { + targetCr := metalv1alpha1.BMC{} + return clients.target.Get(context.Background(), client.ObjectKeyFromObject(sourceCr), &targetCr) + }).Should(Succeed()) + }) +}) diff --git a/cmd/metalctl/app/suite_test.go b/cmd/metalctl/app/suite_test.go new file mode 100644 index 0000000..cc5f753 --- /dev/null +++ b/cmd/metalctl/app/suite_test.go @@ -0,0 +1,159 @@ +// SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and IronCore contributors +// SPDX-License-Identifier: Apache-2.0 + +package app + +import ( + "context" + "fmt" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/ironcore-dev/metal-operator/internal/registry" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sSchema "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + metalv1alpha1 "github.com/ironcore-dev/metal-operator/api/v1alpha1" + //+kubebuilder:scaffold:imports +) + +const ( + pollingInterval = 50 * time.Millisecond + eventuallyTimeout = 3 * time.Second + consistentlyDuration = 1 * time.Second +) + +var ( + cfg *rest.Config + clients Clients +) + +func TestMetalctl(t *testing.T) { + SetDefaultConsistentlyPollingInterval(pollingInterval) + SetDefaultEventuallyPollingInterval(pollingInterval) + SetDefaultEventuallyTimeout(eventuallyTimeout) + SetDefaultConsistentlyDuration(consistentlyDuration) + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + // Source client with CRDs + testEnv := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + DeferCleanup(testEnv.Stop) + + Expect(metalv1alpha1.AddToScheme(k8sSchema.Scheme)).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + clients.source, err = client.New(cfg, client.Options{Scheme: k8sSchema.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(clients.source).NotTo(BeNil()) + + // Target client without CRDs + testEnv = &envtest.Environment{ + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + DeferCleanup(testEnv.Stop) + + clients.target, err = client.New(cfg, client.Options{Scheme: k8sSchema.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(clients.target).NotTo(BeNil()) + + By("Starting the registry server") + var mgrCtx context.Context + mgrCtx, cancel := context.WithCancel(context.Background()) + DeferCleanup(cancel) + registryServer := registry.NewServer("localhost:30000") + go func() { + defer GinkgoRecover() + Expect(registryServer.Start(mgrCtx)).To(Succeed(), "failed to start registry server") + }() + +}) + +func SetupTest() *corev1.Namespace { + ns := &corev1.Namespace{} + + BeforeEach(func(ctx SpecContext) { + // var mgrCtx context.Context + // mgrCtx, cancel := context.WithCancel(context.Background()) + // DeferCleanup(cancel) + + *ns = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + targetNs := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + Expect(clients.source.Create(ctx, ns)).To(Succeed(), "failed to create test namespace") + Expect(clients.target.Create(ctx, &targetNs)).To(Succeed(), "failed to create test namespace") + DeferCleanup(clients.source.Delete, ns) + DeferCleanup(clients.target.Delete, &targetNs) + + // k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + // Scheme: k8sSchema.Scheme, + // Controller: config.Controller{ + // // need to skip unique controller name validation + // // since all tests need a dedicated controller + // SkipNameValidation: ptr.To(true), + // }, + // }) + // Expect(err).ToNot(HaveOccurred()) + + // go func() { + // defer GinkgoRecover() + // Expect(k8sManager.Start(mgrCtx)).To(Succeed(), "failed to start manager") + // }() + }) + + return ns +} diff --git a/cmd/metalctl/app/utils.go b/cmd/metalctl/app/utils.go new file mode 100644 index 0000000..8169a07 --- /dev/null +++ b/cmd/metalctl/app/utils.go @@ -0,0 +1,23 @@ 
+package app + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// transform returns a list of transformed list elements with function f. +func transform[L ~[]E, E any, T any](list L, f func(E) T) []T { + ret := make([]T, len(list)) + for i, elem := range list { + ret[i] = f(elem) + } + return ret +} + +func metalObjectToString(obj client.Object) string { + if crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition); ok { + return "CRD:" + crd.Spec.Group + "/" + crd.Spec.Names.Kind + } + + return obj.GetObjectKind().GroupVersionKind().Kind + ":" + client.ObjectKeyFromObject(obj).String() +}