diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go
index 1e54aec7fb92..01cbb407e8d0 100644
--- a/cmd/minikube/cmd/node_add.go
+++ b/cmd/minikube/cmd/node_add.go
@@ -41,14 +41,25 @@ var nodeAddCmd = &cobra.Command{
 	Short: "Adds a node to the given cluster.",
 	Long:  "Adds a node to the given cluster config, and starts it.",
 	Run: func(cmd *cobra.Command, args []string) {
-		co := mustload.Healthy(ClusterFlagValue())
+		co := mustload.HealthyOrNoKubernetes(ClusterFlagValue())
 		cc := co.Config
 
 		if driver.BareMetal(cc.Driver) {
 			out.FailureT("none driver does not support multi-node clusters")
 		}
 
-		name := node.Name(len(cc.Nodes) + 1)
+		usedNodeNames := map[string]bool{}
+		for _, v := range cc.Nodes {
+			usedNodeNames[v.Name] = true
+		}
+
+		// find unused node name
+		ndx := 2
+		name := node.Name(ndx)
+		for usedNodeNames[name] {
+			ndx++
+			name = node.Name(ndx)
+		}
 
 		// for now control-plane feature is not supported
 		if cp {
diff --git a/cmd/minikube/cmd/node_delete.go b/cmd/minikube/cmd/node_delete.go
index 686fdf6a3881..6a706c7397f9 100644
--- a/cmd/minikube/cmd/node_delete.go
+++ b/cmd/minikube/cmd/node_delete.go
@@ -42,7 +42,7 @@ var nodeDeleteCmd = &cobra.Command{
 		}
 		name := args[0]
 
-		co := mustload.Healthy(ClusterFlagValue())
+		co := mustload.HealthyOrNoKubernetes(ClusterFlagValue())
 		out.Step(style.DeletingHost, "Deleting node {{.name}} from cluster {{.cluster}}", out.V{"name": name, "cluster": co.Config.Name})
 
 		n, err := node.Delete(*co.Config, name)
diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go
index 1634ad318fe7..a991c588ac4c 100644
--- a/pkg/minikube/mustload/mustload.go
+++ b/pkg/minikube/mustload/mustload.go
@@ -139,6 +139,23 @@ func Running(name string) ClusterController {
 	}
 }
 
+// HealthyOrNoKubernetes is a cmd-friendly way to load a healthy cluster,
+// also allowing clusters without Kubernetes.
+func HealthyOrNoKubernetes(name string) ClusterController {
+	api, cc := Partial(name)
+
+	// if we're in a cluster that has Kubernetes deployed, expect it to be healthy.
+	if cc.KubernetesConfig.KubernetesVersion != constants.NoKubernetesVersion {
+		return Healthy(name)
+	}
+
+	// otherwise just load the cluster without checking Kubernetes health.
+	return ClusterController{
+		API:    api,
+		Config: cc,
+	}
+}
+
 // Healthy is a cmd-friendly way to load a healthy cluster
 func Healthy(name string) ClusterController {
 	co := Running(name)
diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go
index 273a59a2ad1e..e9b121b24268 100644
--- a/pkg/minikube/node/node.go
+++ b/pkg/minikube/node/node.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/kapi"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/machine"
 )
 
@@ -74,27 +75,22 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
 }
 
 // drainNode drains then deletes (removes) node from cluster.
-func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
-	n, _, err := Retrieve(cc, name)
-	if err != nil {
-		return n, errors.Wrap(err, "retrieve")
-	}
-
+func drainNode(n *config.Node, cc config.ClusterConfig) error {
 	m := config.MachineName(cc, *n)
 	api, err := machine.NewAPIClient()
 	if err != nil {
-		return n, err
+		return err
 	}
 
 	// grab control plane to use kubeconfig
 	host, err := machine.LoadHost(api, cc.Name)
 	if err != nil {
-		return n, err
+		return err
 	}
 
 	runner, err := machine.CommandRunner(host)
 	if err != nil {
-		return n, err
+		return err
 	}
 
 	// kubectl drain with extra options to prevent ending up stuck in the process
@@ -103,34 +99,41 @@ func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
 	cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig", kubectl, "drain", m,
 		"--force", "--grace-period=1", "--skip-wait-for-delete-timeout=1", "--disable-eviction", "--ignore-daemonsets", "--delete-emptydir-data", "--delete-local-data")
 	if _, err := runner.RunCmd(cmd); err != nil {
-		klog.Warningf("unable to drain node %q: %v", name, err)
+		klog.Warningf("unable to drain node %q: %v", n.Name, err)
 	} else {
-		klog.Infof("successfully drained node %q", name)
+		klog.Infof("successfully drained node %q", n.Name)
 	}
 
 	// kubectl delete
 	client, err := kapi.Client(cc.Name)
 	if err != nil {
-		return n, err
+		return err
 	}
 
 	// set 'GracePeriodSeconds: 0' option to delete node immediately (ie, w/o waiting)
 	var grace *int64
 	err = client.CoreV1().Nodes().Delete(context.Background(), m, v1.DeleteOptions{GracePeriodSeconds: grace})
 	if err != nil {
-		klog.Errorf("unable to delete node %q: %v", name, err)
-		return n, err
+		klog.Errorf("unable to delete node %q: %v", n.Name, err)
+		return err
 	}
-	klog.Infof("successfully deleted node %q", name)
+	klog.Infof("successfully deleted node %q", n.Name)
 
-	return n, nil
+	return nil
 }
 
 // Delete calls drainNode to remove node from cluster and deletes the host.
 func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
-	n, err := drainNode(cc, name)
+	n, _, err := Retrieve(cc, name)
 	if err != nil {
-		return n, err
+		return n, errors.Wrap(err, "retrieve")
+	}
+
+	if cc.KubernetesConfig.KubernetesVersion != constants.NoKubernetesVersion {
+		err := drainNode(n, cc)
+		if err != nil {
+			return n, err
+		}
 	}
 
 	m := config.MachineName(cc, *n)
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go
index fd4c00921b4e..37f3194a7874 100644
--- a/pkg/minikube/node/start.go
+++ b/pkg/minikube/node/start.go
@@ -311,8 +311,13 @@ func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrappe
 	// avoid "error execution phase kubelet-start: a Node with name "" and status "Ready" already exists in the cluster.
 	// You must delete the existing Node or change the name of this new joining Node"
 	if starter.PreExists {
+		n, _, err := Retrieve(*starter.Cfg, starter.Node.Name)
+		if err != nil {
+			return err
+		}
+
 		klog.Infof("removing existing worker node %q before attempting to rejoin cluster: %+v", starter.Node.Name, starter.Node)
-		if _, err := drainNode(*starter.Cfg, starter.Node.Name); err != nil {
+		if err := drainNode(n, *starter.Cfg); err != nil {
 			klog.Errorf("error removing existing worker node before rejoining cluster, will continue anyway: %v", err)
 		}
 		klog.Infof("successfully removed existing worker node %q from cluster: %+v", starter.Node.Name, starter.Node)
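
Two follow-up sketches (illustrations only, not part of the patch):

1) The node_add.go naming fix: the old name := node.Name(len(cc.Nodes) + 1)
could hand out a name that is still taken when nodes were deleted out of
order (with three nodes, deleting the second leaves len(cc.Nodes)+1 == 3,
which collides with the surviving third node). The new loop scans upward
from index 2 until it finds a free name. A minimal standalone sketch,
assuming node.Name follows minikube's "m%02d" suffix convention:

package main

import "fmt"

// nodeName stands in for minikube's node.Name; the "m%02d" suffix
// convention is an assumption of this sketch.
func nodeName(index int) string {
	return fmt.Sprintf("m%02d", index)
}

func main() {
	// Simulate a cluster where m02 was deleted and m03 survived.
	usedNodeNames := map[string]bool{"m03": true}

	// Same search as the patch: start at 2, skip names already in use.
	ndx := 2
	name := nodeName(ndx)
	for usedNodeNames[name] {
		ndx++
		name = nodeName(ndx)
	}

	fmt.Println(name) // prints "m02": the freed slot is reused, no collision with m03
}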
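
2) The version gate shared by mustload.HealthyOrNoKubernetes and the new
drain-skip in node.Delete, reduced to a self-contained sketch. Assumptions:
"v0.0.0" stands in for constants.NoKubernetesVersion, the sentinel recorded
for profiles started with --no-kubernetes, and runsKubernetes is a name
invented here:

package main

import "fmt"

// noKubernetesVersion mirrors constants.NoKubernetesVersion; the concrete
// sentinel value is an assumption of this sketch.
const noKubernetesVersion = "v0.0.0"

// runsKubernetes is the predicate both call sites share: only clusters that
// actually deploy Kubernetes get the full health check and the kubectl drain.
func runsKubernetes(version string) bool {
	return version != noKubernetesVersion
}

func main() {
	for _, v := range []string{"v1.23.3", noKubernetesVersion} {
		if runsKubernetes(v) {
			fmt.Printf("%s: require a healthy apiserver; drain before deleting the node\n", v)
		} else {
			fmt.Printf("%s: load config and machine API only; delete the host directly\n", v)
		}
	}
}

With that gate in place, node add/delete work against --no-kubernetes
profiles instead of failing the Kubernetes health check that
mustload.Healthy enforces.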