diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 4eddc6483007..90163e392c1f 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -35,6 +35,7 @@ type LogOptions struct { // Bootstrapper contains all the methods needed to bootstrap a Kubernetes cluster type Bootstrapper interface { + ApplyNodeLabels(config.ClusterConfig) error StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index fdba86768dd8..69761568d153 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -1040,8 +1040,11 @@ func kubectlPath(cfg config.ClusterConfig) string { return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl") } +func (k *Bootstrapper) ApplyNodeLabels(cfg config.ClusterConfig) error { + return k.applyNodeLabels(cfg) +} + // applyNodeLabels applies minikube labels to all the nodes -// but it's currently called only from kubeadm.StartCluster (via kubeadm.init) where there's only one - first node func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { // time cluster was created. 
time format is based on ISO 8601 (RFC 3339) // converting - and : to _ because of Kubernetes label restriction @@ -1053,8 +1056,12 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { // ensure that "primary" label is applied only to the 1st node in the cluster (used eg for placing ingress there) // this is used to uniquely distinguish that from other nodes in multi-master/multi-control-plane cluster config primaryLbl := "minikube.k8s.io/primary=false" + + // ensure that the "primary" label is not removed when applying labels to all other nodes + applyToNodes := "-l minikube.k8s.io/primary!=true" if len(cfg.Nodes) <= 1 { primaryLbl = "minikube.k8s.io/primary=true" + applyToNodes = "--all" } ctx, cancel := context.WithTimeout(context.Background(), applyTimeoutSeconds*time.Second) @@ -1062,7 +1069,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { // example: // sudo /var/lib/minikube/binaries//kubectl label nodes minikube.k8s.io/version= minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), - "label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, "--all", "--overwrite", + "label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, applyToNodes, "--overwrite", fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig"))) if _, err := k.c.RunCmd(cmd); err != nil { diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 7ea3e02d2686..baebf81b9baa 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -342,6 +342,9 @@ func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrappe return fmt.Errorf("error joining worker node to cluster: %w", err) } + if err := cpBs.ApplyNodeLabels(*starter.Cfg); err != nil { + 
return fmt.Errorf("error applying node label: %w", err) + } return nil } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index ece1b81b6c12..33eeff113050 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -220,7 +220,7 @@ func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) } // docs: check if the node labels matches with the expected Minikube labels: `minikube.k8s.io/*` - expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"} + expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"} for _, el := range expectedLabels { if !strings.Contains(rr.Output(), el) { t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output()) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 679b5027c2d4..e5a115b444e5 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -55,6 +55,7 @@ func TestMultiNode(t *testing.T) { {"DeployApp2Nodes", validateDeployAppToMultiNode}, {"PingHostFrom2Pods", validatePodsPingHost}, {"AddNode", validateAddNodeToMultiNode}, + {"MultiNodeLabels", validateMultiNodeLabels}, {"ProfileList", validateProfileListWithMultiNode}, {"CopyFile", validateCopyFileWithMultiNode}, {"StopNode", validateStopRunningNode}, @@ -204,6 +205,33 @@ func validateCopyFileWithMultiNode(ctx context.Context, t *testing.T, profile st } } +// validateMultiNodeLabels checks if all node labels were configured correctly +func validateMultiNodeLabels(ctx context.Context, t *testing.T, profile string) { + // docs: Get the node labels from the cluster with `kubectl get nodes` + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", 
"--context", profile, "get", "nodes", "-o", "jsonpath=[{range .items[*]}{.metadata.labels},{end}]")) + if err != nil { + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) + } + + nodeLabelsList := []map[string]string{} + fixedString := strings.Replace(rr.Stdout.String(), ",]", "]", 1) + err = json.Unmarshal([]byte(fixedString), &nodeLabelsList) + if err != nil { + t.Errorf("failed to decode json from label list: args %q: %v", rr.Command(), err) + } + + // docs: check if all node labels match the expected Minikube labels: `minikube.k8s.io/*` + expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"} + + for _, nodeLabels := range nodeLabelsList { + for _, el := range expectedLabels { + if _, ok := nodeLabels[el]; !ok { + t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output()) + } + } + } +} + // validateStopRunningNode tests the minikube node stop command
func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) { // Run minikube node stop on that node