Skip to content

Commit

Permalink
Merge pull request #16416 from marcellmartini/feature/issue-16415
Browse files Browse the repository at this point in the history
Feature: Make kubeadm.applyNodeLabels apply label to all nodes
  • Loading branch information
medyagh authored Nov 29, 2023
2 parents 79ce564 + c6393b8 commit d8422bf
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 3 deletions.
1 change: 1 addition & 0 deletions pkg/minikube/bootstrapper/bootstrapper.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ type LogOptions struct {

// Bootstrapper contains all the methods needed to bootstrap a Kubernetes cluster
type Bootstrapper interface {
ApplyNodeLabels(config.ClusterConfig) error
StartCluster(config.ClusterConfig) error
UpdateCluster(config.ClusterConfig) error
DeleteCluster(config.KubernetesConfig) error
Expand Down
11 changes: 9 additions & 2 deletions pkg/minikube/bootstrapper/kubeadm/kubeadm.go
Original file line number Diff line number Diff line change
Expand Up @@ -1040,8 +1040,11 @@ func kubectlPath(cfg config.ClusterConfig) string {
return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl")
}

// ApplyNodeLabels exposes applyNodeLabels outside the package so that other
// callers (e.g. node.joinCluster, after a worker joins) can (re)apply the
// minikube.k8s.io/* node labels to the cluster's nodes.
func (k *Bootstrapper) ApplyNodeLabels(cfg config.ClusterConfig) error {
	return k.applyNodeLabels(cfg)
}

// applyNodeLabels applies minikube labels to all the nodes
// it's called from kubeadm.StartCluster (via kubeadm.init) and, for additional nodes, from node.joinCluster via the exported ApplyNodeLabels
func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
// time cluster was created. time format is based on ISO 8601 (RFC 3339)
// converting - and : to _ because of Kubernetes label restriction
Expand All @@ -1053,16 +1056,20 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
// ensure that "primary" label is applied only to the 1st node in the cluster (used eg for placing ingress there)
// this is used to uniquely distinguish that from other nodes in multi-master/multi-control-plane cluster config
primaryLbl := "minikube.k8s.io/primary=false"

// ensure that the "primary" label is not removed when applying labels to all other nodes
applyToNodes := "-l minikube.k8s.io/primary!=true"
if len(cfg.Nodes) <= 1 {
primaryLbl = "minikube.k8s.io/primary=true"
applyToNodes = "--all"
}

ctx, cancel := context.WithTimeout(context.Background(), applyTimeoutSeconds*time.Second)
defer cancel()
// example:
// sudo /var/lib/minikube/binaries/<version>/kubectl label nodes minikube.k8s.io/version=<version> minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg),
"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, "--all", "--overwrite",
"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, applyToNodes, "--overwrite",
fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")))

if _, err := k.c.RunCmd(cmd); err != nil {
Expand Down
3 changes: 3 additions & 0 deletions pkg/minikube/node/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,9 @@ func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrappe
return fmt.Errorf("error joining worker node to cluster: %w", err)
}

if err := cpBs.ApplyNodeLabels(*starter.Cfg); err != nil {
return fmt.Errorf("error applying node label: %w", err)
}
return nil
}

Expand Down
2 changes: 1 addition & 1 deletion test/integration/functional_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
}
// docs: check if the node labels matches with the expected Minikube labels: `minikube.k8s.io/*`
expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"}
for _, el := range expectedLabels {
if !strings.Contains(rr.Output(), el) {
t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output())
Expand Down
28 changes: 28 additions & 0 deletions test/integration/multinode_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ func TestMultiNode(t *testing.T) {
{"DeployApp2Nodes", validateDeployAppToMultiNode},
{"PingHostFrom2Pods", validatePodsPingHost},
{"AddNode", validateAddNodeToMultiNode},
{"MultiNodeLabels", validateMultiNodeLabels},
{"ProfileList", validateProfileListWithMultiNode},
{"CopyFile", validateCopyFileWithMultiNode},
{"StopNode", validateStopRunningNode},
Expand Down Expand Up @@ -204,6 +205,33 @@ func validateCopyFileWithMultiNode(ctx context.Context, t *testing.T, profile st
}
}

// validateMultiNodeLabels check if all node labels were configured correctly
// validateMultiNodeLabels checks that every node in the multinode cluster
// carries the expected minikube.k8s.io/* labels (including the "primary"
// label), not just the first node.
func validateMultiNodeLabels(ctx context.Context, t *testing.T, profile string) {
	// docs: Get the node labels from the cluster with `kubectl get nodes`
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "-o", "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"))
	if err != nil {
		t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
	}

	// The jsonpath template above emits a trailing comma before the closing
	// bracket (e.g. `[{...},{...},]`), which is not valid JSON. Strip only
	// that trailing ",]" — replacing the first occurrence anywhere in the
	// string could instead corrupt a label value that happens to contain ",]"
	// and leave the real trailing comma in place.
	out := strings.TrimSpace(rr.Stdout.String())
	if strings.HasSuffix(out, ",]") {
		out = strings.TrimSuffix(out, ",]") + "]"
	}

	nodeLabelsList := []map[string]string{}
	if err := json.Unmarshal([]byte(out), &nodeLabelsList); err != nil {
		t.Errorf("failed to decode json from label list: args %q: %v", rr.Command(), err)
	}

	// docs: check if all node labels matches with the expected Minikube labels: `minikube.k8s.io/*`
	expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"}

	for _, nodeLabels := range nodeLabelsList {
		for _, el := range expectedLabels {
			if _, ok := nodeLabels[el]; !ok {
				t.Errorf("expected to have label %q in node labels but got : %s", el, rr.Output())
			}
		}
	}
}

// validateStopRunningNode tests the minikube node stop command
func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
// Run minikube node stop on that node
Expand Down

0 comments on commit d8422bf

Please sign in to comment.