From ad7fb3992cbbbe5f1bc918adfc75588fab51bc22 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 7 Jan 2024 21:36:17 +0000 Subject: [PATCH 01/41] support kubernetes ha cluster topology in minikube --- .gitignore | 1 + cmd/minikube/cmd/config/profile_list.go | 115 +++- cmd/minikube/cmd/cp.go | 7 +- cmd/minikube/cmd/docker-env.go | 3 +- cmd/minikube/cmd/logs.go | 2 +- cmd/minikube/cmd/node_add.go | 44 +- cmd/minikube/cmd/node_start.go | 8 +- cmd/minikube/cmd/start.go | 176 +++-- cmd/minikube/cmd/start_flags.go | 64 +- cmd/minikube/cmd/status.go | 17 +- cmd/minikube/cmd/stop.go | 4 +- pkg/addons/addons.go | 10 +- pkg/addons/addons_storage_classes.go | 10 +- pkg/addons/addons_test.go | 18 +- pkg/drivers/kic/kic.go | 4 +- pkg/drivers/kvm/network.go | 10 + pkg/drivers/none/none.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 6 +- .../bootstrapper/bsutil/extraconfig.go | 7 +- .../bootstrapper/bsutil/ktmpl/v1alpha3.go | 82 --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 32 +- .../bootstrapper/bsutil/kubeadm_test.go | 26 +- pkg/minikube/bootstrapper/bsutil/kubelet.go | 8 + .../bootstrapper/bsutil/kverify/node_ready.go | 7 +- .../bootstrapper/bsutil/kverify/pod_ready.go | 2 +- .../bootstrapper/bsutil/kverify/system_svc.go | 2 +- .../testdata/v1.27/containerd-api-port.yaml | 1 + .../v1.27/containerd-pod-network-cidr.yaml | 1 + .../bsutil/testdata/v1.27/containerd.yaml | 1 + .../testdata/v1.27/crio-options-gates.yaml | 1 + .../bsutil/testdata/v1.27/crio.yaml | 1 + .../bsutil/testdata/v1.27/default.yaml | 1 + .../bsutil/testdata/v1.27/dns.yaml | 1 + .../testdata/v1.27/image-repository.yaml | 1 + .../bsutil/testdata/v1.27/options.yaml | 1 + .../testdata/v1.28/containerd-api-port.yaml | 1 + .../v1.28/containerd-pod-network-cidr.yaml | 1 + .../bsutil/testdata/v1.28/containerd.yaml | 1 + .../testdata/v1.28/crio-options-gates.yaml | 1 + .../bsutil/testdata/v1.28/crio.yaml | 1 + .../bsutil/testdata/v1.28/default.yaml | 1 + .../bsutil/testdata/v1.28/dns.yaml | 1 + .../testdata/v1.28/image-repository.yaml | 1 + .../bsutil/testdata/v1.28/options.yaml | 1 + .../testdata/v1.29/containerd-api-port.yaml | 1 + .../v1.29/containerd-pod-network-cidr.yaml | 1 + .../bsutil/testdata/v1.29/containerd.yaml | 1 + .../testdata/v1.29/crio-options-gates.yaml | 1 + .../bsutil/testdata/v1.29/crio.yaml | 1 + .../bsutil/testdata/v1.29/default.yaml | 1 + .../bsutil/testdata/v1.29/dns.yaml | 1 + .../testdata/v1.29/image-repository.yaml | 1 + .../bsutil/testdata/v1.29/options.yaml | 1 + pkg/minikube/bootstrapper/bsutil/versions.go | 50 +- pkg/minikube/bootstrapper/certs.go | 290 ++++---- pkg/minikube/bootstrapper/certs_test.go | 5 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 351 +++++----- pkg/minikube/cluster/cluster.go | 14 +- pkg/minikube/cluster/ha/kube-vip/kube-vip.go | 119 ++++ pkg/minikube/cni/cni.go | 2 +- pkg/minikube/cni/kindnet.go | 3 +- pkg/minikube/command/ssh_runner.go | 4 +- pkg/minikube/config/config.go | 9 +- pkg/minikube/config/profile.go | 60 +- pkg/minikube/config/profile_test.go | 32 +- pkg/minikube/config/types.go | 7 +- pkg/minikube/cruntime/containerd.go | 4 +- pkg/minikube/cruntime/crio.go | 4 +- pkg/minikube/cruntime/docker.go | 2 +- pkg/minikube/download/preload.go | 3 +- pkg/minikube/driver/endpoint.go | 10 +- pkg/minikube/kubeconfig/kubeconfig.go | 291 ++++---- pkg/minikube/machine/build_images.go | 4 +- pkg/minikube/machine/cache_images.go | 20 +- pkg/minikube/machine/client.go | 2 +- pkg/minikube/machine/fix.go | 12 +- pkg/minikube/machine/machine.go | 88 ++- 
pkg/minikube/machine/start.go | 31 +- pkg/minikube/machine/stop.go | 10 +- pkg/minikube/mustload/mustload.go | 147 ++-- pkg/minikube/node/cache.go | 2 +- pkg/minikube/node/node.go | 116 +++- pkg/minikube/node/start.go | 215 +++--- pkg/minikube/tunnel/cluster_inspector.go | 2 +- pkg/minikube/tunnel/cluster_inspector_test.go | 2 +- pkg/minikube/tunnel/route_test.go | 2 +- pkg/minikube/vmpath/constants.go | 2 + pkg/network/network.go | 15 +- pkg/network/network_test.go | 6 +- pkg/provision/provision.go | 7 +- pkg/util/constants.go | 25 +- pkg/util/constants_test.go | 4 +- test/integration/functional_test.go | 8 +- .../functional_test_tunnel_test.go | 10 +- test/integration/ha_test.go | 627 ++++++++++++++++++ test/integration/multinode_test.go | 56 +- .../testdata/ha/ha-pod-dns-test.yaml | 35 + 97 files changed, 2215 insertions(+), 1187 deletions(-) delete mode 100644 pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go create mode 100644 pkg/minikube/cluster/ha/kube-vip/kube-vip.go create mode 100644 test/integration/ha_test.go create mode 100644 test/integration/testdata/ha/ha-pod-dns-test.yaml diff --git a/.gitignore b/.gitignore index c5fb3beb056a..1463d6febc2c 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ _testmain.go *.exe *.test *.prof +*.pprof /deploy/iso/minikube-iso/board/minikube/x86_64/rootfs-overlay/usr/bin/auto-pause /deploy/iso/minikube-iso/board/minikube/aarch64/rootfs-overlay/usr/bin/auto-pause diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index 2cd2f1432f6e..b9d93f3c6409 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -83,7 +83,7 @@ func printProfilesTable() { } if len(validProfiles) == 0 { - exit.Message(reason.UsageNoProfileRunning, "No minikube profile was found. 
") + exit.Message(reason.UsageNoProfileRunning, "No minikube profile was found.") } updateProfilesStatus(validProfiles) @@ -111,45 +111,82 @@ func updateProfilesStatus(profiles []*config.Profile) { } func profileStatus(p *config.Profile, api libmachine.API) string { - cp, err := config.PrimaryControlPlane(p.Config) - if err != nil { - exit.Error(reason.GuestCpConfig, "error getting primary control plane", err) + cps := config.ControlPlanes(*p.Config) + if len(cps) == 0 { + exit.Message(reason.GuestCpConfig, "No control-plane nodes found.") } - host, err := machine.LoadHost(api, config.MachineName(*p.Config, cp)) - if err != nil { - klog.Warningf("error loading profiles: %v", err) - return "Unknown" - } + status := "Unknown" + healthyCPs := 0 + for _, cp := range cps { + machineName := config.MachineName(*p.Config, cp) - // The machine isn't running, no need to check inside - s, err := host.Driver.GetState() - if err != nil { - klog.Warningf("error getting host state: %v", err) - return "Unknown" - } - if s != state.Running { - return s.String() - } + ms, err := machine.Status(api, machineName) + if err != nil { + klog.Warningf("error loading profile (will continue): machine status for %s: %v", machineName, err) + continue + } + if ms != state.Running.String() { + klog.Warningf("error loading profile (will continue): machine %s is not running: %q", machineName, ms) + status = ms + continue + } - cr, err := machine.CommandRunner(host) - if err != nil { - klog.Warningf("error loading profiles: %v", err) - return "Unknown" - } + host, err := machine.LoadHost(api, machineName) + if err != nil { + klog.Warningf("error loading profile (will continue): load host for %s: %v", machineName, err) + continue + } - hostname, _, port, err := driver.ControlPlaneEndpoint(p.Config, &cp, host.DriverName) - if err != nil { - klog.Warningf("error loading profiles: %v", err) - return "Unknown" + hs, err := host.Driver.GetState() + if err != nil { + klog.Warningf("error loading profile (will continue): host state for %s: %v", machineName, err) + continue + } + if hs != state.Running { + klog.Warningf("error loading profile (will continue): host %s is not running: %q", machineName, hs) + status = hs.String() + continue + } + + cr, err := machine.CommandRunner(host) + if err != nil { + klog.Warningf("error loading profile (will continue): command runner for %s: %v", machineName, err) + continue + } + + hostname, _, port, err := driver.ControlPlaneEndpoint(p.Config, &cp, host.DriverName) + if err != nil { + klog.Warningf("error loading profile (will continue): control-plane endpoint for %s: %v", machineName, err) + continue + } + + as, err := kverify.APIServerStatus(cr, hostname, port) + if err != nil { + klog.Warningf("error loading profile (will continue): apiserver status for %s: %v", machineName, err) + continue + } + if as != state.Running { + klog.Warningf("error loading profile (will continue): apiserver %s is not running: %q", machineName, hs) + status = as.String() + continue + } + + status = state.Running.String() + healthyCPs++ } - status, err := kverify.APIServerStatus(cr, hostname, port) - if err != nil { - klog.Warningf("error getting apiserver status for %s: %v", p.Name, err) - return "Unknown" + if config.HA(*p.Config) { + switch { + case healthyCPs < 2: + return state.Stopped.String() + case healthyCPs == 2: + return "Degraded" + default: + return "HAppy" + } } - return status.String() + return status } func renderProfilesTable(ps [][]string) { @@ -166,9 +203,15 @@ func profilesToTableData(profiles 
[]*config.Profile) [][]string { var data [][]string currentProfile := ClusterFlagValue() for _, p := range profiles { - cp, err := config.PrimaryControlPlane(p.Config) - if err != nil { - exit.Error(reason.GuestCpConfig, "error getting primary control plane", err) + cpIP := p.Config.KubernetesConfig.APIServerHAVIP + cpPort := p.Config.APIServerPort + if !config.HA(*p.Config) { + cp, err := config.ControlPlane(*p.Config) + if err != nil { + exit.Error(reason.GuestCpConfig, "error getting control-plane node", err) + } + cpIP = cp.IP + cpPort = cp.Port } k8sVersion := p.Config.KubernetesConfig.KubernetesVersion @@ -179,7 +222,7 @@ func profilesToTableData(profiles []*config.Profile) [][]string { if p.Name == currentProfile { c = "*" } - data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), k8sVersion, p.Status, strconv.Itoa(len(p.Config.Nodes)), c}) + data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cpIP, strconv.Itoa(cpPort), k8sVersion, p.Status, strconv.Itoa(len(p.Config.Nodes)), c}) } return data } diff --git a/cmd/minikube/cmd/cp.go b/cmd/minikube/cmd/cp.go index f60a0685331e..3bf65500876e 100644 --- a/cmd/minikube/cmd/cp.go +++ b/cmd/minikube/cmd/cp.go @@ -71,7 +71,7 @@ Example Command : "minikube cp a.txt /home/docker/b.txt" + runner = remoteCommandRunner(&co, dst.node) } else if src.node == "" { // if node name not explicitly specified in both of source and target, - // consider target is controlpanel node for backward compatibility. + // consider the target to be the control-plane node for backward compatibility. runner = co.CP.Runner } else { runner = command.NewExecRunner(false) @@ -84,9 +84,6 @@ Example Command : "minikube cp a.txt /home/docker/b.txt" + }, } -func init() { -} - // setDstFileNameFromSrc sets the src filename as dst filename // when the dst file name is not provided and ends with a `/`. // Otherwise this function is a no-op and returns the passed dst. @@ -211,7 +208,7 @@ func validateArgs(src, dst *remotePath) { } // if node name not explicitly specified in both of source and target, - // consider target node is controlpanel for backward compatibility. + // consider the target node to be the control-plane for backward compatibility. if src.node == "" && dst.node == "" && !strings.HasPrefix(dst.path, "/") { exit.Message(reason.Usage, `Target must be an absolute Path. Relative Path is not allowed (example: "minikube:/home/docker/copied.txt")`) } diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 49da87b9dd11..52090d8d35eb 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -228,8 +228,7 @@ func mustRestartDockerd(name string, runner command.Runner) { // will need to wait for apiserver container to come up, this usually takes 5 seconds // verifying apiserver using kverify would add code complexity for a rare case.
klog.Warningf("waiting to ensure apiserver container is up...") - startTime := time.Now() - if err = waitForAPIServerProcess(runner, startTime, time.Second*30); err != nil { + if err = waitForAPIServerProcess(runner, time.Now(), time.Second*30); err != nil { klog.Warningf("apiserver container isn't up, error: %v", err) } } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index d49e4162f0fb..7e1411160183 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -138,7 +138,7 @@ func shouldSilentFail() bool { api, cc := mustload.Partial(ClusterFlagValue()) - cp, err := config.PrimaryControlPlane(cc) + cp, err := config.ControlPlane(*cc) if err != nil { return false } diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 1e54aec7fb92..73f8fa62694d 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -19,6 +19,7 @@ package cmd import ( "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" @@ -32,8 +33,9 @@ import ( ) var ( - cp bool - worker bool + cpNode bool + workerNode bool + deleteNodeOnFailure bool ) var nodeAddCmd = &cobra.Command{ @@ -48,20 +50,31 @@ var nodeAddCmd = &cobra.Command{ out.FailureT("none driver does not support multi-node clusters") } - name := node.Name(len(cc.Nodes) + 1) + if cpNode && !config.HA(*cc) { + out.FailureT("Adding a control-plane node to a non-HA cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create a new one.") + } + + roles := []string{} + if workerNode { + roles = append(roles, "worker") + } + if cpNode { + roles = append(roles, "control-plane") + } - // for now control-plane feature is not supported - if cp { - out.Step(style.Unsupported, "Adding a control-plane node is not yet supported, setting control-plane flag to false") - cp = false + // calculate an appropriate new node name, with an ID following the last existing one + lastID, err := node.ID(cc.Nodes[len(cc.Nodes)-1].Name) + if err != nil { + lastID = len(cc.Nodes) + out.ErrLn("determining last node index (will assume %d): %v", lastID, err) } + name := node.Name(lastID + 1) - out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) - // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. + out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}", out.V{"name": name, "cluster": cc.Name, "roles": roles}) n := config.Node{ Name: name, - Worker: worker, - ControlPlane: cp, + Worker: workerNode, + ControlPlane: cpNode, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } @@ -77,7 +90,7 @@ var nodeAddCmd = &cobra.Command{ } register.Reg.SetStep(register.InitialSetup) - if err := node.Add(cc, n, false); err != nil { + if err := node.Add(cc, n, deleteNodeOnFailure); err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, n, nil, err) if err != nil { exit.Error(reason.GuestNodeAdd, "failed to add node", err) } @@ -93,10 +106,9 @@ var nodeAddCmd = &cobra.Command{ } func init() { - // TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import - nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "This flag is currently unsupported.") - nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work.
Defaults to true.") - nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") + nodeAddCmd.Flags().BoolVar(&cpNode, "control-plane", false, "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA clusters.") + nodeAddCmd.Flags().BoolVar(&workerNode, "worker", true, "If set, added node will be available as worker. Defaults to true.") + nodeAddCmd.Flags().BoolVar(&deleteNodeOnFailure, "delete-on-failure", false, "If set, delete the current cluster if start fails and try again. Defaults to false.") nodeCmd.AddCommand(nodeAddCmd) } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index d3964a41810c..4e411edd5599 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -56,7 +56,7 @@ var nodeStartCmd = &cobra.Command{ } register.Reg.SetStep(register.InitialSetup) - r, p, m, h, err := node.Provision(cc, n, n.ControlPlane, viper.GetBool(deleteOnFailure)) + r, p, m, h, err := node.Provision(cc, n, viper.GetBool(deleteOnFailure)) if err != nil { exit.Error(reason.GuestNodeProvision, "provisioning host for node", err) } @@ -71,10 +71,8 @@ var nodeStartCmd = &cobra.Command{ ExistingAddons: cc.Addons, } - _, err = node.Start(s, n.ControlPlane) - if err != nil { - _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err) - if err != nil { + if _, err = node.Start(s); err != nil { + if _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err); err != nil { node.ExitIfFatal(err, false) exit.Error(reason.GuestNodeStart, "failed to start node", err) } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 73509071bfcd..8b9a694d53b7 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -266,19 +266,17 @@ func runStart(cmd *cobra.Command, _ []string) { validateBuiltImageVersion(starter.Runner, ds.Name) - if existing != nil && driver.IsKIC(existing.Driver) { - if viper.GetBool(createMount) { - old := "" - if len(existing.ContainerVolumeMounts) > 0 { - old = existing.ContainerVolumeMounts[0] - } - if mount := viper.GetString(mountString); old != mount { - exit.Message(reason.GuestMountConflict, "Sorry, {{.driver}} does not allow mounts to be changed after container creation (previous mount: '{{.old}}', new mount: '{{.new}})'", out.V{ - "driver": existing.Driver, - "new": mount, - "old": old, - }) - } + if existing != nil && driver.IsKIC(existing.Driver) && viper.GetBool(createMount) { + old := "" + if len(existing.ContainerVolumeMounts) > 0 { + old = existing.ContainerVolumeMounts[0] + } + if mount := viper.GetString(mountString); old != mount { + exit.Message(reason.GuestMountConflict, "Sorry, {{.driver}} does not allow mounts to be changed after container creation (previous mount: '{{.old}}', new mount: '{{.new}})'", out.V{ + "driver": existing.Driver, + "new": mount, + "old": old, + }) } } @@ -377,7 +375,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * ssh.SetDefaultClient(ssh.External) } - mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure)) + mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, viper.GetBool(deleteOnFailure)) if err != nil { return node.Starter{}, err } @@ -454,7 +452,8 @@ func imageMatchesBinaryVersion(imageVersion, binaryVersion string) bool { } func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { - 
kubeconfig, err := node.Start(starter, true) + // start primary control-plane node + kubeconfig, err := node.Start(starter) if err != nil { kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) if err != nil { @@ -462,45 +461,44 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } } + // target total and number of control-plane nodes + numCPNodes := 1 numNodes := viper.GetInt(nodes) if existing != nil { - if numNodes > 1 { - // We ignore the --nodes parameter if we're restarting an existing cluster - out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name}) + numCPNodes = 0 + for _, n := range existing.Nodes { + if n.ControlPlane { + numCPNodes++ + } } numNodes = len(existing.Nodes) + } else if viper.GetBool(ha) { + numCPNodes = 3 } - if numNodes > 1 { - if driver.BareMetal(starter.Cfg.Driver) { - exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.") + + // apart from starter, add any additional existing or new nodes + for i := 1; i < numNodes; i++ { + var n config.Node + if existing != nil { + n = existing.Nodes[i] } else { - if existing == nil { - for i := 1; i < numNodes; i++ { - nodeName := node.Name(i + 1) - n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, - ContainerRuntime: starter.Cfg.KubernetesConfig.ContainerRuntime, - } - out.Ln("") // extra newline for clarity on the command line - err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) - if err != nil { - return nil, errors.Wrap(err, "adding node") - } - } - } else { - for _, n := range existing.Nodes { - if !n.ControlPlane { - err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) - if err != nil { - return nil, errors.Wrap(err, "adding node") - } - } - } + nodeName := node.Name(i + 1) + n = config.Node{ + Name: nodeName, + Port: starter.Cfg.APIServerPort, + KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + ContainerRuntime: starter.Cfg.KubernetesConfig.ContainerRuntime, + Worker: true, + } + if i < numCPNodes { // starter node is also counted as (primary) cp node + n.ControlPlane = true } } + + out.Ln("") // extra newline for clarity on the command line + if err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)); err != nil { + return nil, errors.Wrap(err, "adding node") + } } pause.RemovePausedFile(starter.Runner) @@ -626,7 +624,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co cc := updateExistingConfigFromFlags(cmd, &existing) var kubeconfig *kubeconfig.Settings for _, n := range cc.Nodes { - r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false) + r, p, m, h, err := node.Provision(&cc, &n, false) s := node.Starter{ Runner: r, PreExists: p, @@ -641,7 +639,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co return nil, err } - k, err := node.Start(s, n.ControlPlane) + k, err := node.Start(s) if n.ControlPlane { kubeconfig = k } @@ -793,24 +791,23 @@ func hostDriver(existing *config.ClusterConfig) string { if existing == nil { return "" } + api, err := machine.NewAPIClient() if err != nil { klog.Warningf("selectDriver NewAPIClient: %v", err) return existing.Driver } - cp, err := config.PrimaryControlPlane(existing) + cp, err := 
config.ControlPlane(*existing) if err != nil { - klog.Warningf("Unable to get control plane from existing config: %v", err) + klog.Errorf("Unable to get primary control-plane node from existing config: %v", err) return existing.Driver } + machineName := config.MachineName(*existing, cp) h, err := api.Load(machineName) if err != nil { - klog.Warningf("api.Load failed for %s: %v", machineName, err) - if existing.VMDriver != "" { - return existing.VMDriver - } + klog.Errorf("api.Load failed for %s: %v", machineName, err) return existing.Driver } @@ -1280,6 +1277,7 @@ func validateFlags(cmd *cobra.Command, drvName string) { if cmd.Flags().Changed(imageRepository) { viper.Set(imageRepository, validateImageRepository(viper.GetString(imageRepository))) } + if cmd.Flags().Changed(ports) { err := validatePorts(viper.GetStringSlice(ports)) if err != nil { @@ -1665,48 +1663,44 @@ func validateInsecureRegistry() { } } -func createNode(cc config.ClusterConfig, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) { - // Create the initial node, which will necessarily be a control plane - if existing != nil { - cp, err := config.PrimaryControlPlane(existing) - if err != nil { - return cc, config.Node{}, err - } - cp.KubernetesVersion, err = getKubernetesVersion(&cc) - if err != nil { - klog.Warningf("failed getting Kubernetes version: %v", err) - } - cp.ContainerRuntime = getContainerRuntime(&cc) +// configureNodes creates the primary control-plane node config on first cluster start, or updates the existing cluster nodes' configs on restart. +// It returns the updated cluster config and the primary control-plane node, or any error that occurred. +func configureNodes(cc config.ClusterConfig, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) { + kv, err := getKubernetesVersion(&cc) + if err != nil { + return cc, config.Node{}, errors.Wrapf(err, "failed getting Kubernetes version") + } + cr := getContainerRuntime(&cc) - // Make sure that existing nodes honor if KubernetesVersion gets specified on restart - // KubernetesVersion is the only attribute that the user can override in the Node object - nodes := []config.Node{} - for _, n := range existing.Nodes { - n.KubernetesVersion, err = getKubernetesVersion(&cc) - if err != nil { - klog.Warningf("failed getting Kubernetes version: %v", err) - } - n.ContainerRuntime = getContainerRuntime(&cc) - nodes = append(nodes, n) + // create the initial node, which will necessarily be the primary control-plane node + if existing == nil { + pcp := config.Node{ + Port: cc.APIServerPort, + KubernetesVersion: kv, + ContainerRuntime: cr, + ControlPlane: true, + Worker: true, } - cc.Nodes = nodes + cc.Nodes = []config.Node{pcp} + return cc, pcp, nil + } - return cc, cp, nil + // Make sure that existing nodes honor if KubernetesVersion gets specified on restart + // KubernetesVersion is the only attribute that the user can override in the Node object + nodes := []config.Node{} + for _, n := range existing.Nodes { + n.KubernetesVersion = kv + n.ContainerRuntime = cr + nodes = append(nodes, n) } + cc.Nodes = nodes - kubeVer, err := getKubernetesVersion(&cc) + pcp, err := config.ControlPlane(*existing) if err != nil { - klog.Warningf("failed getting Kubernetes version: %v", err) + return cc, config.Node{}, errors.Wrapf(err, "failed getting control-plane node") } - cp := config.Node{ - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: kubeVer, - ContainerRuntime: getContainerRuntime(&cc), - ControlPlane: true, - Worker: true, - } - cc.Nodes =
[]config.Node{cp} - return cc, cp, nil + + return cc, pcp, nil } // autoSetDriverOptions sets the options needed for specific driver automatically. @@ -1970,6 +1964,10 @@ func validateBareMetal(drvName string) { return } + if viper.GetInt(nodes) > 1 || viper.GetBool(ha) { + exit.Message(reason.DrvUnsupportedMulti, "The none driver is not compatible with multi-node clusters.") + } + if ClusterFlagValue() != constants.DefaultClusterName { exit.Message(reason.DrvUnsupportedProfile, "The '{{.name}} driver does not support multiple profiles: https://minikube.sigs.k8s.io/docs/reference/drivers/none/", out.V{"name": drvName}) } diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 22b1378dd874..bd04a037824f 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -115,6 +115,7 @@ const ( autoUpdate = "auto-update-drivers" hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" + ha = "ha" nodes = "nodes" preload = "preload" deleteOnFailure = "delete-on-failure" @@ -190,7 +191,8 @@ func initMinikubeFlags() { startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") - startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") + startCmd.Flags().Bool(ha, false, "Create a highly available cluster with a minimum of three control-plane nodes that will also be marked as worker nodes.") + startCmd.Flags().IntP(nodes, "n", 1, "The total number of nodes to spin up. Defaults to 1.") startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.") startCmd.Flags().Bool(noKubernetes, false, "If set, minikube VM/container will start without starting or configuring Kubernetes. (only works on new clusters)") startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again.
Defaults to false.") @@ -301,8 +303,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k cc = updateExistingConfigFromFlags(cmd, existing) // identify appropriate cni then configure cruntime accordingly - _, err := cni.New(&cc) - if err != nil { + if _, err := cni.New(&cc); err != nil { return cc, config.Node{}, errors.Wrap(err, "cni") } } else { @@ -333,7 +334,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k proxy.SetDockerEnv() } - return createNode(cc, existing) + return configureNodes(cc, existing) } func getCPUCount(drvName string) int { @@ -518,6 +519,8 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime str out.WarningT("--network flag is only valid with the docker/podman, KVM and Qemu drivers, it will be ignored") } + validateHANodeCount(cmd) + checkNumaCount(k8sVersion) checkExtraDiskOptions(cmd, drvName) @@ -552,6 +555,7 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime str KVMGPU: viper.GetBool(kvmGPU), KVMHidden: viper.GetBool(kvmHidden), KVMNUMACount: viper.GetInt(kvmNUMACount), + APIServerPort: viper.GetInt(apiServerPort), DisableDriverMounts: viper.GetBool(disableDriverMounts), UUID: viper.GetString(uuid), NoVTXCheck: viper.GetBool(noVTXCheck), @@ -601,9 +605,8 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime str ExtraOptions: getExtraOptions(), ShouldLoadCachedImages: viper.GetBool(cacheImages), CNI: getCNIConfig(cmd), - NodePort: viper.GetInt(apiServerPort), }, - MultiNodeRequested: viper.GetInt(nodes) > 1, + MultiNodeRequested: viper.GetInt(nodes) > 1 || viper.GetBool(ha), AutoPauseInterval: viper.GetDuration(autoPauseInterval), GPUs: viper.GetString(gpus), } @@ -668,6 +671,23 @@ func addFeatureGate(featureGates, s string) string { return strings.Join(split, ",") } +// validateHANodeCount ensures correct total number of nodes in HA cluster. +func validateHANodeCount(cmd *cobra.Command) { + if !viper.GetBool(ha) { + return + } + + // set total number of nodes in ha cluster to 3, if not otherwise defined by user + if !cmd.Flags().Changed(nodes) { + viper.Set(nodes, 3) + } + + // respect user preference, if correct + if cmd.Flags().Changed(nodes) && viper.GetInt(nodes) < 3 { + exit.Message(reason.Usage, "HA clusters require 3 or more control-plane nodes") + } +} + func checkNumaCount(k8sVersion string) { if viper.GetInt(kvmNUMACount) < 1 || viper.GetInt(kvmNUMACount) > 8 { exit.Message(reason.Usage, "--kvm-numa-count range is 1-8") @@ -690,11 +710,6 @@ func upgradeExistingConfig(cmd *cobra.Command, cc *config.ClusterConfig) { return } - if cc.VMDriver != "" && cc.Driver == "" { - klog.Infof("config upgrade: Driver=%s", cc.VMDriver) - cc.Driver = cc.VMDriver - } - if cc.Name == "" { klog.Infof("config upgrade: Name=%s", ClusterFlagValue()) cc.Name = ClusterFlagValue() @@ -717,28 +732,32 @@ func upgradeExistingConfig(cmd *cobra.Command, cc *config.ClusterConfig) { cc.Memory = memInMB } - // pre minikube 1.9.2 cc.KubernetesConfig.NodePort was not populated. - // in minikube config there were two fields for api server port. - // one in cc.KubernetesConfig.NodePort and one in cc.Nodes.Port - // this makes sure api server port not be set as 0! 
- if cc.KubernetesConfig.NodePort == 0 { - cc.KubernetesConfig.NodePort = viper.GetInt(apiServerPort) - } - if cc.CertExpiration == 0 { cc.CertExpiration = constants.DefaultCertExpiration } - } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start -// skipping updating existing docker env , docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips +// skipping updating existing docker env, docker opt, InsecureRegistry, registryMirror, extra-config, apiserver-ips func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterConfig) config.ClusterConfig { //nolint to suppress cyclomatic complexity 45 of func `updateExistingConfigFromFlags` is high (> 30) - validateFlags(cmd, existing.Driver) cc := *existing + if cmd.Flags().Changed(nodes) { + out.WarningT("You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.") + } + + if cmd.Flags().Changed(ha) { + out.WarningT("Changing the HA mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") + } + + if cmd.Flags().Changed(apiServerPort) && viper.GetBool(ha) { + out.WarningT("Changing the apiserver port of an existing minikube ha cluster is not currently supported. Please first delete the cluster.") + } else { + updateIntFromFlag(cmd, &cc.APIServerPort, apiServerPort) + } + if cmd.Flags().Changed(memory) && getMemorySize(cmd, cc.Driver) != cc.Memory { out.WarningT("You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.") } @@ -803,7 +822,6 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC updateStringFromFlag(cmd, &cc.KubernetesConfig.NetworkPlugin, networkPlugin) updateStringFromFlag(cmd, &cc.KubernetesConfig.ServiceCIDR, serviceCIDR) updateBoolFromFlag(cmd, &cc.KubernetesConfig.ShouldLoadCachedImages, cacheImages) - updateIntFromFlag(cmd, &cc.KubernetesConfig.NodePort, apiServerPort) updateDurationFromFlag(cmd, &cc.CertExpiration, certExpiration) updateBoolFromFlag(cmd, &cc.Mount, createMount) updateStringFromFlag(cmd, &cc.MountString, mountString) diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index aaee94cbb734..f63a77cd40a8 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -403,18 +403,21 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St if cc.Addons["auto-pause"] { hostname, _, port, err = driver.AutoPauseProxyEndpoint(&cc, &n, host.DriverName) } else { - hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) + if config.HA(cc) { + hostname = cc.KubernetesConfig.APIServerHAVIP + port = cc.APIServerPort + err = nil // checked below + } else { + hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) + } } if err != nil { klog.Errorf("forwarded endpoint: %v", err) st.Kubeconfig = Misconfigured - } else { - err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) - if err != nil && st.Host != state.Starting.String() { - klog.Errorf("kubeconfig endpoint: %v", err) - st.Kubeconfig = Misconfigured - } + } else if err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port, ""); err != nil && st.Host != state.Starting.String() { + klog.Errorf("kubeconfig endpoint: %v", err) + st.Kubeconfig = Misconfigured } sta, err := kverify.APIServerStatus(cr, hostname, port) diff --git 
a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index a34449871c23..3b3e1beed779 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -134,7 +134,9 @@ func stopProfile(profile string) int { out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err}) } - for _, n := range cc.Nodes { + // stop nodes in reverse order, so that the primary control-plane node is stopped last and will be started first next time + for i := len(cc.Nodes) - 1; i >= 0; i-- { + n := cc.Nodes[i] machineName := config.MachineName(*cc, n) nonexistent := stop(api, machineName) diff --git a/pkg/addons/addons.go b/pkg/addons/addons.go index c03551d86ca1..3dec0cb6c9b0 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -255,9 +255,9 @@ func EnableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err } defer api.Close() - cp, err := config.PrimaryControlPlane(cc) + cp, err := config.ControlPlane(*cc) if err != nil { - exit.Error(reason.GuestCpConfig, "Error getting primary control plane", err) + exit.Error(reason.GuestCpConfig, "Error getting control-plane node", err) } // maintain backwards compatibility for ingress and ingress-dns addons with k8s < v1.19 @@ -505,7 +505,7 @@ func Enable(wg *sync.WaitGroup, cc *config.ClusterConfig, toEnable map[string]bo klog.Infof("enable addons start: toEnable=%v", toEnable) var enabledAddons []string defer func() { - klog.Infof("enable addons completed in %s: enabled=%v", time.Since(start), enabledAddons) + klog.Infof("duration metric: took %s for enable addons: enabled=%v", time.Since(start), enabledAddons) }() toEnableList := []string{} @@ -610,9 +610,9 @@ func VerifyNotPaused(profile string, enable bool) error { } defer api.Close() - cp, err := config.PrimaryControlPlane(cc) + cp, err := config.ControlPlane(*cc) if err != nil { - return errors.Wrap(err, "control plane") + return errors.Wrap(err, "get control-plane node") } host, err := machine.LoadHost(api, config.MachineName(*cc, cp)) diff --git a/pkg/addons/addons_storage_classes.go b/pkg/addons/addons_storage_classes.go index 949c28228df4..b30f2eb4f306 100644 --- a/pkg/addons/addons_storage_classes.go +++ b/pkg/addons/addons_storage_classes.go @@ -49,12 +49,12 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st } defer api.Close() - cp, err := config.PrimaryControlPlane(cc) - if err != nil { - return errors.Wrap(err, "getting control plane") + pcp, err := config.ControlPlane(*cc) + if err != nil { + return errors.Wrap(err, "get primary control-plane node") + } + if !config.IsPrimaryControlPlane(pcp) { + return errors.New("expected a primary control-plane node") } - if !machine.IsRunning(api, config.MachineName(*cc, cp)) { - klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", config.MachineName(*cc, cp), name, val) + if !machine.IsRunning(api, config.MachineName(*cc, pcp)) { + klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", config.MachineName(*cc, pcp), name, val) return EnableOrDisableAddon(cc, name, val) } diff --git a/pkg/addons/addons_test.go b/pkg/addons/addons_test.go index 9ea5046e21af..6361421ea7a4 100644 --- a/pkg/addons/addons_test.go +++ b/pkg/addons/addons_test.go @@ -45,6 +45,7 @@ func createTestProfile(t *testing.T) string { CPUs: 2, Memory: 2500, KubernetesConfig: config.KubernetesConfig{}, + Nodes: []config.Node{{ControlPlane: true}}, } if err := config.DefaultLoader.WriteConfigToFile(name, cc); err != nil { @@ -54,7 +55,10 @@ } func TestIsAddonAlreadySet(t *testing.T) { - cc :=
&config.ClusterConfig{Name: "test"} + cc := &config.ClusterConfig{ + Name: "test", + Nodes: []config.Node{{ControlPlane: true}}, + } if err := Set(cc, "registry", "true"); err != nil { t.Errorf("unable to set registry true: %v", err) @@ -70,7 +74,10 @@ func TestIsAddonAlreadySet(t *testing.T) { } func TestDisableUnknownAddon(t *testing.T) { - cc := &config.ClusterConfig{Name: "test"} + cc := &config.ClusterConfig{ + Name: "test", + Nodes: []config.Node{{ControlPlane: true}}, + } if err := Set(cc, "InvalidAddon", "false"); err == nil { t.Fatalf("Disable did not return error for unknown addon") @@ -78,7 +85,10 @@ func TestDisableUnknownAddon(t *testing.T) { } func TestEnableUnknownAddon(t *testing.T) { - cc := &config.ClusterConfig{Name: "test"} + cc := &config.ClusterConfig{ + Name: "test", + Nodes: []config.Node{{ControlPlane: true}}, + } if err := Set(cc, "InvalidAddon", "true"); err == nil { t.Fatalf("Enable did not return error for unknown addon") @@ -124,6 +134,7 @@ func TestStartWithAddonsEnabled(t *testing.T) { CPUs: 2, Memory: 2500, KubernetesConfig: config.KubernetesConfig{}, + Nodes: []config.Node{{ControlPlane: true}}, } toEnable := ToEnable(cc, map[string]bool{}, []string{"dashboard"}) @@ -150,6 +161,7 @@ func TestStartWithAllAddonsDisabled(t *testing.T) { CPUs: 2, Memory: 2500, KubernetesConfig: config.KubernetesConfig{}, + Nodes: []config.Node{{ControlPlane: true}}, } UpdateConfigToDisable(cc) diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index f4dc27c56ddd..9fae7a457c00 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -114,7 +114,7 @@ func (d *Driver) Create() error { ip := gateway.To4() // calculate the container IP based on guessing the machine index index := driver.IndexFromMachineName(d.NodeConfig.MachineName) - if int(ip[3])+index > 255 { + if int(ip[3])+index > 253 { // reserve last client ip address for multi-control-plane loadbalancer vip address in ha cluster return fmt.Errorf("too many machines to calculate an IP") } ip[3] += byte(index) @@ -200,7 +200,7 @@ func (d *Driver) Create() error { } klog.Infof("Unable to extract preloaded tarball to volume: %v", err) } else { - klog.Infof("duration metric: took %f seconds to extract preloaded images to volume", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to extract preloaded images to volume ...", time.Since(t)) } }() waitForPreload.Wait() diff --git a/pkg/drivers/kvm/network.go b/pkg/drivers/kvm/network.go index f33b02b8f4a6..6b4312c78ba4 100644 --- a/pkg/drivers/kvm/network.go +++ b/pkg/drivers/kvm/network.go @@ -22,6 +22,7 @@ import ( "bytes" "encoding/xml" "fmt" + "net" "text/template" "time" @@ -197,6 +198,12 @@ func (d *Driver) createNetwork() error { log.Debugf("failed to find free subnet for private KVM network %s after %d attempts: %v", d.PrivateNetwork, 20, err) return fmt.Errorf("un-retryable: %w", err) } + + // reserve last client ip address for multi-control-plane loadbalancer vip address in ha cluster + clientMaxIP := net.ParseIP(subnet.ClientMax) + clientMaxIP.To4()[3]-- + subnet.ClientMax = clientMaxIP.String() + // create the XML for the private network from our networkTmpl tryNet := kvmNetwork{ Name: d.PrivateNetwork, @@ -207,12 +214,15 @@ func (d *Driver) createNetwork() error { if err = tmpl.Execute(&networkXML, tryNet); err != nil { return fmt.Errorf("executing private KVM network template: %w", err) } + log.Debugf("created network xml: %s", networkXML.String()) + // define the network using our template var network *libvirt.Network network, err = 
conn.NetworkDefineXML(networkXML.String()) if err != nil { return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err) } + // and finally create & start it log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR) if err = network.Create(); err == nil { diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index f441711eb615..cb50b535949d 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -125,7 +125,7 @@ func (d *Driver) GetURL() (string, error) { // GetState returns the state that the host is in (running, stopped, etc) func (d *Driver) GetState() (state.State, error) { - hostname, port, err := kubeconfig.Endpoint(d.BaseDriver.MachineName) + hostname, port, err := kubeconfig.Endpoint(d.BaseDriver.MachineName, "") if err != nil { klog.Warningf("unable to get port: %v", err) port = constants.APIServerPort diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 90163e392c1f..a51ccc13e631 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -35,7 +35,8 @@ type LogOptions struct { // Bootstrapper contains all the methods needed to bootstrap a Kubernetes cluster type Bootstrapper interface { - ApplyNodeLabels(config.ClusterConfig) error + // LabelAndUntaintNode applies minikube labels to node and removes NoSchedule taints from control-plane nodes. + LabelAndUntaintNode(config.ClusterConfig, config.Node) error StartCluster(config.ClusterConfig) error UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error @@ -45,7 +46,8 @@ type Bootstrapper interface { GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(config.ClusterConfig, LogOptions) map[string]string - SetupCerts(config.ClusterConfig, config.Node) error + // SetupCerts gets the generated credentials required to talk to the APIServer. 
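// Illustrative call site (a sketch; the cfg/n/runner names are assumptions, not code from this patch): with the new signature, callers pass the target node's own command runner explicitly, e.g.: if err := bs.SetupCerts(cfg, n, runner); err != nil { return errors.Wrap(err, "setting up certs") }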
+ SetupCerts(config.ClusterConfig, config.Node, cruntime.CommandRunner) error GetAPIServerStatus(string, int) (string, error) } diff --git a/pkg/minikube/bootstrapper/bsutil/extraconfig.go b/pkg/minikube/bootstrapper/bsutil/extraconfig.go index 6c5f13216500..d6f296669271 100644 --- a/pkg/minikube/bootstrapper/bsutil/extraconfig.go +++ b/pkg/minikube/bootstrapper/bsutil/extraconfig.go @@ -169,7 +169,7 @@ func newComponentOptions(opts config.ExtraOptionSlice, version semver.Version, f kubeadmExtraArgs = append(kubeadmExtraArgs, componentOptions{ Component: kubeadmComponentKey, ExtraArgs: extraConfig, - Pairs: optionPairsForComponent(component, version, cp), + Pairs: optionPairsForComponent(component, cp), }) } } @@ -178,9 +178,8 @@ func newComponentOptions(opts config.ExtraOptionSlice, version semver.Version, f } // optionPairsForComponent generates a map of value pairs for a k8s component -func optionPairsForComponent(component string, version semver.Version, cp config.Node) map[string]string { - // For the ktmpl.V1Beta1 users - if component == Apiserver && version.GTE(semver.MustParse("1.14.0-alpha.0")) { +func optionPairsForComponent(component string, cp config.Node) map[string]string { + if component == Apiserver { return map[string]string{ "certSANs": fmt.Sprintf(`["127.0.0.1", "localhost", "%s"]`, cp.IP), } diff --git a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go b/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go deleted file mode 100644 index 217c469c944b..000000000000 --- a/pkg/minikube/bootstrapper/bsutil/ktmpl/v1alpha3.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ktmpl - -import "text/template" - -// V1Alpha3 is for Kubernetes v1.12 -var V1Alpha3 = template.Must(template.New("configTmpl-v1alpha3").Funcs(template.FuncMap{ - "printMapInOrder": printMapInOrder, -}).Parse(`apiVersion: kubeadm.k8s.io/v1alpha3 -kind: InitConfiguration -apiEndpoint: - advertiseAddress: {{.AdvertiseAddress}} - bindPort: {{.APIServerPort}} -bootstrapTokens: - - groups: - - system:bootstrappers:kubeadm:default-node-token - ttl: 24h0m0s - usages: - - signing - - authentication -nodeRegistration: - criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}} - name: "{{.NodeName}}" - kubeletExtraArgs: - node-ip: {{.NodeIP}} - taints: [] ---- -apiVersion: kubeadm.k8s.io/v1alpha3 -kind: ClusterConfiguration -{{if .ImageRepository}}imageRepository: {{.ImageRepository}} -{{end}}{{range .ComponentOptions}}{{.Component}}ExtraArgs:{{range $i, $val := printMapInOrder .ExtraArgs ": " }} - {{$val}}{{end}} -{{end -}} -{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}} - {{$i}}: {{$val}}{{end}} -{{end -}} -certificatesDir: {{.CertDir}} -clusterName: {{.ClusterName}} -apiServerCertSANs: ["127.0.0.1", "localhost", "{{.AdvertiseAddress}}"] -controlPlaneEndpoint: {{.ControlPlaneAddress}}:{{.APIServerPort}} -etcd: - local: - dataDir: {{.EtcdDataDir}} -controllerManagerExtraArgs: - allocate-node-cidrs: "true" - leader-elect: "false" -schedulerExtraArgs: - leader-elect: "false" -kubernetesVersion: {{.KubernetesVersion}} -networking: - dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}} - podSubnet: "{{ .PodSubnet }}" - serviceSubnet: {{.ServiceCIDR}} ---- -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -cgroupDriver: {{.CgroupDriver}} -clusterDomain: "{{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}" -# disable disk resource management by default -imageGCHighThresholdPercent: 100 -evictionHard: - nodefs.available: "0%" - nodefs.inodesFree: "0%" - imagefs.available: "0%" -failSwapOn: false -staticPodPath: {{.StaticPodPath}} -`)) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index d6b960aca6ba..87a3b51a658e 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -25,6 +25,7 @@ import ( "github.com/blang/semver/v4" "github.com/pkg/errors" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/ktmpl" "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" @@ -37,7 +38,7 @@ import ( // Container runtimes const remoteContainerRuntime = "remote" -// GenerateKubeadmYAML generates the kubeadm.yaml file +// GenerateKubeadmYAML generates the kubeadm.yaml file for primary control-plane node. 
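// Illustrative result (a sketch, not literal output of this patch): for an HA cluster the rendered ClusterConfiguration is expected to pin the shared endpoint, e.g. controlPlaneEndpoint: control-plane.minikube.internal:8443, so that the apiserver on every control-plane node is reachable behind the kube-vip-managed virtual IP rather than a single node's address.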
func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) { k8s := cc.KubernetesConfig version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) @@ -52,11 +53,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana } // In case of no port assigned, use default - cp, err := config.PrimaryControlPlane(&cc) - if err != nil { - return nil, errors.Wrap(err, "getting control plane") - } - nodePort := cp.Port + nodePort := n.Port if nodePort <= 0 { nodePort = constants.APIServerPort } @@ -69,7 +66,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana return nil, errors.Wrap(err, "getting cgroup driver") } - componentOpts, err := createExtraComponentConfig(k8s.ExtraOptions, version, componentFeatureArgs, cp) + componentOpts, err := createExtraComponentConfig(k8s.ExtraOptions, version, componentFeatureArgs, n) if err != nil { return nil, errors.Wrap(err, "generating extra component config for kubeadm") } @@ -88,6 +85,15 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana // ref: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration kubeletConfigOpts := kubeletConfigOpts(k8s.ExtraOptions) + // the container-runtime-endpoint kubelet flag was deprecated, but the corresponding containerRuntimeEndpoint kubelet config field is "required" and only supported from k8s v1.27 + // ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options + // ref: https://github.com/kubernetes/kubernetes/issues/118787 + if version.GTE(semver.MustParse("1.27.0")) { + kubeletConfigOpts["containerRuntimeEndpoint"] = k8s.ExtraOptions.Get("container-runtime-endpoint", Kubelet) + if kubeletConfigOpts["containerRuntimeEndpoint"] == "" { + kubeletConfigOpts["containerRuntimeEndpoint"] = r.KubeletOptions()["container-runtime-endpoint"] + } + } // set hairpin mode to hairpin-veth to achieve hairpin NAT, because promiscuous-bridge assumes the existence of a container bridge named cbr0 // ref: https://kubernetes.io/docs/tasks/debug/debug-application/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip kubeletConfigOpts["hairpinMode"] = k8s.ExtraOptions.Get("hairpin-mode", Kubelet) @@ -156,29 +162,29 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana opts.ServiceCIDR = k8s.ServiceCIDR } - configTmpl := ktmpl.V1Alpha3 - // v1beta1 works in v1.13, but isn't required until v1.14. - if version.GTE(semver.MustParse("1.14.0-alpha.0")) { - configTmpl = ktmpl.V1Beta1 - } + configTmpl := ktmpl.V1Beta1 // v1beta2 isn't required until v1.17. if version.GTE(semver.MustParse("1.17.0")) { configTmpl = ktmpl.V1Beta2 } - // v1beta3 isn't required until v1.23.
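// In short, the template selection maps: k8s below v1.17 -> ktmpl.V1Beta1, v1.17–v1.22 -> ktmpl.V1Beta2, and (below) v1.23+ -> ktmpl.V1Beta3, now that the v1alpha3 template for k8s v1.12 is removed.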
if version.GTE(semver.MustParse("1.23.0")) { configTmpl = ktmpl.V1Beta3 } + // TODO: support v1beta4 kubeadm config when released - refs: https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta4/ and https://github.com/kubernetes/kubeadm/issues/2890 + if version.GTE(semver.MustParse("1.24.0-alpha.2")) { opts.PrependCriSocketUnix = true } + klog.Infof("kubeadm options: %+v", opts) + b := bytes.Buffer{} if err := configTmpl.Execute(&b, opts); err != nil { return nil, err } klog.Infof("kubeadm config:\n%s\n", b.String()) + return b.Bytes(), nil } diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go index 13dc81d8076f..5b9a30702702 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm_test.go @@ -146,7 +146,18 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) { } for _, version := range versions { for _, tc := range tests { - runtime, err := cruntime.New(cruntime.Config{Type: tc.runtime, Runner: fcr}) + socket := "" + switch tc.runtime { + case constants.Docker: + socket = "/var/run/dockershim.sock" + case constants.CRIO: + socket = "/var/run/crio/crio.sock" + case constants.Containerd: + socket = "/run/containerd/containerd.sock" + default: + socket = "/var/run/dockershim.sock" + } + runtime, err := cruntime.New(cruntime.Config{Type: tc.runtime, Runner: fcr, Socket: socket}) if err != nil { t.Fatalf("runtime: %v", err) } @@ -232,7 +243,18 @@ func TestGenerateKubeadmYAML(t *testing.T) { } for _, version := range versions { for _, tc := range tests { - runtime, err := cruntime.New(cruntime.Config{Type: tc.runtime, Runner: fcr}) + socket := "" + switch tc.runtime { + case constants.Docker: + socket = "/var/run/dockershim.sock" + case constants.CRIO: + socket = "/var/run/crio/crio.sock" + case constants.Containerd: + socket = "/run/containerd/containerd.sock" + default: + socket = "/var/run/dockershim.sock" + } + runtime, err := cruntime.New(cruntime.Config{Type: tc.runtime, Runner: fcr, Socket: socket}) if err != nil { t.Fatalf("runtime: %v", err) } diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 39658b940f7e..e0811107eb42 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -85,6 +85,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = nc.IP } + if _, ok := extraOpts["hostname-override"]; !ok { nodeName := KubeNodeName(mc, nc) extraOpts["hostname-override"] = nodeName @@ -98,6 +99,13 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage } } + // container-runtime-endpoint kubelet flag was deprecated but corresponding containerRuntimeEndpoint kubelet config field is "required" and supported from k8s v1.27 + // ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options + // ref: https://github.com/kubernetes/kubernetes/issues/118787 + if version.GTE(semver.MustParse("1.27.0")) { + kubeletConfigParams = append(kubeletConfigParams, "container-runtime-endpoint") + } + // parses a map of the feature gates for kubelet _, kubeletFeatureArgs, err := parseFeatureArgs(k8s.FeatureGates) if err != nil { diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go index 919adaf5cb36..17d5172b0346 100644 --- 
a/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/node_ready.go @@ -35,7 +35,7 @@ func WaitNodeCondition(cs *kubernetes.Clientset, name string, condition core.Nod klog.Infof("waiting up to %v for node %q to be %q ...", timeout, name, condition) start := time.Now() defer func() { - klog.Infof("duration metric: took %v waiting for node %q to be %q ...", time.Since(start), name, condition) + klog.Infof("duration metric: took %s for node %q to be %q ...", time.Since(start), name, condition) }() lap := time.Now() @@ -49,11 +49,6 @@ func WaitNodeCondition(cs *kubernetes.Clientset, name string, condition core.Nod klog.Info(reason) return true, nil } - if status == core.ConditionUnknown { - klog.Info(reason) - return false, fmt.Errorf(reason) - } - // reduce log spam if time.Since(lap) > (2 * time.Second) { klog.Info(reason) lap = time.Now() diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go index c2cc9fd63a0f..1c576e4ff6ee 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/pod_ready.go @@ -78,7 +78,7 @@ func waitPodCondition(cs *kubernetes.Clientset, name, namespace string, conditio klog.Infof("waiting up to %v for pod %q in %q namespace to be %q ...", timeout, name, namespace, condition) start := time.Now() defer func() { - klog.Infof("duration metric: took %v waiting for pod %q in %q namespace to be %q ...", time.Since(start), name, namespace, condition) + klog.Infof("duration metric: took %s for pod %q in %q namespace to be %q ...", time.Since(start), name, namespace, condition) }() lap := time.Now() diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/system_svc.go b/pkg/minikube/bootstrapper/bsutil/kverify/system_svc.go index 352ee7cf9c3e..5322693a1bd9 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/system_svc.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/system_svc.go @@ -53,7 +53,7 @@ func WaitForService(cr command.Runner, svc string, timeout time.Duration) error return fmt.Errorf("not running: %s", err) } - klog.Infof("duration metric: took %s WaitForService to wait for %s.", time.Since(pStart), svc) + klog.Infof("duration metric: took %s WaitForService to wait for %s", time.Since(pStart), svc) return nil diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-api-port.yaml index 042cefc67732..91456400ce51 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-api-port.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-pod-network-cidr.yaml index 1c9e6895bc01..0d36ce5168b0 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd-pod-network-cidr.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth 
runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd.yaml index 85a483cf6203..d3887911c3a4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/containerd.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio-options-gates.yaml index 887c8e0011a1..88a966955115 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio-options-gates.yaml @@ -56,6 +56,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio.yaml index 40719ae985ef..e221977de4fc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/crio.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/default.yaml index cd67c01b5426..88c75cf54245 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/default.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/dns.yaml index 03d274dec6f5..cfd7b7a1c0cb 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/dns.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "minikube.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/image-repository.yaml index 27b1151379df..00a9791e6772 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/image-repository.yaml @@ -51,6 +51,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/options.yaml index 7c7c5fe2928c..cca2d609d610 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.27/options.yaml @@ -53,6 +53,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-api-port.yaml index ff5198290b66..00a1377899e7 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-api-port.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-pod-network-cidr.yaml index d6e18ef7b225..1b622c8c6c51 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd-pod-network-cidr.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd.yaml index 10400b54d63f..0bad8314e0ea 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/containerd.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio-options-gates.yaml index b60e76ed19cf..ce0ec108a096 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio-options-gates.yaml @@ -56,6 +56,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio.yaml index 69f9403e2712..09f6307c5222 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/crio.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff 
--git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/default.yaml index 7bc34ac48c87..e3468a4948e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/default.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/dns.yaml index cf77babc8642..054feace83ac 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/dns.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "minikube.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/image-repository.yaml index 12776d746895..d7e432a854d1 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/image-repository.yaml @@ -51,6 +51,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/options.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/options.yaml index 36edb2135fcf..89c9cbe162c4 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.28/options.yaml @@ -53,6 +53,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-api-port.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-api-port.yaml index 75b5ecde2c42..47f371b30101 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-api-port.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-api-port.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-pod-network-cidr.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-pod-network-cidr.yaml index 1d909f31ea07..9b26c75e7c6c 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-pod-network-cidr.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd-pod-network-cidr.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git 
a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd.yaml index 93b3cd3f2b8f..a527a0a5bd7b 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/containerd.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///run/containerd/containerd.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio-options-gates.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio-options-gates.yaml index 028b1488d082..2a03d33906e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio-options-gates.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio-options-gates.yaml @@ -56,6 +56,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio.yaml index c0f35140234e..725519fa30d1 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/crio.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/crio/crio.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/default.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/default.yaml index f1617705fd63..3b0247bc9e97 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/default.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/default.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/dns.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/dns.yaml index 018a33029fdc..ed17755b7744 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/dns.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/dns.yaml @@ -50,6 +50,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "minikube.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/image-repository.yaml b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/image-repository.yaml index 2ff0ab48075d..2a3a6b266eac 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/image-repository.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/image-repository.yaml @@ -51,6 +51,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/options.yaml 
b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/options.yaml index 658319e0235a..f8620d6585bc 100644 --- a/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/options.yaml +++ b/pkg/minikube/bootstrapper/bsutil/testdata/v1.29/options.yaml @@ -53,6 +53,7 @@ authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd +containerRuntimeEndpoint: unix:///var/run/dockershim.sock hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" diff --git a/pkg/minikube/bootstrapper/bsutil/versions.go b/pkg/minikube/bootstrapper/bsutil/versions.go index 3f0a294953c4..d142a0e3ceca 100644 --- a/pkg/minikube/bootstrapper/bsutil/versions.go +++ b/pkg/minikube/bootstrapper/bsutil/versions.go @@ -17,12 +17,10 @@ limitations under the License. package bsutil import ( - "path" "strings" "github.com/blang/semver/v4" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" ) @@ -42,60 +40,14 @@ var versionSpecificOpts = []config.VersionedExtraOption{ config.NewUnversionedOption(Kubelet, "bootstrap-kubeconfig", "/etc/kubernetes/bootstrap-kubelet.conf"), config.NewUnversionedOption(Kubelet, "config", "/var/lib/kubelet/config.yaml"), config.NewUnversionedOption(Kubelet, "kubeconfig", "/etc/kubernetes/kubelet.conf"), - { - Option: config.ExtraOption{ - Component: Kubelet, - Key: "require-kubeconfig", - Value: "true", - }, - LessThanOrEqual: semver.MustParse("1.9.10"), - }, - - { - Option: config.ExtraOption{ - Component: Kubelet, - Key: "allow-privileged", - Value: "true", - }, - LessThanOrEqual: semver.MustParse("1.15.0-alpha.3"), - }, - - // before 1.16.0-beta.2, kubeadm bug did not allow overriding this via config file, so this has - // to be passed in as a kubelet flag. See https://github.com/kubernetes/kubernetes/pull/81903 for more details. 
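The versions.go cleanup below removes option entries that can no longer apply to any supported Kubernetes version; what remains is gated by an optional semver window per option. For illustration, a self-contained sketch of that gating (types are simplified stand-ins; only the blang/semver import matches the real code):

    package main

    import (
    	"fmt"

    	"github.com/blang/semver/v4"
    )

    // versionedOption is a simplified stand-in for config.VersionedExtraOption:
    // the option applies only inside its optional [gte, lte] semver window.
    type versionedOption struct {
    	component, key, value string
    	gte, lte              *semver.Version
    }

    func applies(o versionedOption, v semver.Version) bool {
    	if o.gte != nil && v.LT(*o.gte) {
    		return false
    	}
    	if o.lte != nil && v.GT(*o.lte) {
    		return false
    	}
    	return true
    }

    func main() {
    	gte := semver.MustParse("1.14.0-alpha.0")
    	admission := versionedOption{"apiserver", "enable-admission-plugins", "<joined list>", &gte, nil}

    	for _, ver := range []string{"1.13.1", "1.29.0"} {
    		v := semver.MustParse(ver)
    		fmt.Printf("v%s applies=%v\n", ver, applies(admission, v))
    	}
    }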
- { - Option: config.ExtraOption{ - Component: Kubelet, - Key: "client-ca-file", - Value: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), - }, - LessThanOrEqual: semver.MustParse("1.16.0-beta.1"), - }, - { Option: config.ExtraOption{ Component: Apiserver, Key: "enable-admission-plugins", - Value: strings.Join(util.DefaultLegacyAdmissionControllers, ","), - }, - GreaterThanOrEqual: semver.MustParse("1.11.0-alpha.0"), - LessThanOrEqual: semver.MustParse("1.13.1000"), - }, - { - Option: config.ExtraOption{ - Component: Apiserver, - Key: "enable-admission-plugins", - Value: strings.Join(util.DefaultV114AdmissionControllers, ","), + Value: strings.Join(util.DefaultAdmissionControllers, ","), }, GreaterThanOrEqual: semver.MustParse("1.14.0-alpha.0"), }, - { - Option: config.ExtraOption{ - Component: Kubelet, - Key: "cadvisor-port", - Value: "0", - }, - LessThanOrEqual: semver.MustParse("1.11.1000"), - }, { Option: config.ExtraOption{ Component: ControllerManager, diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 766655a5deb2..ef1eb80dd5b0 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -24,19 +24,20 @@ import ( "net" "os" "os/exec" - "path" "path/filepath" - "sort" + "slices" "strings" "time" "github.com/juju/mutex/v2" "github.com/otiai10/copy" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/clientcmd/api/latest" "k8s.io/klog/v2" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" @@ -50,25 +51,39 @@ import ( "k8s.io/minikube/pkg/util/lock" ) +// sharedCACerts represents minikube Root CA and Proxy Client CA certs and keys shared among profiles. +type sharedCACerts struct { + caCert string + caKey string + proxyCert string + proxyKey string +} + // SetupCerts gets the generated credentials required to talk to the APIServer. -func SetupCerts(cmd command.Runner, k8s config.ClusterConfig, n config.Node) error { +func SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd command.Runner, cmd command.Runner) error { localPath := localpath.Profile(k8s.KubernetesConfig.ClusterName) - klog.Infof("Setting up %s for IP: %s\n", localPath, n.IP) + klog.Infof("Setting up %s for IP: %s", localPath, n.IP) - ccs, regen, err := generateSharedCACerts() + sharedCerts, regen, err := generateSharedCACerts() if err != nil { - return errors.Wrap(err, "shared CA certs") + return errors.Wrap(err, "generate shared ca certs") } - xfer, err := generateProfileCerts(k8s, n, ccs, regen) - if err != nil { - return errors.Wrap(err, "profile certs") + xfer := []string{ + sharedCerts.caCert, + sharedCerts.caKey, + sharedCerts.proxyCert, + sharedCerts.proxyKey, } - xfer = append(xfer, ccs.caCert) - xfer = append(xfer, ccs.caKey) - xfer = append(xfer, ccs.proxyCert) - xfer = append(xfer, ccs.proxyKey) + // only generate/renew certs for control-plane nodes or if needs regenating + if n.ControlPlane || regen { + profileCerts, err := generateProfileCerts(k8s, n, sharedCerts, regen) + if err != nil { + return errors.Wrap(err, "generate profile certs") + } + xfer = append(xfer, profileCerts...) 
+ } copyableFiles := []assets.CopyableFile{} defer func() { @@ -79,54 +94,75 @@ func SetupCerts(cmd command.Runner, k8s config.ClusterConfig, n config.Node) err } }() - for _, p := range xfer { - cert := filepath.Base(p) - perms := "0644" - if strings.HasSuffix(cert, ".key") { - perms = "0600" - } - certFile, err := assets.NewFileAsset(p, vmpath.GuestKubernetesCertsDir, cert, perms) + for _, c := range xfer { + certFile, err := assets.NewFileAsset(c, vmpath.GuestKubernetesCertsDir, filepath.Base(c), properPerms(c)) if err != nil { - return errors.Wrapf(err, "key asset %s", cert) + return errors.Wrapf(err, "create cert file asset for %s", c) } copyableFiles = append(copyableFiles, certFile) } caCerts, err := collectCACerts() if err != nil { - return err + return errors.Wrap(err, "collect ca certs") } + for src, dst := range caCerts { - certFile, err := assets.NewFileAsset(src, path.Dir(dst), path.Base(dst), "0644") + // note: these are all public certs, so should be world-readeable + certFile, err := assets.NewFileAsset(src, filepath.Dir(dst), filepath.Base(dst), "0644") if err != nil { - return errors.Wrapf(err, "ca asset %s", src) + return errors.Wrapf(err, "create ca cert file asset for %s", src) } - copyableFiles = append(copyableFiles, certFile) } - kcs := &kubeconfig.Settings{ - ClusterName: n.Name, - ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), - ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), - ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), - CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), - ExtensionContext: kubeconfig.NewExtension(), - ExtensionCluster: kubeconfig.NewExtension(), - KeepContext: false, - } - - kubeCfg := api.NewConfig() - err = kubeconfig.PopulateFromSettings(kcs, kubeCfg) - if err != nil { - return errors.Wrap(err, "populating kubeconfig") - } - data, err := runtime.Encode(latest.Codec, kubeCfg) - if err != nil { - return errors.Wrap(err, "encoding kubeconfig") - } - if n.ControlPlane { + // copy essential certs from primary control-plane node to secondaries + // ref: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#manual-certs + if !config.IsPrimaryControlPlane(n) { + pcpCerts := []struct { + srcDir string + srcFile string + dstFile string + }{ + {vmpath.GuestKubernetesCertsDir, "sa.pub", "sa.pub"}, + {vmpath.GuestKubernetesCertsDir, "sa.key", "sa.key"}, + {vmpath.GuestKubernetesCertsDir, "front-proxy-ca.crt", "front-proxy-ca.crt"}, + {vmpath.GuestKubernetesCertsDir, "front-proxy-ca.key", "front-proxy-ca.key"}, + {vmpath.GuestKubernetesCertsDir + "/etcd", "ca.crt", "etcd-ca.crt"}, + {vmpath.GuestKubernetesCertsDir + "/etcd", "ca.key", "etcd-ca.key"}, + } + for _, c := range pcpCerts { + // get cert from primary control-plane node + f := assets.NewMemoryAsset(nil, c.srcDir, c.srcFile, properPerms(c.dstFile)) + if err := pcpCmd.CopyFrom(f); err != nil { + klog.Errorf("unable to copy %s/%s from primary control-plane to %s in node %q: %v", c.srcDir, c.srcFile, c.dstFile, n.Name, err) + } + // put cert to secondary control-plane node + copyableFiles = append(copyableFiles, f) + } + } + + // generate kubeconfig for control-plane node + kcs := &kubeconfig.Settings{ + ClusterName: n.Name, + ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), + ClientCertificate: filepath.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), + ClientKey: 
filepath.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), + CertificateAuthority: filepath.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), + ExtensionContext: kubeconfig.NewExtension(), + ExtensionCluster: kubeconfig.NewExtension(), + KeepContext: false, + } + kubeCfg := api.NewConfig() + err = kubeconfig.PopulateFromSettings(kcs, kubeCfg) + if err != nil { + return errors.Wrap(err, "populating kubeconfig") + } + data, err := runtime.Encode(latest.Codec, kubeCfg) + if err != nil { + return errors.Wrap(err, "encoding kubeconfig") + } kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") copyableFiles = append(copyableFiles, kubeCfgFile) } @@ -138,28 +174,23 @@ func SetupCerts(cmd command.Runner, k8s config.ClusterConfig, n config.Node) err } if err := installCertSymlinks(cmd, caCerts); err != nil { - return errors.Wrapf(err, "certificate symlinks") + return errors.Wrap(err, "install cert symlinks") } - if err := generateKubeadmCerts(cmd, k8s); err != nil { - return fmt.Errorf("failed to renew kubeadm certs: %v", err) + if err := renewExpiredKubeadmCerts(cmd, k8s); err != nil { + return errors.Wrap(err, "renew expired kubeadm certs") } + return nil } -// CACerts has cert and key for CA (and Proxy) -type CACerts struct { - caCert string - caKey string - proxyCert string - proxyKey string -} +// generateSharedCACerts generates minikube Root CA and Proxy Client CA certs, but only if missing or expired. +func generateSharedCACerts() (sharedCACerts, bool, error) { + klog.Info("generating shared ca certs ...") -// generateSharedCACerts generates CA certs shared among profiles, but only if missing -func generateSharedCACerts() (CACerts, bool, error) { regenProfileCerts := false globalPath := localpath.MiniPath() - cc := CACerts{ + cc := sharedCACerts{ caCert: localpath.CACert(), caKey: filepath.Join(globalPath, "ca.key"), proxyCert: filepath.Join(globalPath, "proxy-client-ca.crt"), @@ -183,59 +214,64 @@ func generateSharedCACerts() (CACerts, bool, error) { }, } - // create a lock for "ca-certs" to avoid race condition over multiple minikube instances rewriting shared ca certs + // create a lock for "ca-certs" to avoid race condition over multiple minikube instances rewriting ca certs hold := filepath.Join(globalPath, "ca-certs") spec := lock.PathMutexSpec(hold) spec.Timeout = 1 * time.Minute - klog.Infof("acquiring lock for shared ca certs: %+v", spec) + klog.Infof("acquiring lock for ca certs: %+v", spec) releaser, err := mutex.Acquire(spec) if err != nil { - return cc, false, errors.Wrapf(err, "unable to acquire lock for shared ca certs %+v", spec) + return cc, false, errors.Wrapf(err, "acquire lock for ca certs %+v", spec) } defer releaser.Release() for _, ca := range caCertSpecs { if isValid(ca.certPath, ca.keyPath) { - klog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath) + klog.Infof("skipping valid %q ca cert: %s", ca.subject, ca.keyPath) continue } regenProfileCerts = true - klog.Infof("generating %s CA: %s", ca.subject, ca.keyPath) + klog.Infof("generating %q ca cert: %s", ca.subject, ca.keyPath) if err := util.GenerateCACert(ca.certPath, ca.keyPath, ca.subject); err != nil { - return cc, false, errors.Wrap(err, "generate ca cert") + return cc, false, errors.Wrapf(err, "generate %q ca cert: %s", ca.subject, ca.keyPath) } } return cc, regenProfileCerts, nil } -// generateProfileCerts generates profile certs for a profile -func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, regen bool) ([]string, error) { - 
+// generateProfileCerts generates certs for a profile, but only if missing, expired or needs regenerating. +func generateProfileCerts(cfg config.ClusterConfig, n config.Node, shared sharedCACerts, regen bool) ([]string, error) { // Only generate these certs for the api server if !n.ControlPlane { return []string{}, nil } + klog.Info("generating profile certs ...") + k8s := cfg.KubernetesConfig - profilePath := localpath.Profile(k8s.ClusterName) - serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) + serviceIP, err := util.ServiceClusterIP(k8s.ServiceCIDR) if err != nil { - return nil, errors.Wrap(err, "getting service cluster ip") + return nil, errors.Wrap(err, "get service cluster ip") } - apiServerIPs := k8s.APIServerIPs - apiServerIPs = append(apiServerIPs, - net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")) + apiServerIPs := append([]net.IP{}, k8s.APIServerIPs...) + apiServerIPs = append(apiServerIPs, serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")) + // append ip addresses of all control-plane nodes + for _, n := range config.ControlPlanes(cfg) { + apiServerIPs = append(apiServerIPs, net.ParseIP(n.IP)) + } + if config.HA(cfg) { + apiServerIPs = append(apiServerIPs, net.ParseIP(cfg.KubernetesConfig.APIServerHAVIP)) + } - apiServerNames := k8s.APIServerNames - apiServerNames = append(apiServerNames, k8s.APIServerName, constants.ControlPlaneAlias) + apiServerNames := append([]string{}, k8s.APIServerNames...) + apiServerNames = append(apiServerNames, k8s.APIServerName, constants.ControlPlaneAlias, config.MachineName(cfg, n)) - apiServerAlternateNames := apiServerNames - apiServerAlternateNames = append(apiServerAlternateNames, - util.GetAlternateDNS(k8s.DNSDomain)...) + apiServerAlternateNames := append([]string{}, apiServerNames...) + apiServerAlternateNames = append(apiServerAlternateNames, util.AlternateDNS(k8s.DNSDomain)...) daemonHost := oci.DaemonHost(k8s.ContainerRuntime) if daemonHost != oci.DefaultBindIPV4 { @@ -249,12 +285,15 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, } // Generate a hash input for certs that depend on ip/name combinations - hi := []string{} - hi = append(hi, apiServerAlternateNames...) + hi := append([]string{}, apiServerAlternateNames...) 
for _, ip := range apiServerIPs { hi = append(hi, ip.String()) } - sort.Strings(hi) + // eliminate duplicates in 'hi' + slices.Sort(hi) + hi = slices.Compact(hi) + + profilePath := localpath.Profile(k8s.ClusterName) specs := []struct { certPath string @@ -267,14 +306,14 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, caCertPath string caKeyPath string }{ - { // Client cert + { // client cert certPath: localpath.ClientCert(k8s.ClusterName), keyPath: localpath.ClientKey(k8s.ClusterName), subject: "minikube-user", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: ccs.caCert, - caKeyPath: ccs.caKey, + caCertPath: shared.caCert, + caKeyPath: shared.caKey, }, { // apiserver serving cert hash: fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8], @@ -283,8 +322,8 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, subject: "minikube", ips: apiServerIPs, alternateNames: apiServerAlternateNames, - caCertPath: ccs.caCert, - caKeyPath: ccs.caKey, + caCertPath: shared.caCert, + caKeyPath: shared.caKey, }, { // aggregator proxy-client cert certPath: filepath.Join(profilePath, "proxy-client.crt"), @@ -292,8 +331,8 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, subject: "aggregator", ips: []net.IP{}, alternateNames: []string{}, - caCertPath: ccs.proxyCert, - caKeyPath: ccs.proxyKey, + caCertPath: shared.proxyCert, + caKeyPath: shared.proxyKey, }, } @@ -312,11 +351,11 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, } if !regen && isValid(cp, kp) { - klog.Infof("skipping %s signed cert generation: %s", spec.subject, kp) + klog.Infof("skipping valid signed profile cert regeneration for %q: %s", spec.subject, kp) continue } - klog.Infof("generating %s signed cert: %s", spec.subject, kp) + klog.Infof("generating signed profile cert for %q: %s", spec.subject, kp) if canRead(cp) { os.Remove(cp) } @@ -330,17 +369,17 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, cfg.CertExpiration, ) if err != nil { - return xfer, errors.Wrapf(err, "generate signed cert for %q", spec.subject) + return nil, errors.Wrapf(err, "generate signed profile cert for %q", spec.subject) } if spec.hash != "" { klog.Infof("copying %s -> %s", cp, spec.certPath) if err := copy.Copy(cp, spec.certPath); err != nil { - return xfer, errors.Wrap(err, "copy cert") + return nil, errors.Wrap(err, "copy profile cert") } klog.Infof("copying %s -> %s", kp, spec.keyPath) if err := copy.Copy(kp, spec.keyPath); err != nil { - return xfer, errors.Wrap(err, "copy key") + return nil, errors.Wrap(err, "copy profile cert key") } } } @@ -348,9 +387,11 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, ccs CACerts, return xfer, nil } -func generateKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error { - if _, err := cmd.RunCmd(exec.Command("ls", path.Join(vmpath.GuestPersistentDir, "certs", "etcd"))); err != nil { - klog.Infof("certs directory doesn't exist, likely first start: %v", err) +// renewExpiredKubeadmCerts checks if kubeadm certs already exists and are still valid, then renews them if needed. +// if certs don't exist already (eg, kubeadm hasn't run yet), then checks are skipped. 
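To see why the hash input above is sorted and de-duplicated before hashing: the apiserver serving cert is cached under a file name that embeds a short hash of its SAN set, so the derivation must be insensitive to ordering and duplicates, while any genuinely new IP or name (say, an added control-plane node) must produce a different name and force a fresh cert. A small standalone illustration of the same idea (values invented):

    package main

    import (
    	"crypto/sha1"
    	"fmt"
    	"slices"
    	"strings"
    )

    // sanHash derives a short, order- and duplicate-insensitive hash of a SAN set.
    func sanHash(sans []string) string {
    	hi := append([]string{}, sans...)
    	slices.Sort(hi)
    	hi = slices.Compact(hi) // drop duplicates so repeats don't change the hash
    	return fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8]
    }

    func main() {
    	a := sanHash([]string{"192.168.49.2", "10.96.0.1", "control-plane.minikube.internal"})
    	b := sanHash([]string{"10.96.0.1", "192.168.49.2", "192.168.49.2", "control-plane.minikube.internal"})
    	fmt.Println(a == b) // true: same SAN set, same cert name
    	c := sanHash([]string{"192.168.49.3", "10.96.0.1", "control-plane.minikube.internal"})
    	fmt.Println(a == c) // false: a new control-plane IP forces a fresh cert
    }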
+func renewExpiredKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error { + if _, err := cmd.RunCmd(exec.Command("stat", filepath.Join(vmpath.GuestPersistentDir, "certs", "apiserver-kubelet-client.crt"))); err != nil { + klog.Infof("'apiserver-kubelet-client' cert doesn't exist, likely first start: %v", err) return nil } @@ -364,7 +405,7 @@ func generateKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error { certPath = append(certPath, "etcd") } certPath = append(certPath, strings.TrimPrefix(cert, "etcd-")+".crt") - if !isKubeadmCertValid(cmd, path.Join(certPath...)) { + if !isKubeadmCertValid(cmd, filepath.Join(certPath...)) { expiredCerts = true } } @@ -372,10 +413,10 @@ func generateKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error { return nil } out.WarningT("kubeadm certificates have expired. Generating new ones...") - kubeadmPath := path.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion) + kubeadmPath := filepath.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion) bashCmd := fmt.Sprintf("sudo env PATH=\"%s:$PATH\" kubeadm certs renew all --config %s", kubeadmPath, constants.KubeadmYamlPath) if _, err := cmd.RunCmd(exec.Command("/bin/bash", "-c", bashCmd)); err != nil { - return fmt.Errorf("failed to renew kubeadm certs: %v", err) + return errors.Wrap(err, "kubeadm certs renew") } return nil } @@ -403,7 +444,9 @@ func isValidPEMCertificate(filePath string) (bool, error) { return false, nil } -// collectCACerts looks up all PEM certificates with .crt or .pem extension in ~/.minikube/certs or ~/.minikube/files/etc/ssl/certs to copy to the host. +// collectCACerts looks up all public pem certificates with .crt or .pem extension +// in ~/.minikube/certs or ~/.minikube/files/etc/ssl/certs +// to copy them to the vmpath.GuestCertAuthDir ("/usr/share/ca-certificates") in host. // minikube root CA is also included but libmachine certificates (ca.pem/cert.pem) are excluded. 
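Both isValidPEMCertificate and the isValid expiry check further below boil down to the same standard-library steps; roughly (a sketch, not the patch's exact code; the path is illustrative):

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"os"
    	"time"
    )

    // certStillValid decodes the first PEM block of a file, verifies it is a
    // CERTIFICATE, parses it, and compares NotAfter against the current time.
    func certStillValid(path string) (bool, error) {
    	data, err := os.ReadFile(path)
    	if err != nil {
    		return false, err
    	}
    	block, _ := pem.Decode(data)
    	if block == nil || block.Type != "CERTIFICATE" {
    		return false, fmt.Errorf("%s is not a PEM certificate", path)
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		return false, err
    	}
    	return time.Now().Before(cert.NotAfter), nil
    }

    func main() {
    	ok, err := certStillValid("/var/lib/minikube/certs/apiserver.crt")
    	fmt.Println(ok, err)
    }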
func collectCACerts() (map[string]string, error) { localPath := localpath.MiniPath() @@ -425,16 +468,14 @@ func collectCACerts() (map[string]string, error) { return nil } - fullPath := filepath.Join(certsDir, hostpath) - ext := strings.ToLower(filepath.Ext(hostpath)) - - if ext == ".crt" || ext == ".pem" { + ext := filepath.Ext(hostpath) + if strings.ToLower(ext) == ".crt" || strings.ToLower(ext) == ".pem" { if info.Size() < 32 { - klog.Warningf("ignoring %s, impossibly tiny %d bytes", fullPath, info.Size()) + klog.Warningf("ignoring %s, impossibly tiny %d bytes", hostpath, info.Size()) return nil } - klog.Infof("found cert: %s (%d bytes)", fullPath, info.Size()) + klog.Infof("found cert: %s (%d bytes)", hostpath, info.Size()) validPem, err := isValidPEMCertificate(hostpath) if err != nil { @@ -444,23 +485,24 @@ func collectCACerts() (map[string]string, error) { if validPem { filename := filepath.Base(hostpath) dst := fmt.Sprintf("%s.%s", strings.TrimSuffix(filename, ext), "pem") - certFiles[hostpath] = path.Join(vmpath.GuestCertAuthDir, dst) + certFiles[hostpath] = filepath.Join(vmpath.GuestCertAuthDir, dst) } } return nil }) if err != nil { - return nil, errors.Wrapf(err, "provisioning: traversal certificates dir %s", certsDir) + return nil, errors.Wrapf(err, "collecting CA certs from %s", certsDir) } - for _, excluded := range []string{"ca.pem", "cert.pem"} { - certFiles[filepath.Join(certsDir, excluded)] = "" + excluded := []string{"ca.pem", "cert.pem"} + for _, e := range excluded { + certFiles[filepath.Join(certsDir, e)] = "" } } - // populates minikube CA - certFiles[filepath.Join(localPath, "ca.crt")] = path.Join(vmpath.GuestCertAuthDir, "minikubeCA.pem") + // include minikube CA + certFiles[localpath.CACert()] = filepath.Join(vmpath.GuestCertAuthDir, "minikubeCA.pem") filtered := map[string]string{} for k, v := range certFiles { @@ -502,8 +544,8 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { } for _, caCertFile := range caCerts { - dstFilename := path.Base(caCertFile) - certStorePath := path.Join(vmpath.GuestCertStoreDir, dstFilename) + dstFilename := filepath.Base(caCertFile) + certStorePath := filepath.Join(vmpath.GuestCertStoreDir, dstFilename) cmd := fmt.Sprintf("test -s %s && ln -fs %s %s", caCertFile, caCertFile, certStorePath) if _, err := cr.RunCmd(exec.Command("sudo", "/bin/bash", "-c", cmd)); err != nil { @@ -518,7 +560,7 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { if err != nil { return errors.Wrapf(err, "calculate hash for cacert %s", caCertFile) } - subjectHashLink := path.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) + subjectHashLink := filepath.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) // NOTE: This symlink may exist, but point to a missing file cmd = fmt.Sprintf("test -L %s || ln -fs %s %s", subjectHashLink, certStorePath, subjectHashLink) @@ -540,8 +582,8 @@ func canRead(path string) bool { return true } -// isValid checks a cert/key path and makes sure it's still valid -// if a cert is expired or otherwise invalid, it will be deleted +// isValid checks a cert & key paths exist and are still valid. +// If a cert is expired or otherwise invalid, it will be deleted. func isValid(certPath, keyPath string) bool { if !canRead(keyPath) { return false @@ -589,3 +631,15 @@ func isKubeadmCertValid(cmd command.Runner, certPath string) bool { } return err == nil } + +// properPerms returns proper permissions for given cert file, based on its extension. 
+func properPerms(cert string) string {
+	perms := "0644"
+
+	ext := strings.ToLower(filepath.Ext(cert))
+	if ext == ".key" || ext == ".pem" {
+		perms = "0600"
+	}
+
+	return perms
+}
diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go
index 277fbdf7d205..ec70bc0bd231 100644
--- a/pkg/minikube/bootstrapper/certs_test.go
+++ b/pkg/minikube/bootstrapper/certs_test.go
@@ -61,7 +61,10 @@ func TestSetupCerts(t *testing.T) {
 	f := command.NewFakeCommandRunner()
 	f.SetCommandToOutput(expected)
 
-	if err := SetupCerts(f, k8s, config.Node{}); err != nil {
+	p := command.NewFakeCommandRunner()
+	p.SetCommandToOutput(map[string]string{})
+
+	if err := SetupCerts(k8s, config.Node{}, p, f); err != nil {
 		t.Fatalf("Error starting cluster: %v", err)
 	}
 }
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
index 69761568d153..69bd5b0f3c94 100644
--- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
+++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
@@ -48,6 +48,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
+	kubevip "k8s.io/minikube/pkg/minikube/cluster/ha/kube-vip"
 	"k8s.io/minikube/pkg/minikube/cni"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
@@ -136,8 +137,8 @@ func (k *Bootstrapper) createCompatSymlinks() error {
 	return nil
 }
 
-// clearStaleConfigs clears configurations which may have stale IP addresses
-func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
+// clearStaleConfigs tries to clear configurations which may have stale IP addresses.
+func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) {
 	// These are the files that kubeadm will reject stale versions of
 	paths := []string{
 		"/etc/kubernetes/admin.conf",
@@ -150,16 +151,11 @@
 	rr, err := k.c.RunCmd(exec.Command("sudo", args...))
 	if err != nil {
 		klog.Infof("config check failed, skipping stale config cleanup: %v", err)
-		return nil
+		return
 	}
 	klog.Infof("found existing configuration files:\n%s\n", rr.Stdout.String())
 
-	cp, err := config.PrimaryControlPlane(&cfg)
-	if err != nil {
-		return err
-	}
-
-	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cp.Port)))
+	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cfg.APIServerPort)))
 	for _, path := range paths {
 		_, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path))
 		if err != nil {
@@ -171,9 +167,9 @@
 			}
 		}
 	}
-	return nil
 }
 
+// init initialises primary control-plane using kubeadm.
func (k *Bootstrapper) init(cfg config.ClusterConfig) error { version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { @@ -196,11 +191,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { "FileAvailable--etc-kubernetes-manifests-etcd.yaml", "Port-10250", // For "none" users who already have a kubelet online "Swap", // For "none" users who have swap configured - } - if version.GE(semver.MustParse("1.13.0")) { - ignore = append(ignore, - "NumCPU", // For "none" users who have too few CPUs - ) + "NumCPU", // For "none" users who have too few CPUs } if version.GE(semver.MustParse("1.20.0")) { ignore = append(ignore, @@ -210,11 +201,6 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { ignore = append(ignore, bsutil.SkipAdditionalPreflights[r.Name()]...) skipSystemVerification := false - // Allow older kubeadm versions to function with newer Docker releases. - if version.LT(semver.MustParse("1.13.0")) { - klog.Infof("ignoring SystemVerification for kubeadm because of old Kubernetes version %v", version) - skipSystemVerification = true - } if driver.BareMetal(cfg.Driver) && r.Name() == "Docker" { if v, err := r.Version(); err == nil && strings.Contains(v, "azure") { klog.Infof("ignoring SystemVerification for kubeadm because of unknown docker version %s", v) @@ -234,9 +220,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { ignore = append(ignore, "FileContent--proc-sys-net-bridge-bridge-nf-call-iptables") } - if err := k.clearStaleConfigs(cfg); err != nil { - return errors.Wrap(err, "clearing stale configs") - } + k.clearStaleConfigs(cfg) conf := constants.KubeadmYamlPath ctx, cancel := context.WithTimeout(context.Background(), initTimeoutMinutes*time.Minute) @@ -273,32 +257,34 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { wg.Add(3) go func() { + defer wg.Done() // we need to have cluster role binding before applying overlay to avoid #7428 if err := k.elevateKubeSystemPrivileges(cfg); err != nil { - klog.Errorf("unable to create cluster role binding, some addons might not work: %v", err) + klog.Errorf("unable to create cluster role binding for primary control-plane node, some addons might not work: %v", err) } - wg.Done() }() go func() { - if err := k.applyNodeLabels(cfg); err != nil { - klog.Warningf("unable to apply node labels: %v", err) + defer wg.Done() + if err := k.LabelAndUntaintNode(cfg, config.ControlPlanes(cfg)[0]); err != nil { + klog.Warningf("unable to apply primary control-plane node labels and taints: %v", err) } - wg.Done() }() go func() { + defer wg.Done() if err := bsutil.AdjustResourceLimits(k.c); err != nil { - klog.Warningf("unable to adjust resource limits: %v", err) + klog.Warningf("unable to adjust resource limits for primary control-plane node: %v", err) } - wg.Done() }() wg.Wait() - // Tunnel apiserver to guest, if necessary - if cfg.APIServerPort != 0 { - k.tunnelToAPIServer(cfg) + + // tunnel apiserver to guest + if err := k.tunnelToAPIServer(cfg); err != nil { + klog.Warningf("apiserver tunnel failed: %v", err) } + return nil } @@ -403,7 +389,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { start := time.Now() klog.Infof("StartCluster: %+v", cfg) defer func() { - klog.Infof("StartCluster complete in %s", time.Since(start)) + klog.Infof("duration metric: took %s to StartCluster", time.Since(start)) }() // Before we start, ensure that no paused components are lurking around @@ -412,17 +398,18 @@ func (k *Bootstrapper) StartCluster(cfg 
config.ClusterConfig) error {
 	}
 
 	if err := bsutil.ExistingConfig(k.c); err == nil {
-		// If the guest already exists and was stopped, re-establish the apiserver tunnel so checks pass
-		if cfg.APIServerPort != 0 {
-			k.tunnelToAPIServer(cfg)
+		// if the guest already exists and was stopped, re-establish the apiserver tunnel so checks pass
+		if err := k.tunnelToAPIServer(cfg); err != nil {
+			klog.Warningf("apiserver tunnel failed: %v", err)
 		}
+
 		klog.Infof("found existing configuration files, will attempt cluster restart")
-		rerr := k.restartControlPlane(cfg)
-		if rerr == nil {
+
+		var rerr error
+		if rerr = k.restartPrimaryControlPlane(cfg); rerr == nil {
 			return nil
 		}
-
-		out.ErrT(style.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
+		out.ErrT(style.Embarrassed, "Unable to restart control-plane node(s), will reset cluster: {{.error}}", out.V{"error": rerr})
 		if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
 			klog.Warningf("delete failed: %v", err)
 		}
@@ -450,20 +437,27 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 	return err
 }
 
-func (k *Bootstrapper) tunnelToAPIServer(cfg config.ClusterConfig) {
+// tunnelToAPIServer creates ssh tunnel between apiserver:port inside control-plane node and host on port 8443.
+func (k *Bootstrapper) tunnelToAPIServer(cfg config.ClusterConfig) error {
+	if cfg.APIServerPort == 0 {
+		return fmt.Errorf("apiserver port not set")
+	}
+
 	m, err := machine.NewAPIClient()
 	if err != nil {
-		klog.Warningf("libmachine API failed: %v", err)
+		return errors.Wrapf(err, "create libmachine api client")
 	}
-	cp, err := config.PrimaryControlPlane(&cfg)
+
+	cp, err := config.ControlPlane(cfg)
 	if err != nil {
-		klog.Warningf("finding control plane failed: %v", err)
+		return errors.Wrapf(err, "get control-plane node")
 	}
+
 	args := []string{"-f", "-NTL", fmt.Sprintf("%d:localhost:8443", cfg.APIServerPort)}
-	err = machine.CreateSSHShell(m, cfg, cp, args, false)
-	if err != nil {
-		klog.Warningf("apiserver tunnel failed: %v", err)
+	if err = machine.CreateSSHShell(m, cfg, cp, args, false); err != nil {
+		return errors.Wrapf(err, "ssh command")
 	}
+	return nil
 }
 
 // client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver
@@ -496,17 +490,17 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
 	out.Step(style.HealthCheck, "Verifying Kubernetes components...")
 	// regardless if waiting is set or not, we will make sure kubelet is not stopped
 	// to solve corner cases when a container is hibernated and once coming back kubelet not running.
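The tunnel that tunnelToAPIServer establishes is just an ssh local port-forward; conceptually it amounts to the snippet below (illustrative only: the real code drives this through libmachine's CreateSSHShell rather than invoking ssh directly, and the user/host here are made up):

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    func tunnelArgs(hostPort int) []string {
    	// -f: background after auth, -N: no remote command, -T: no tty,
    	// -L: forward local hostPort to the guest apiserver on localhost:8443
    	return []string{"-f", "-NTL", fmt.Sprintf("%d:localhost:8443", hostPort)}
    }

    func main() {
    	cmd := exec.Command("ssh", append(tunnelArgs(8443), "docker@192.168.49.2")...)
    	fmt.Println(cmd.String()) // ssh -f -NTL 8443:localhost:8443 docker@192.168.49.2
    }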
- if err := k.ensureServiceStarted("kubelet"); err != nil { + if err := sysinit.New(k.c).Start("kubelet"); err != nil { klog.Warningf("Couldn't ensure kubelet is started this might cause issues: %v", err) } // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT - cp, err := config.PrimaryControlPlane(&cfg) + cp, err := config.ControlPlane(cfg) if err != nil { - return errors.Wrap(err, "get primary control plane") + return errors.Wrap(err, "get control-plane node") } hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) if err != nil { - return errors.Wrap(err, "get control plane endpoint") + return errors.Wrap(err, "get control-plane endpoint") } client, err := k.client(hostname, port) @@ -578,7 +572,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time } } - klog.Infof("duration metric: took %s to wait for : %+v ...", time.Since(start), cfg.VerifyComponents) + klog.Infof("duration metric: took %s to wait for: %+v", time.Since(start), cfg.VerifyComponents) if err := kverify.NodePressure(client); err != nil { adviseNodePressure(err, cfg.Name, cfg.Driver) @@ -587,92 +581,35 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return nil } -// ensureServiceStarted will start a systemd or init.d service if it is not running. -func (k *Bootstrapper) ensureServiceStarted(svc string) error { - if st := kverify.ServiceStatus(k.c, svc); st != state.Running { - klog.Warningf("surprisingly %q service status was %s!. will try to start it, could be related to this issue https://github.com/kubernetes/minikube/issues/9458", svc, st) - return sysinit.New(k.c).Start(svc) - } - return nil -} - -// needsReconfigure returns whether or not the cluster needs to be reconfigured -func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int, client *kubernetes.Clientset, version string) bool { - if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { - klog.Infof("needs reconfigure: configs differ:\n%s", rr.Output()) - return true - } - - // cruntime.Enable() may restart kube-apiserver but does not wait for it to return back - // could take five-ish seconds, so hopefully 10 seconds is sufficient to wait for api server to come back up - apiStatusTimeout := 10 * time.Second - st, err := kverify.WaitForAPIServerStatus(k.c, apiStatusTimeout, hostname, port) - if err != nil { - klog.Infof("needs reconfigure: apiserver error: %v", err) - return true - } - if st != state.Running { - klog.Infof("needs reconfigure: apiserver in state %s", st) - return true - } - - if err := kverify.ExpectAppsRunning(client, kverify.AppsRunningList); err != nil { - klog.Infof("needs reconfigure: %v", err) - return true - } - - if err := kverify.APIServerVersionMatch(client, version); err != nil { - klog.Infof("needs reconfigure: %v", err) - return true - } - - // DANGER: This log message is hard-coded in an integration test! - klog.Infof("The running cluster does not require reconfiguration: %s", hostname) - return false -} - -// restartCluster restarts the Kubernetes cluster configured by kubeadm -func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { - klog.Infof("restartCluster start") +// restartPrimaryControlPlane restarts the kubernetes cluster configured by kubeadm. 
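WaitForNode above addresses the apiserver through the hostname:port pair returned by driver.ControlPlaneEndpoint, and the client URL is assembled with net.JoinHostPort, which also brackets IPv6 literals correctly. A tiny sketch (values illustrative):

    package main

    import (
    	"fmt"
    	"net"
    )

    // apiServerURL builds the https endpoint a Kubernetes client would use.
    func apiServerURL(host string, port int) string {
    	return fmt.Sprintf("https://%s", net.JoinHostPort(host, fmt.Sprint(port)))
    }

    func main() {
    	fmt.Println(apiServerURL("control-plane.minikube.internal", 8443))
    	fmt.Println(apiServerURL("::1", 8443)) // https://[::1]:8443
    }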
+func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) error { + klog.Infof("restartPrimaryControlPlane start ...") start := time.Now() defer func() { - klog.Infof("restartCluster took %s", time.Since(start)) + klog.Infof("duration metric: took %s to restartPrimaryControlPlane", time.Since(start)) }() - k8sVersion, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) - if err != nil { - return errors.Wrap(err, "parsing Kubernetes version") - } - - phase := "alpha" - controlPlane := "controlplane" - if k8sVersion.GTE(semver.MustParse("1.13.0")) { - phase = "init" - controlPlane = "control-plane" - } - if err := k.createCompatSymlinks(); err != nil { klog.Errorf("failed to create compat symlinks: %v", err) } - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return errors.Wrap(err, "primary control plane") + pcp, err := config.ControlPlane(cfg) + if err != nil || !config.IsPrimaryControlPlane(pcp) { + return errors.Wrap(err, "get primary control-plane node") } - hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) + host, _, port, err := driver.ControlPlaneEndpoint(&cfg, &pcp, cfg.Driver) if err != nil { - return errors.Wrap(err, "control plane") + return errors.Wrap(err, "get primary control-plane endpoint") } // Save the costly tax of reinstalling Kubernetes if the only issue is a missing kube context - _, err = kubeconfig.UpdateEndpoint(cfg.Name, hostname, port, kubeconfig.PathFromEnv(), kubeconfig.NewExtension()) - if err != nil { + if _, err := kubeconfig.UpdateEndpoint(cfg.Name, host, port, kubeconfig.PathFromEnv(), kubeconfig.NewExtension()); err != nil { klog.Warningf("unable to update kubeconfig (cluster will likely require a reset): %v", err) } - client, err := k.client(hostname, port) + client, err := k.client(host, port) if err != nil { return errors.Wrap(err, "getting k8s client") } @@ -680,37 +617,41 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { // If the cluster is running, check if we have any work to do. conf := constants.KubeadmYamlPath - if !k.needsReconfigure(conf, hostname, port, client, cfg.KubernetesConfig.KubernetesVersion) { - klog.Infof("Taking a shortcut, as the cluster seems to be properly configured") - return nil + // check whether or not the cluster needs to be reconfigured + // except for vm driver in non-ha cluster - fallback to old behaviour + if config.HA(cfg) || !driver.IsVM(cfg.Driver) { + rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")) + if err == nil { + // DANGER: This log message is hard-coded in an integration test! 
+ klog.Infof("The running cluster does not require reconfiguration: %s", host) + return nil + } + klog.Infof("detected kubeadm config drift (will reconfigure cluster from new %s):\n%s", conf, rr.Output()) } if err := k.stopKubeSystem(cfg); err != nil { - klog.Warningf("Failed to stop kube-system containers: port conflicts may arise: %v", err) + klog.Warningf("Failed to stop kube-system containers, port conflicts may arise: %v", err) } if err := sysinit.New(k.c).Stop("kubelet"); err != nil { klog.Warningf("Failed to stop kubelet, this might cause upgrade errors: %v", err) } - if err := k.clearStaleConfigs(cfg); err != nil { - return errors.Wrap(err, "clearing stale configs") - } + k.clearStaleConfigs(cfg) if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { return errors.Wrap(err, "cp") } - baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) + baseCmd := fmt.Sprintf("%s init", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion)) cmds := []string{ fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf), fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf), fmt.Sprintf("%s phase kubelet-start --config %s", baseCmd, conf), - fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, conf), + fmt.Sprintf("%s phase control-plane all --config %s", baseCmd, conf), fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf), } - klog.Infof("reconfiguring cluster from %s", conf) // Run commands one at a time so that it is easier to root cause failures. for _, c := range cmds { if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)); err != nil { @@ -732,7 +673,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } - if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), hostname, port, kconst.DefaultControlPlaneTimeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), host, port, kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "apiserver health") } @@ -801,9 +742,20 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { // JoinCluster adds new node to an existing cluster. func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { - // Join the master by specifying its token + // Join the control plane by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, config.MachineName(cc, n)) + if n.ControlPlane { + joinCmd += " --control-plane" + // fix kvm driver where ip address is automatically taken from the "default" network instead from the dedicated network + // avoid error: "error execution phase control-plane-prepare/certs: error creating PKI assets: failed to write or validate certificate "apiserver": certificate apiserver is invalid: x509: certificate is valid for 192.168.39.147, 10.96.0.1, 127.0.0.1, 10.0.0.1, 192.168.39.58, not 192.168.122.21" + // ref: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/#options + // "If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used." + // "If the node should host a new control plane instance, the port for the API Server to bind to." 
+ joinCmd += " --apiserver-advertise-address=" + n.IP + + " --apiserver-bind-port=" + strconv.Itoa(n.Port) + } + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)); err != nil { return errors.Wrapf(err, "kubeadm join") } @@ -828,7 +780,7 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) - // avoid "Found multiple CRI sockets, please use --cri-socket to select one: /var/run/dockershim.sock, /var/run/crio/crio.sock" error + // avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) if err != nil { return "", errors.Wrap(err, "parsing Kubernetes version") @@ -837,7 +789,15 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { if err != nil { klog.Errorf("cruntime: %v", err) } + sp := cr.SocketPath() + // avoid warning/error: + // 'Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. + // Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". + // Please update your configuration!' + if !strings.HasPrefix(sp, "unix://") { + sp = "unix://" + sp + } joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, sp) return joinCmd, nil @@ -903,12 +863,14 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { } // SetupCerts sets up certificates within the cluster. -func (k *Bootstrapper) SetupCerts(k8s config.ClusterConfig, n config.Node) error { - return bootstrapper.SetupCerts(k.c, k8s, n) +func (k *Bootstrapper) SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd cruntime.CommandRunner) error { + return bootstrapper.SetupCerts(k8s, n, pcpCmd, k.c) } // UpdateCluster updates the control plane with cluster-level info. func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { + klog.Infof("updating cluster %+v ...", cfg) + images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "kubeadm images") @@ -943,25 +905,22 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { } } - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return errors.Wrap(err, "getting control plane") + pcp, err := config.ControlPlane(cfg) + if err != nil || !config.IsPrimaryControlPlane(pcp) { + return errors.Wrap(err, "get primary control-plane node") } - err = k.UpdateNode(cfg, cp, r) + err = k.UpdateNode(cfg, pcp, r) if err != nil { - return errors.Wrap(err, "updating control plane") + return errors.Wrap(err, "update primary control-plane node") } return nil } -// UpdateNode updates a node. +// UpdateNode updates new or existing node. 
 func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cruntime.Manager) error {
-	kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r)
-	if err != nil {
-		return errors.Wrap(err, "generating kubeadm cfg")
-	}
+	klog.Infof("updating node %v ...", n)
 
 	kubeletCfg, err := bsutil.NewKubeletConfig(cfg, n, r)
 	if err != nil {
@@ -975,19 +934,36 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 
 	klog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
 
-	sm := sysinit.New(k.c)
-
-	if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c, sm, cfg.BinaryMirror); err != nil {
-		return errors.Wrap(err, "downloading binaries")
-	}
-
 	files := []assets.CopyableFile{
 		assets.NewMemoryAssetTarget(kubeletCfg, bsutil.KubeletSystemdConfFile, "0644"),
 		assets.NewMemoryAssetTarget(kubeletService, bsutil.KubeletServiceFile, "0644"),
 	}
 
 	if n.ControlPlane {
-		files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640"))
+		// for primary control-plane node only, generate kubeadm config based on current params
+		// on node restart, it will be checked against later if anything needs changing
+		if config.IsPrimaryControlPlane(n) {
+			kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r)
+			if err != nil {
+				return errors.Wrap(err, "generating kubeadm cfg")
+			}
+			files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640"))
+		}
+		// deploy kube-vip for ha cluster
+		if config.HA(cfg) {
+			kubevipCfg, err := kubevip.Configure(cfg)
+			if err != nil {
+				klog.Errorf("couldn't generate kube-vip config, this might cause issues (will continue): %v", err)
+			} else {
+				files = append(files, assets.NewMemoryAssetTarget(kubevipCfg, path.Join(vmpath.GuestManifestsDir, kubevip.Manifest), "0600"))
+			}
+		}
+	}
+
+	sm := sysinit.New(k.c)
+
+	if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c, sm, cfg.BinaryMirror); err != nil {
+		return errors.Wrap(err, "downloading binaries")
 	}
 
 	// Installs compatibility shims for non-systemd environments
@@ -1006,13 +982,23 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 		return errors.Wrap(err, "resolv.conf")
 	}
 
-	cp, err := config.PrimaryControlPlane(&cfg)
-	if err != nil {
-		return errors.Wrap(err, "control plane")
+	// add "control-plane.minikube.internal" dns alias
+	// note: needs to be called after APIServerHAVIP is set (in startPrimaryControlPlane()) and before kubeadm kicks off
+	cpIP := cfg.KubernetesConfig.APIServerHAVIP
+	if !config.HA(cfg) {
+		cp, err := config.ControlPlane(cfg)
+		if err != nil {
+			return errors.Wrap(err, "get control-plane node")
+		}
+		cpIP = cp.IP
+	}
+	if err := machine.AddHostAlias(k.c, constants.ControlPlaneAlias, net.ParseIP(cpIP)); err != nil {
+		return errors.Wrap(err, "add control-plane alias")
 	}
 
-	if err := machine.AddHostAlias(k.c, constants.ControlPlaneAlias, net.ParseIP(cp.IP)); err != nil {
-		return errors.Wrap(err, "host alias")
+	// "ensure" kubelet is started, intentionally non-fatal in case of an error
+	if err := sysinit.New(k.c).Start("kubelet"); err != nil {
+		klog.Errorf("Couldn't ensure kubelet is started, this might cause issues (will continue): %v", err)
 	}
 
 	return nil
@@ -1040,44 +1026,55 @@ func kubectlPath(cfg config.ClusterConfig) string {
 	return path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl")
 }
 
-func (k *Bootstrapper) ApplyNodeLabels(cfg config.ClusterConfig) error {
-	return k.applyNodeLabels(cfg)
+func (k *Bootstrapper) LabelAndUntaintNode(cfg config.ClusterConfig, n config.Node) error {
+	return k.labelAndUntaintNode(cfg, n)
 }
 
-// applyNodeLabels applies minikube labels to all the nodes
-func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error {
-	// time cluster was created. time format is based on ISO 8601 (RFC 3339)
+// labelAndUntaintNode applies minikube labels to the node and removes the NoSchedule taint that may be set on secondary control-plane nodes by default in an HA cluster.
+func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.Node) error {
+	// time node was created. time format is based on ISO 8601 (RFC 3339)
 	// converting - and : to _ because of Kubernetes label restriction
 	createdAtLbl := "minikube.k8s.io/updated_at=" + time.Now().Format("2006_01_02T15_04_05_0700")
+
 	verLbl := "minikube.k8s.io/version=" + version.GetVersion()
 	commitLbl := "minikube.k8s.io/commit=" + version.GetGitCommitID()
-	nameLbl := "minikube.k8s.io/name=" + cfg.Name
+	profileNameLbl := "minikube.k8s.io/name=" + cfg.Name
 
 	// ensure that "primary" label is applied only to the 1st node in the cluster (used eg for placing ingress there)
 	// this is used to uniquely distinguish that from other nodes in multi-master/multi-control-plane cluster config
 	primaryLbl := "minikube.k8s.io/primary=false"
-
-	// ensure that "primary" label is not removed when apply label to all others nodes
-	applyToNodes := "-l minikube.k8s.io/primary!=true"
-	if len(cfg.Nodes) <= 1 {
+	if config.IsPrimaryControlPlane(n) {
 		primaryLbl = "minikube.k8s.io/primary=true"
-		applyToNodes = "--all"
 	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), applyTimeoutSeconds*time.Second)
 	defer cancel()
-	// example:
-	// sudo /var/lib/minikube/binaries/<version>/kubectl label nodes minikube.k8s.io/version=<version> minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
-	cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg),
-		"label", "nodes", verLbl, commitLbl, nameLbl, createdAtLbl, primaryLbl, applyToNodes, "--overwrite",
-		fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")))
+	// example:
+	// sudo /var/lib/minikube/binaries/<version>/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes test-357 minikube.k8s.io/version=<version> minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700
+	cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")),
+		"label", "--overwrite", "nodes", config.MachineName(cfg, n), createdAtLbl, verLbl, commitLbl, profileNameLbl, primaryLbl)
 	if _, err := k.c.RunCmd(cmd); err != nil {
 		if ctx.Err() == context.DeadlineExceeded {
-			return errors.Wrapf(err, "timeout apply labels")
+			return errors.Wrapf(err, "timeout apply node labels")
+		}
+		return errors.Wrapf(err, "apply node labels")
+	}
+
+	// primary control-plane and worker nodes are already untainted by default; only secondary control-plane nodes carry the NoSchedule taint that needs removing
+	if n.ControlPlane && !config.IsPrimaryControlPlane(n) {
+		// example:
+		// sudo /var/lib/minikube/binaries/<version>/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes test-357 node-role.kubernetes.io/control-plane:NoSchedule-
+		cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")),
+
"taint", "nodes", config.MachineName(cfg, n), "node-role.kubernetes.io/control-plane:NoSchedule-") + if _, err := k.c.RunCmd(cmd); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return errors.Wrapf(err, "timeout remove node taints") + } + return errors.Wrapf(err, "remove node taints") } - return errors.Wrapf(err, "applying node labels") } + return nil } @@ -1085,7 +1082,7 @@ func (k *Bootstrapper) applyNodeLabels(cfg config.ClusterConfig) error { func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) error { start := time.Now() defer func() { - klog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges.", time.Since(start)) + klog.Infof("duration metric: took %s to wait for elevateKubeSystemPrivileges", time.Since(start)) }() // Allow no more than 5 seconds for creating cluster role bindings diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 01f675579534..c8a3cd39b7a4 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -54,21 +54,21 @@ func Bootstrapper(api libmachine.API, bootstrapperName string, cc config.Cluster return b, nil } -// ControlPlaneBootstrapper returns the bootstrapper for the cluster's control plane -func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, command.Runner, error) { - cp, err := config.PrimaryControlPlane(cc) +// ControlPlaneBootstrapper returns a bootstrapper for the first available cluster control-plane node. +func ControlPlaneBootstrapper(mAPI libmachine.API, cc *config.ClusterConfig, bootstrapperName string) (bootstrapper.Bootstrapper, error) { + cp, err := config.ControlPlane(*cc) if err != nil { - return nil, nil, errors.Wrap(err, "getting primary control plane") + return nil, errors.Wrap(err, "get primary control-plane node") } h, err := machine.LoadHost(mAPI, config.MachineName(*cc, cp)) if err != nil { - return nil, nil, errors.Wrap(err, "getting control plane host") + return nil, errors.Wrap(err, "load primary control-plane host") } cpr, err := machine.CommandRunner(h) if err != nil { - return nil, nil, errors.Wrap(err, "getting control plane command runner") + return nil, errors.Wrap(err, "get primary control-plane command runner") } bs, err := Bootstrapper(mAPI, bootstrapperName, *cc, cpr) - return bs, cpr, err + return bs, err } diff --git a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go new file mode 100644 index 000000000000..521a2814a876 --- /dev/null +++ b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go @@ -0,0 +1,119 @@ +/* +Copyright 2023 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package kubevip
+
+import (
+	"bytes"
+	"html/template"
+
+	"github.com/pkg/errors"
+	"k8s.io/klog/v2"
+	"k8s.io/minikube/pkg/minikube/config"
+)
+
+const Manifest = "kube-vip.yaml"
+
+// kubeVipTemplate is the kube-vip static pod config template
+// ref: https://kube-vip.io/docs/installation/static/
+// update: regenerate with:
+//
+//	export KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")
+//	docker run --rm ghcr.io/kube-vip/kube-vip:$KVVERSION manifest pod --interface eth0 --address 192.168.42.17 --controlplane --arp --leaderElection
+var kubeVipTemplate = template.Must(template.New("kubeVipTemplate").Parse(`apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: null
+  name: kube-vip
+  namespace: kube-system
+spec:
+  containers:
+  - args:
+    - manager
+    env:
+    - name: vip_arp
+      value: "true"
+    - name: port
+      value: "{{ .Port }}"
+    - name: vip_interface
+      value: eth0
+    - name: vip_cidr
+      value: "32"
+    - name: cp_enable
+      value: "true"
+    - name: cp_namespace
+      value: kube-system
+    - name: vip_ddns
+      value: "false"
+    - name: vip_leaderelection
+      value: "true"
+    - name: vip_leasename
+      value: plndr-cp-lock
+    - name: vip_leaseduration
+      value: "5"
+    - name: vip_renewdeadline
+      value: "3"
+    - name: vip_retryperiod
+      value: "1"
+    - name: address
+      value: {{ .VIP }}
+    - name: prometheus_server
+      value: :2112
+    image: ghcr.io/kube-vip/kube-vip:v0.6.4
+    imagePullPolicy: IfNotPresent
+    name: kube-vip
+    resources: {}
+    securityContext:
+      capabilities:
+        add:
+        - NET_ADMIN
+        - NET_RAW
+    volumeMounts:
+    - mountPath: /etc/kubernetes/admin.conf
+      name: kubeconfig
+  hostAliases:
+  - hostnames:
+    - kubernetes
+    ip: 127.0.0.1
+  hostNetwork: true
+  volumes:
+  - hostPath:
+      path: /etc/kubernetes/admin.conf
+    name: kubeconfig
+status: {}
+`))
+
+// Configure takes the last client IP address in the cluster nodes' network subnet as the VIP address and generates the kube-vip.yaml file.
+func Configure(cc config.ClusterConfig) ([]byte, error) {
+	klog.Info("generating kube-vip config ...")
+
+	params := struct {
+		VIP  string
+		Port int
+	}{
+		VIP:  cc.KubernetesConfig.APIServerHAVIP,
+		Port: cc.APIServerPort,
+	}
+
+	b := bytes.Buffer{}
+	if err := kubeVipTemplate.Execute(&b, params); err != nil {
+		return nil, errors.Wrapf(err, "execute template")
+	}
+
+	klog.Infof("kube-vip config:\n%s", b.String())
+
+	return b.Bytes(), nil
+}
diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go
index 0c46aa43067b..bb8b0bb8fc31 100644
--- a/pkg/minikube/cni/cni.go
+++ b/pkg/minikube/cni/cni.go
@@ -133,7 +133,7 @@ func chooseDefault(cc config.ClusterConfig) Manager {
 	if len(cc.Nodes) > 1 || cc.MultiNodeRequested {
 		// Enables KindNet CNI in master in multi node cluster, This solves the network problem
 		// inside pod for multi node clusters. See https://github.com/kubernetes/minikube/issues/9838.
- klog.Infof("%d nodes found, recommending kindnet", len(cc.Nodes)) + klog.Infof("multinode detected (%d nodes found), recommending kindnet", len(cc.Nodes)) return KindNet{cc: cc} } diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go index fa30e14430b1..1046864d84c3 100644 --- a/pkg/minikube/cni/kindnet.go +++ b/pkg/minikube/cni/kindnet.go @@ -179,8 +179,7 @@ func (c KindNet) manifest() (assets.CopyableFile, error) { // Apply enables the CNI func (c KindNet) Apply(r Runner) error { // This is mostly applicable to the 'none' driver - _, err := r.RunCmd(exec.Command("stat", "/opt/cni/bin/portmap")) - if err != nil { + if _, err := r.RunCmd(exec.Command("stat", "/opt/cni/bin/portmap")); err != nil { return errors.Wrap(err, "required 'portmap' CNI plug-in not found") } diff --git a/pkg/minikube/command/ssh_runner.go b/pkg/minikube/command/ssh_runner.go index 6ccd2dfd5fca..20356b172bdf 100644 --- a/pkg/minikube/command/ssh_runner.go +++ b/pkg/minikube/command/ssh_runner.go @@ -383,8 +383,6 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { // The scpcmd below *should not* return until all data is copied and the // StdinPipe is closed. But let's use errgroup to make it explicit. var g errgroup.Group - var copied int64 - g.Go(func() error { defer w.Close() header := fmt.Sprintf("C%s %d %s\n", f.GetPermissions(), f.GetLength(), f.GetTargetName()) @@ -395,7 +393,7 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { return nil } - copied, err = io.Copy(w, f) + copied, err := io.Copy(w, f) if err != nil { return errors.Wrap(err, "io.Copy") } diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 422ee448bbe1..b3741af96974 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -244,10 +244,13 @@ func MultiNode(cc ClusterConfig) bool { if len(cc.Nodes) > 1 { return true } + return viper.GetInt("nodes") > 1 +} - if viper.GetInt("nodes") > 1 { +// HA returns true if HA is requested. +func HA(cc ClusterConfig) bool { + if len(ControlPlanes(cc)) > 1 { return true } - - return false + return viper.GetBool("ha") } diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 7c1a46c7161a..11eca01487bd 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -33,6 +33,31 @@ import ( var keywords = []string{"start", "stop", "status", "delete", "config", "open", "profile", "addons", "cache", "logs"} +// ControlPlane returns the first available control-plane node or error, if none found. +func ControlPlane(cc ClusterConfig) (Node, error) { + cps := ControlPlanes(cc) + if len(cps) == 0 { + return Node{}, fmt.Errorf("no control-plane nodes found") + } + return cps[0], nil +} + +// ControlPlanes returns a list of control-plane nodes. +func ControlPlanes(cc ClusterConfig) []Node { + cps := []Node{} + for _, n := range cc.Nodes { + if n.ControlPlane { + cps = append(cps, n) + } + } + return cps +} + +// IsPrimaryControlPlane returns if node is primary control-plane node. 
+func IsPrimaryControlPlane(node Node) bool { + return node.ControlPlane && node.Name == "" +} + // IsValid checks if the profile has the essential info needed for a profile func (p *Profile) IsValid() bool { if p.Config == nil { @@ -49,39 +74,6 @@ func (p *Profile) IsValid() bool { return true } -// PrimaryControlPlane gets the node specific config for the first created control plane -func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { - for _, n := range cc.Nodes { - if n.ControlPlane { - return n, nil - } - } - - // This config is probably from 1.6 or earlier, let's convert it. - cp := Node{ - Name: cc.KubernetesConfig.NodeName, - IP: cc.KubernetesConfig.NodeIP, - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, - ContainerRuntime: cc.KubernetesConfig.ContainerRuntime, - ControlPlane: true, - Worker: true, - } - - cc.Nodes = []Node{cp} - - // Remove old style attribute to avoid confusion - cc.KubernetesConfig.NodeName = "" - cc.KubernetesConfig.NodeIP = "" - - err := SaveProfile(viper.GetString(ProfileName), cc) - if err != nil { - return Node{}, err - } - - return cp, nil -} - // ProfileNameValid checks if the profile name is container name and DNS hostname/label friendly. func ProfileNameValid(name string) bool { // RestrictedNamePattern describes the characters allowed to represent a profile's name @@ -331,7 +323,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string { // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names func MachineName(cc ClusterConfig, n Node) string { // For single node cluster, default to back to old naming - if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane { + if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.Name == "" { return cc.Name } return fmt.Sprintf("%s-%s", cc.Name, n.Name) diff --git a/pkg/minikube/config/profile_test.go b/pkg/minikube/config/profile_test.go index 5ef6542ff40d..27bc32929a07 100644 --- a/pkg/minikube/config/profile_test.go +++ b/pkg/minikube/config/profile_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package config import ( - "os" "path/filepath" "testing" @@ -279,46 +278,19 @@ func TestGetPrimaryControlPlane(t *testing.T) { expectedPort int expectedName string }{ - {"old style", "p1", "192.168.64.75", 8443, "minikube"}, {"new style", "p2_newformat", "192.168.59.136", 8443, "m01"}, } for _, tc := range tests { t.Run(tc.description, func(t *testing.T) { - // To save converted config file from old style config at ./testdata/.minikube, - // rather than at env(MINIKUBE_HOME) which depends on test environment - t.Setenv("MINIKUBE_HOME", miniDir) - cc, err := DefaultLoader.LoadConfigFromFile(tc.profile, miniDir) if err != nil { t.Fatalf("Failed to load config for %s", tc.description) } - // temporarily copy the original profile config - originalFilePath := profileFilePath(tc.profile, miniDir) - tempFilePath := filepath.Join(miniDir, "profiles", tc.profile, "config_temp.json") - t.Cleanup(func() { - // reset profile config - err = os.Rename(tempFilePath, originalFilePath) - if err != nil { - t.Fatalf("Failed to move temporal config file (%s) to original file path (%s)", - tempFilePath, originalFilePath) - } - }) - - d, err := os.ReadFile(originalFilePath) - if err != nil { - t.Fatalf("Failed to read config file : %s", originalFilePath) - } - - err = os.WriteFile(tempFilePath, d, 0644) - if err != nil { - t.Fatalf("Failed to write temporal config file : %s", tempFilePath) - } - - // get primary control plane + // get control-plane node viper.Set(ProfileName, tc.profile) - n, err := PrimaryControlPlane(cc) + n, err := ControlPlane(*cc) if err != nil { t.Fatalf("Unexpected error getting primary control plane: %v", err) } diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 245f5c10e7e3..9c350cd0be64 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -41,7 +41,6 @@ type ClusterConfig struct { Memory int CPUs int DiskSize int - VMDriver string // Legacy use only Driver string HyperkitVpnKitSock string // Only used by the Hyperkit driver HyperkitVSockPorts []string // Only used by the Hyperkit driver @@ -116,6 +115,7 @@ type KubernetesConfig struct { KubernetesVersion string ClusterName string Namespace string + APIServerHAVIP string APIServerName string APIServerNames []string APIServerIPs []net.IP @@ -136,11 +136,6 @@ type KubernetesConfig struct { EnableDefaultCNI bool // deprecated in preference to CNI CNI string // CNI to use - - // We need to keep these in the short term for backwards compatibility - NodeIP string - NodePort int - NodeName string } // Node contains information about specific nodes in a cluster diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index fdbb28ae4336..d834718ad0e0 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -544,14 +544,14 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error { if err := r.Runner.Copy(fa); err != nil { return errors.Wrap(err, "copying file") } - klog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to copy over tarball", time.Since(t)) t = time.Now() // extract the tarball to /var in the VM if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil { return errors.Wrapf(err, "extracting tarball: %s", rr.Output()) } - klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to extract the tarball", time.Since(t)) // remove the 
tarball in the VM if err := r.Runner.Remove(fa); err != nil { diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index d23a14afe78b..28008a28bc06 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -441,14 +441,14 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error { if err := r.Runner.Copy(fa); err != nil { return errors.Wrap(err, "copying file") } - klog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to copy over tarball", time.Since(t)) t = time.Now() // extract the tarball to /var in the VM if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil { return errors.Wrapf(err, "extracting tarball: %s", rr.Output()) } - klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to extract the tarball", time.Since(t)) // remove the tarball in the VM if err := r.Runner.Remove(fa); err != nil { diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index c4cbead6265d..7783d1180893 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -632,7 +632,7 @@ func (r *Docker) Preload(cc config.ClusterConfig) error { if err := r.Runner.Copy(fa); err != nil { return errors.Wrap(err, "copying file") } - klog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds()) + klog.Infof("duration metric: took %s to copy over tarball", time.Since(t)) // extract the tarball to /var in the VM if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil { diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 6a60e6e4a736..861ed2e79138 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -137,8 +137,7 @@ func PreloadExists(k8sVersion, containerRuntime, driverName string, forcePreload } // If the preload existence is cached, just return that value. - preloadState, ok := preloadStates[k8sVersion][containerRuntime] - if ok { + if preloadState, ok := preloadStates[k8sVersion][containerRuntime]; ok { return preloadState } diff --git a/pkg/minikube/driver/endpoint.go b/pkg/minikube/driver/endpoint.go index bddb207b8570..d5e86a746e57 100644 --- a/pkg/minikube/driver/endpoint.go +++ b/pkg/minikube/driver/endpoint.go @@ -27,13 +27,14 @@ import ( "k8s.io/minikube/pkg/network" ) -// ControlPlaneEndpoint returns the location where callers can reach this cluster +// ControlPlaneEndpoint returns the location where callers can reach this cluster. 
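+// Note that with port-forwarding drivers (eg, docker on darwin), the returned port is the one
+// forwarded on the daemon host, not the node's own API server port.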
 func ControlPlaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName string) (string, net.IP, int, error) {
 	if NeedsPortForward(driverName) {
 		port, err := oci.ForwardedPort(cc.Driver, cc.Name, cp.Port)
 		if err != nil {
 			klog.Warningf("failed to get forwarded control plane port %v", err)
 		}
+
 		hostname := oci.DaemonHost(driverName)
 
 		ips, err := net.LookupIP(hostname)
@@ -45,8 +46,11 @@ func ControlPlaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName
 		if cc.KubernetesConfig.APIServerName != constants.APIServerName {
 			hostname = cc.KubernetesConfig.APIServerName
 		}
-		return hostname, ips[0], port, err
-	} else if IsQEMU(driverName) && network.IsBuiltinQEMU(cc.Network) {
+
+		return hostname, ips[0], port, nil
+	}
+
+	if IsQEMU(driverName) && network.IsBuiltinQEMU(cc.Network) {
 		return "localhost", net.IPv4(127, 0, 0, 1), cc.APIServerPort, nil
 	}
 
diff --git a/pkg/minikube/kubeconfig/kubeconfig.go b/pkg/minikube/kubeconfig/kubeconfig.go
index 55ea43d1b685..29a9419f0f6b 100644
--- a/pkg/minikube/kubeconfig/kubeconfig.go
+++ b/pkg/minikube/kubeconfig/kubeconfig.go
@@ -35,211 +35,165 @@ import (
 	"k8s.io/minikube/pkg/util/lock"
 )
 
-// VerifyEndpoint verifies the IP:port stored in kubeconfig.
-func VerifyEndpoint(contextName string, hostname string, port int, configPath ...string) error {
-	path := PathFromEnv()
-	if configPath != nil {
-		path = configPath[0]
+// UpdateEndpoint overwrites the IP stored in kubeconfig with the provided IP.
+// It will also fix missing cluster or context in kubeconfig, if needed.
+// It returns whether a change was made and any error that occurred.
+func UpdateEndpoint(contextName string, host string, port int, configPath string, ext *Extension) (bool, error) {
+	if host == "" {
+		return false, fmt.Errorf("empty host")
 	}
-	if hostname == "" {
-		return fmt.Errorf("empty IP")
+	if err := VerifyEndpoint(contextName, host, port, configPath); err != nil {
+		klog.Infof("verify endpoint returned: %v", err)
 	}
 
-	gotHostname, gotPort, err := Endpoint(contextName, path)
+	cfg, err := readOrNew(configPath)
 	if err != nil {
-		return errors.Wrap(err, "extract IP")
-	}
-
-	if hostname != gotHostname || port != gotPort {
-		return fmt.Errorf("got: %s:%d, want: %s:%d", gotHostname, gotPort, hostname, port)
+		return false, errors.Wrap(err, "get kubeconfig")
 	}
 
-	return nil
-}
+	address := "https://" + host + ":" + strconv.Itoa(port)
 
-// PathFromEnv gets the path to the first kubeconfig
-func PathFromEnv() string {
-	kubeConfigEnv := os.Getenv(constants.KubeconfigEnvVar)
-	if kubeConfigEnv == "" {
-		return constants.KubeconfigPath
-	}
-	kubeConfigFiles := filepath.SplitList(kubeConfigEnv)
-	for _, kubeConfigFile := range kubeConfigFiles {
-		if kubeConfigFile != "" {
-			return kubeConfigFile
-		}
-		klog.Infof("Ignoring empty entry in %s env var", constants.KubeconfigEnvVar)
+	// check & fix kubeconfig if the cluster or context setting is missing, or server address needs updating
+	errs := configIssues(cfg, contextName, address)
+	if errs == nil {
+		return false, nil
 	}
-	return constants.KubeconfigPath
-}
+	klog.Infof("%s needs updating (will repair): %v", configPath, errs)
 
-// Endpoint returns the IP:port address stored for minikube in the kubeconfig specified
-func Endpoint(contextName string, configPath ...string) (string, int, error) {
-	path := PathFromEnv()
-	if configPath != nil {
-		path = configPath[0]
-	}
-	apiCfg, err := readOrNew(path)
-	if err != nil {
-		return "", 0, errors.Wrap(err, "read")
-	}
-	cluster, ok := apiCfg.Clusters[contextName]
-	if !ok {
-		return "", 0,
errors.Errorf("%q does not appear in %s", contextName, path) + kcs := &Settings{ + ClusterName: contextName, + ClusterServerAddress: address, + KeepContext: false, } - klog.Infof("found %q server: %q", contextName, cluster.Server) - u, err := url.Parse(cluster.Server) - if err != nil { - return "", 0, errors.Wrap(err, "url parse") + populateCerts(kcs, *cfg, contextName) + + if ext != nil { + kcs.ExtensionCluster = ext + } + if err = PopulateFromSettings(kcs, cfg); err != nil { + return false, errors.Wrap(err, "populate kubeconfig") } - port, err := strconv.Atoi(u.Port()) + err = writeToFile(cfg, configPath) if err != nil { - return "", 0, errors.Wrap(err, "atoi") + return false, errors.Wrap(err, "write kubeconfig") } - return u.Hostname(), port, nil + return true, nil } -// verifyKubeconfig verifies that the cluster and context entries in the kubeconfig are valid -func verifyKubeconfig(contextName string, hostname string, port int, configPath ...string) error { - if err := VerifyEndpoint(contextName, hostname, port, configPath...); err != nil { - return err +// VerifyEndpoint verifies the host:port stored in kubeconfig. +func VerifyEndpoint(contextName string, host string, port int, configPath string) error { + if host == "" { + return fmt.Errorf("empty host") } - path := PathFromEnv() - if configPath != nil { - path = configPath[0] + + if configPath == "" { + configPath = PathFromEnv() } - apiCfg, err := readOrNew(path) + + gotHost, gotPort, err := Endpoint(contextName, configPath) if err != nil { - return errors.Wrap(err, "read") + return errors.Wrap(err, "get endpoint") } - if _, ok := apiCfg.Contexts[contextName]; !ok { - return errors.Errorf("%q does not appear in %s", contextName, path) + + if host != gotHost || port != gotPort { + return fmt.Errorf("got: %s:%d, want: %s:%d", gotHost, gotPort, host, port) } + return nil } -// UpdateEndpoint overwrites the IP stored in kubeconfig with the provided IP. -func UpdateEndpoint(contextName string, hostname string, port int, confpath string, ext *Extension) (bool, error) { - if hostname == "" { - return false, fmt.Errorf("empty ip") +// Endpoint returns the IP:port address stored for minikube in the kubeconfig specified. 
+func Endpoint(contextName string, configPath string) (string, int, error) { + if configPath == "" { + configPath = PathFromEnv() } - err := verifyKubeconfig(contextName, hostname, port, confpath) - if err == nil { - return false, nil + apiCfg, err := readOrNew(configPath) + if err != nil { + return "", 0, errors.Wrap(err, "read kubeconfig") + } + + cluster, ok := apiCfg.Clusters[contextName] + if !ok { + return "", 0, errors.Errorf("%q does not appear in %s", contextName, configPath) } - klog.Infof("verify returned: %v", err) - cfg, err := readOrNew(confpath) + klog.Infof("found %q server: %q", contextName, cluster.Server) + u, err := url.Parse(cluster.Server) if err != nil { - return false, errors.Wrap(err, "read") - } - - address := "https://" + hostname + ":" + strconv.Itoa(port) - - // if the cluster or context setting is missing in the kubeconfig, create it - if configNeedsRepair(contextName, cfg) { - klog.Infof("%q context is missing from %s - will repair!", contextName, confpath) - lp := localpath.Profile(contextName) - gp := localpath.MiniPath() - kcs := &Settings{ - ClusterName: contextName, - ClusterServerAddress: address, - ClientCertificate: path.Join(lp, "client.crt"), - ClientKey: path.Join(lp, "client.key"), - CertificateAuthority: path.Join(gp, "ca.crt"), - KeepContext: false, - } - if ext != nil { - kcs.ExtensionCluster = ext - } - err = PopulateFromSettings(kcs, cfg) - if err != nil { - return false, errors.Wrap(err, "populating kubeconfig") - } - } else { - cfg.Clusters[contextName].Server = address + return "", 0, errors.Wrap(err, "url parse") } - err = writeToFile(cfg, confpath) + port, err := strconv.Atoi(u.Port()) if err != nil { - return false, errors.Wrap(err, "write") + return "", 0, errors.Wrap(err, "atoi") } - return true, nil + return u.Hostname(), port, nil } -func configNeedsRepair(contextName string, cfg *api.Config) bool { +// configIssues returns list of errors found in kubeconfig for given contextName and server address. +func configIssues(cfg *api.Config, contextName string, address string) []error { + errs := []error{} if _, ok := cfg.Clusters[contextName]; !ok { - return true + errs = append(errs, errors.Errorf("kubeconfig missing %q cluster setting", contextName)) + } else if cfg.Clusters[contextName].Server != address { + errs = append(errs, errors.Errorf("kubeconfig needs server address update")) } + if _, ok := cfg.Contexts[contextName]; !ok { - return true + errs = append(errs, errors.Errorf("kubeconfig missing %q context setting", contextName)) } - return false -} -// writeToFile encodes the configuration and writes it to the given file. -// If the file exists, it's contents will be overwritten. -func writeToFile(config runtime.Object, configPath ...string) error { - fPath := PathFromEnv() - if configPath != nil { - fPath = configPath[0] + if len(errs) > 0 { + return errs } + return nil +} - if config == nil { - klog.Errorf("could not write to '%s': config can't be nil", fPath) - } +// populateCerts retains certs already defined in kubeconfig or sets default ones for those missing. 
+func populateCerts(kcs *Settings, cfg api.Config, contextName string) {
+	lp := localpath.Profile(contextName)
+	gp := localpath.MiniPath()
 
-	// encode config to YAML
-	data, err := runtime.Encode(latest.Codec, config)
-	if err != nil {
-		return errors.Errorf("could not write to '%s': failed to encode config: %v", fPath, err)
+	kcs.CertificateAuthority = path.Join(gp, "ca.crt")
+	if cluster, ok := cfg.Clusters[contextName]; ok {
+		kcs.CertificateAuthority = cluster.CertificateAuthority
 	}
 
-	// create parent dir if doesn't exist
-	dir := filepath.Dir(fPath)
-	if _, err := os.Stat(dir); os.IsNotExist(err) {
-		if err = os.MkdirAll(dir, 0755); err != nil {
-			return errors.Wrapf(err, "Error creating directory: %s", dir)
+	kcs.ClientCertificate = path.Join(lp, "client.crt")
+	kcs.ClientKey = path.Join(lp, "client.key")
+	if context, ok := cfg.Contexts[contextName]; ok {
+		if user, ok := cfg.AuthInfos[context.AuthInfo]; ok {
+			kcs.ClientCertificate = user.ClientCertificate
+			kcs.ClientKey = user.ClientKey
 		}
 	}
-
-	// write with restricted permissions
-	if err := lock.WriteFile(fPath, data, 0600); err != nil {
-		return errors.Wrapf(err, "Error writing file %s", fPath)
-	}
-
-	if err := pkgutil.MaybeChownDirRecursiveToMinikubeUser(dir); err != nil {
-		return errors.Wrapf(err, "Error recursively changing ownership for dir: %s", dir)
-	}
-
-	return nil
 }
 
 // readOrNew retrieves Kubernetes client configuration from a file.
 // If no files exists, an empty configuration is returned.
-func readOrNew(configPath ...string) (*api.Config, error) {
-	fPath := PathFromEnv()
-	if configPath != nil {
-		fPath = configPath[0]
+func readOrNew(configPath string) (*api.Config, error) {
+	if configPath == "" {
+		configPath = PathFromEnv()
 	}
 
-	data, err := os.ReadFile(fPath)
+	data, err := os.ReadFile(configPath)
 	if os.IsNotExist(err) {
 		return api.NewConfig(), nil
-	} else if err != nil {
-		return nil, errors.Wrapf(err, "Error reading file %q", fPath)
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "read kubeconfig from %q", configPath)
 	}
 
 	// decode config, empty if no bytes
 	kcfg, err := decode(data)
 	if err != nil {
-		return nil, errors.Errorf("could not read config: %v", err)
+		return nil, errors.Wrapf(err, "decode kubeconfig from %q", configPath)
 	}
 
 	// initialize nil maps
@@ -266,8 +220,61 @@ func decode(data []byte) (*api.Config, error) {
 
 	kcfg, _, err := latest.Codec.Decode(data, nil, nil)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error decoding config from data: %s", string(data))
+		return nil, errors.Wrapf(err, "decode data: %s", string(data))
 	}
 
 	return kcfg.(*api.Config), nil
 }
+
+// writeToFile encodes the configuration and writes it to the given file.
+// If the file exists, its contents will be overwritten.
+func writeToFile(config runtime.Object, configPath string) error { + if configPath == "" { + configPath = PathFromEnv() + } + + if config == nil { + klog.Errorf("could not write to '%s': config can't be nil", configPath) + } + + // encode config to YAML + data, err := runtime.Encode(latest.Codec, config) + if err != nil { + return errors.Errorf("could not write to '%s': failed to encode config: %v", configPath, err) + } + + // create parent dir if doesn't exist + dir := filepath.Dir(configPath) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return errors.Wrapf(err, "Error creating directory: %s", dir) + } + } + + // write with restricted permissions + if err := lock.WriteFile(configPath, data, 0600); err != nil { + return errors.Wrapf(err, "Error writing file %s", configPath) + } + + if err := pkgutil.MaybeChownDirRecursiveToMinikubeUser(dir); err != nil { + return errors.Wrapf(err, "Error recursively changing ownership for dir: %s", dir) + } + + return nil +} + +// PathFromEnv gets the path to the first kubeconfig +func PathFromEnv() string { + kubeConfigEnv := os.Getenv(constants.KubeconfigEnvVar) + if kubeConfigEnv == "" { + return constants.KubeconfigPath + } + kubeConfigFiles := filepath.SplitList(kubeConfigEnv) + for _, kubeConfigFile := range kubeConfigFiles { + if kubeConfigFile != "" { + return kubeConfigFile + } + klog.Infof("Ignoring empty entry in %s env var", constants.KubeconfigEnvVar) + } + return constants.KubeconfigPath +} diff --git a/pkg/minikube/machine/build_images.go b/pkg/minikube/machine/build_images.go index 637c15b5edd9..f89454121174 100644 --- a/pkg/minikube/machine/build_images.go +++ b/pkg/minikube/machine/build_images.go @@ -70,7 +70,7 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o continue } - cp, err := config.PrimaryControlPlane(p.Config) + cp, err := config.ControlPlane(*p.Config) if err != nil { return err } @@ -79,7 +79,7 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o m := config.MachineName(*c, n) if !allNodes { - // build images on the primary control plane node by default + // build images on the control-plane node by default if nodeName == "" && n != cp { continue } else if nodeName != n.Name && nodeName != m { diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index a2d757a79b15..862bd918c317 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -85,11 +85,11 @@ func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images [] return nil } - klog.Infof("LoadImages start: %s", images) + klog.Infof("LoadCachedImages start: %s", images) start := time.Now() defer func() { - klog.Infof("LoadImages completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s to LoadCachedImages", time.Since(start)) }() var g errgroup.Group @@ -338,11 +338,11 @@ func removeExistingImage(r cruntime.Manager, src string, imgName string) error { // SaveCachedImages saves from the container runtime to the cache func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error { - klog.Infof("SaveImages start: %s", images) + klog.Infof("SaveCachedImages start: %s", images) start := time.Now() defer func() { - klog.Infof("SaveImages completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s to SaveCachedImages", time.Since(start)) }() var g errgroup.Group @@ -509,11 +509,11 @@ func 
transferAndSaveImage(cr command.Runner, k8s config.KubernetesConfig, dst st // pullImages pulls images to the container run time func pullImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("PullImages start: %s", images) + klog.Infof("pullImages start: %s", images) start := time.Now() defer func() { - klog.Infof("PullImages completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s to pullImages", time.Since(start)) }() var g errgroup.Group @@ -590,11 +590,11 @@ func PullImages(images []string, profile *config.Profile) error { // removeImages removes images from the container run time func removeImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("RemovingImages start: %s", images) + klog.Infof("removeImages start: %s", images) start := time.Now() defer func() { - klog.Infof("RemovingImages completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s to removeImages", time.Since(start)) }() var g errgroup.Group @@ -894,11 +894,11 @@ func TagImage(profile *config.Profile, source string, target string) error { // pushImages pushes images from the container run time func pushImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("PushImages start: %s", images) + klog.Infof("pushImages start: %s", images) start := time.Now() defer func() { - klog.Infof("PushImages completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s to pushImages", time.Since(start)) }() var g errgroup.Group diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 29f50c0ba3f1..ff959365da42 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -168,7 +168,7 @@ func (api *LocalClient) Create(h *host.Host) error { klog.Infof("LocalClient.Create starting") start := time.Now() defer func() { - klog.Infof("LocalClient.Create took %s", time.Since(start)) + klog.Infof("duration metric: took %s to LocalClient.Create", time.Since(start)) }() def := registry.Driver(h.DriverName) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 5c106aa6e114..27610458448c 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -53,7 +53,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos start := time.Now() klog.Infof("fixHost starting: %s", n.Name) defer func() { - klog.Infof("fixHost completed within %s", time.Since(start)) + klog.Infof("duration metric: took %s for fixHost", time.Since(start)) }() h, err := api.Load(config.MachineName(*cc, *n)) @@ -90,6 +90,16 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos return h, errors.Wrap(err, "post-start") } + // on vm node restart and for ha topology only (for now), + // we deliberately aim to restore backed up machine config early, + // so that remaining code logic can amend files as needed, + // it's intentionally non-fatal in case of any error + if driver.IsVM(h.DriverName) && config.HA(*cc) { + if err := restore(*h); err != nil { + klog.Warningf("cannot read backup folder, skipping restore: %v", err) + } + } + return h, nil } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 9f96600fa35a..8ee223c934a0 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -17,6 +17,10 @@ limitations under the License. 
 package machine
 
 import (
+	"fmt"
+	"os/exec"
+	"path/filepath"
+	"strings"
 	"time"
 
 	"github.com/docker/machine/libmachine"
@@ -26,7 +30,9 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
+	"k8s.io/minikube/pkg/minikube/vmpath"
 	"k8s.io/minikube/pkg/provision"
+	"k8s.io/minikube/pkg/util/retry"
 )
 
 // Machine contains information about a machine
@@ -85,16 +91,31 @@ func LoadMachine(name string) (*Machine, error) {
 
 // provisionDockerMachine provides fast provisioning of a docker machine
 func provisionDockerMachine(h *host.Host) error {
-	klog.Infof("provisioning docker machine ...")
+	klog.Infof("provisionDockerMachine start ...")
 	start := time.Now()
 	defer func() {
-		klog.Infof("provisioned docker machine in %s", time.Since(start))
+		klog.Infof("duration metric: took %s to provisionDockerMachine", time.Since(start))
 	}()
 
 	p, err := fastDetectProvisioner(h)
 	if err != nil {
 		return errors.Wrap(err, "fast detect")
	}
+
+	// avoid the costly need to stop/power off/delete and then re-create the docker machine due to an un-ready ssh server, and hence errors like:
+	// 'error starting host: creating host: create: provisioning: ssh command error: command : sudo hostname minikube-m02 && echo "minikube-m02" | sudo tee /etc/hostname; err: exit status 255'
+	// so retry only on "exit status 255" ssh error and fall through in all other cases
+	trySSH := func() error {
+		if _, err := h.RunSSHCommand("hostname"); err != nil && strings.Contains(err.Error(), "exit status 255") {
+			klog.Warning("ssh server returned retryable error (will retry)")
+			return err
+		}
+		return nil
+	}
+	if err := retry.Expo(trySSH, 100*time.Millisecond, 5*time.Second); err != nil {
+		klog.Errorf("ssh server returned non-retryable error (will continue): %v", err)
+	}
+
 	return p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions)
 }
 
@@ -128,3 +149,66 @@ func saveHost(api libmachine.API, h *host.Host, cfg *config.ClusterConfig, n *co
 	n.IP = ip
 	return config.SaveNode(cfg, n)
 }
+
+// backup copies critical ephemeral vm config files from tmpfs to persistent storage under /var/lib/minikube/backup,
+// preserving the same perms as the original files/folders, from where they can be restored on next start,
+// and returns any error that occurred.
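+// For example, "/etc/kubernetes" ends up under "/var/lib/minikube/backup/etc/kubernetes":
+// "cp --archive --parents" recreates the full source path beneath the target dir and preserves perms.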
+func backup(h host.Host, files []string) error {
+	klog.Infof("backing up vm config to %s: %v", vmpath.GuestBackupDir, files)
+
+	r, err := CommandRunner(&h)
+	if err != nil {
+		return errors.Wrap(err, "command runner")
+	}
+
+	// ensure target dir exists
+	if _, err := r.RunCmd(exec.Command("sudo", "mkdir", "-p", vmpath.GuestBackupDir)); err != nil {
+		return errors.Wrapf(err, "create dir")
+	}
+
+	errs := []error{}
+	for _, src := range []string{"/etc/cni", "/etc/kubernetes"} {
+		if _, err := r.RunCmd(exec.Command("sudo", "cp", "--archive", "--parents", "--force", src, vmpath.GuestBackupDir)); err != nil {
+			errs = append(errs, errors.Errorf("failed to copy %q to %q (will continue): %v", src, vmpath.GuestBackupDir, err))
+		}
+	}
+	if len(errs) > 0 {
+		return errors.Errorf(fmt.Sprintf("%v", errs))
+	}
+	return nil
+}
+
+// restore copies back everything from the backup folder, using relative paths as their absolute restore locations,
+// eg, "/var/lib/minikube/backup/etc/kubernetes" will be restored to "/etc/kubernetes",
+// preserving the same perms as the original files/folders.
+// Files that were updated since the last backup are not overwritten (cp --update).
+func restore(h host.Host) error {
+	r, err := CommandRunner(&h)
+	if err != nil {
+		return errors.Wrap(err, "command runner")
+	}
+
+	// check first if we have anything to restore
+	out, err := r.RunCmd(exec.Command("sudo", "ls", "--almost-all", "-1", vmpath.GuestBackupDir))
+	if err != nil {
+		return errors.Wrapf(err, "read dir")
+	}
+	files := strings.Split(strings.TrimSpace(out.Stdout.String()), "\n")
+
+	klog.Infof("restoring vm config from %s: %v", vmpath.GuestBackupDir, files)
+
+	errs := []error{}
+	for _, dst := range files {
+		if len(dst) == 0 {
+			continue
+		}
+		src := filepath.Join(vmpath.GuestBackupDir, dst)
+		if _, err := r.RunCmd(exec.Command("sudo", "cp", "--archive", "--update", "--force", src, "/")); err != nil {
+			errs = append(errs, errors.Errorf("failed to copy %q to %q (will continue): %v", src, dst, err))
+		}
+	}
+	if len(errs) > 0 {
+		return errors.Errorf(fmt.Sprintf("%v", errs))
+	}
+	return nil
+}
diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go
index f1876b7f181a..d52eee5f436d 100644
--- a/pkg/minikube/machine/start.go
+++ b/pkg/minikube/machine/start.go
@@ -125,7 +125,7 @@ func createHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (
 	klog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver)
 	start := time.Now()
 	defer func() {
-		klog.Infof("duration metric: createHost completed in %s", time.Since(start))
+		klog.Infof("duration metric: took %s to createHost", time.Since(start))
 	}()
 
 	if cfg.Driver != driver.SSH {
@@ -164,7 +164,7 @@ func createHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (
 	if err := timedCreateHost(h, api, cfg.StartHostTimeout); err != nil {
 		return nil, errors.Wrap(err, "creating host")
 	}
-	klog.Infof("duration metric: libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart))
+	klog.Infof("duration metric: took %s to libmachine.API.Create %q", time.Since(cstart), cfg.Name)
 	if cfg.Driver == driver.SSH {
 		showHostInfo(h, *cfg)
 	}
@@ -180,28 +180,21 @@ func createHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (
 }
 
 func timedCreateHost(h *host.Host, api libmachine.API, t time.Duration) error {
-	timeout := make(chan bool, 1)
+	create := make(chan error, 1)
 	go func() {
-		time.Sleep(t)
-		timeout <- true
-	}()
-
-	createFinished := make(chan bool, 1)
-	var err error
-	go func() {
-		err = api.Create(h)
-
createFinished <- true + defer close(create) + create <- api.Create(h) }() select { - case <-createFinished: + case err := <-create: if err != nil { // Wait for all the logs to reach the client time.Sleep(2 * time.Second) return errors.Wrap(err, "create") } return nil - case <-timeout: + case <-time.After(t): return fmt.Errorf("create host timed out in %f seconds", t.Seconds()) } } @@ -297,10 +290,10 @@ func DiskAvailable(cr command.Runner, dir string) (int, error) { // postStartSetup are functions shared between startHost and fixHost func postStartSetup(h *host.Host, mc config.ClusterConfig) error { - klog.Infof("post-start starting for %q (driver=%q)", h.Name, h.DriverName) + klog.Infof("postStartSetup for %q (driver=%q)", h.Name, h.DriverName) start := time.Now() defer func() { - klog.Infof("post-start completed in %s", time.Since(start)) + klog.Infof("duration metric: took %s for postStartSetup", time.Since(start)) }() if driver.IsMock(h.DriverName) { @@ -341,9 +334,11 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { if driver.BareMetal(mc.Driver) { showLocalOsRelease() } + if driver.IsVM(mc.Driver) || driver.IsKIC(mc.Driver) || driver.IsSSH(mc.Driver) { logRemoteOsRelease(r) } + return syncLocalAssets(r) } @@ -362,11 +357,11 @@ func acquireMachinesLock(name string, drv string) (mutex.Releaser, error) { spec.Timeout = 10 * time.Minute } - klog.Infof("acquiring machines lock for %s: %+v", name, spec) + klog.Infof("acquireMachinesLock for %s: %+v", name, spec) start := time.Now() r, err := mutex.Acquire(spec) if err == nil { - klog.Infof("acquired machines lock for %q in %s", name, time.Since(start)) + klog.Infof("duration metric: took %s to acquireMachinesLock for %q", time.Since(start), name) } return r, err } diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 2db2521d6d8b..4397052c7462 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -49,6 +49,13 @@ func StopHost(api libmachine.API, machineName string) error { // stop forcibly stops a host without needing to load func stop(h *host.Host) error { start := time.Now() + + if driver.IsVM(h.DriverName) { + if err := backup(*h, []string{"/etc/cni", "/etc/kubernetes"}); err != nil { + klog.Warningf("failed to complete vm config backup (will continue): %v", err) + } + } + if driver.NeedsShutdown(h.DriverName) { if err := trySSHPowerOff(h); err != nil { return errors.Wrap(err, "ssh power off") @@ -64,7 +71,8 @@ func stop(h *host.Host) error { } return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } - klog.Infof("duration metric: stop complete within %s", time.Since(start)) + + klog.Infof("duration metric: took %s to stop", time.Since(start)) return nil } diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 1634ad318fe7..f50996562f71 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -82,84 +82,121 @@ func Partial(name string, miniHome ...string) (libmachine.API, *config.ClusterCo // Running is a cmd-friendly way to load a running cluster func Running(name string) ClusterController { - api, cc := Partial(name) - - cp, err := config.PrimaryControlPlane(cc) + ctrls, err := running(name, true) if err != nil { - exit.Error(reason.GuestCpConfig, "Unable to find control plane", err) + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exit.Message(reason.GuestCpConfig, "Unable to get running control-plane nodes") } 
- machineName := config.MachineName(*cc, cp) - hs, err := machine.Status(api, machineName) - if err != nil { - exit.Error(reason.GuestStatus, "Unable to get machine status", err) + if len(ctrls) == 0 { + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exit.Message(reason.GuestCpConfig, "Unable to find any running control-plane nodes") } + return ctrls[0] +} - if hs == state.None.String() { - out.Styled(style.Shrug, `The control plane node "{{.name}}" does not exist.`, out.V{"name": cp.Name}) - exitTip("start", name, reason.ExGuestNotFound) - } +// running returns first or all running ClusterControllers found or an error. +func running(name string, first bool) ([]ClusterController, error) { + api, cc := Partial(name) - if hs == state.Stopped.String() { - out.Styled(style.Shrug, `The control plane node must be running for this command`) - exitTip("start", name, reason.ExGuestUnavailable) + cps := config.ControlPlanes(*cc) + if len(cps) == 0 { + return nil, fmt.Errorf("unable to find any control-plane nodes") } - if hs != state.Running.String() { - out.Styled(style.Shrug, `The control plane node is not running (state={{.state}})`, out.V{"name": cp.Name, "state": hs}) - exitTip("start", name, reason.ExSvcUnavailable) - } + running := []ClusterController{} + for _, cp := range cps { + machineName := config.MachineName(*cc, cp) - host, err := machine.LoadHost(api, name) - if err != nil { - exit.Error(reason.GuestLoadHost, "Unable to load host", err) - } + status, err := machine.Status(api, machineName) + if err != nil { + out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} status (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + continue + } - cr, err := machine.CommandRunner(host) - if err != nil { - exit.Error(reason.InternalCommandRunner, "Unable to get command runner", err) - } + if status == state.None.String() { + out.Styled(style.Shrug, `The control-plane node {{.name}} does not exist (will continue)`, out.V{"name": machineName}) + continue + } - hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) - if err != nil { - exit.Error(reason.DrvCPEndpoint, "Unable to get forwarded endpoint", err) - } + if status != state.Running.String() { + out.Styled(style.Shrug, `The control-plane node {{.name}} is not running (will continue): state={{.state}}`, out.V{"name": machineName, "state": status}) + continue + } - return ClusterController{ - API: api, - Config: cc, - CP: ControlPlane{ - Runner: cr, - Host: host, - Node: &cp, - Hostname: hostname, - IP: ip, - Port: port, - }, + host, err := machine.LoadHost(api, machineName) + if err != nil { + out.Styled(style.Shrug, `Unable to load control-plane node {{.name}} host (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + continue + } + + cr, err := machine.CommandRunner(host) + if err != nil { + out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} command runner (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + continue + } + + hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) + if err != nil { + out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} endpoint (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + continue + } + + running = append(running, ClusterController{ + API: api, + Config: cc, + CP: ControlPlane{ + Runner: cr, + Host: host, + Node: &cp, + Hostname: hostname, + IP: ip, + Port: 
port, + }}) + if first { + return running, nil + } } + return running, nil } // Healthy is a cmd-friendly way to load a healthy cluster func Healthy(name string) ClusterController { - co := Running(name) - - as, err := kverify.APIServerStatus(co.CP.Runner, co.CP.Hostname, co.CP.Port) + ctrls, err := running(name, false) if err != nil { - out.FailureT(`Unable to get control plane status: {{.error}}`, out.V{"error": err}) - exitTip("delete", name, reason.ExSvcError) + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exit.Message(reason.GuestCpConfig, "Unable to get running control-plane nodes") } - if as == state.Paused { - out.Styled(style.Shrug, `The control plane for "{{.name}}" is paused!`, out.V{"name": name}) - exitTip("unpause", name, reason.ExSvcConfig) + if len(ctrls) == 0 { + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exit.Message(reason.GuestCpConfig, "Unable to find any running control-plane nodes") } - if as != state.Running { - out.Styled(style.Shrug, `This control plane is not running! (state={{.state}})`, out.V{"state": as.String()}) - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exitTip("start", name, reason.ExSvcUnavailable) + for _, ctrl := range ctrls { + machineName := config.MachineName(*ctrl.Config, *ctrl.CP.Node) + + as, err := kverify.APIServerStatus(ctrl.CP.Runner, ctrl.CP.Hostname, ctrl.CP.Port) + if err != nil { + out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} apiserver status: {{.error}}`, out.V{"name": machineName, "error": err}) + continue + } + + if as == state.Paused { + out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is paused (will continue)`, out.V{"name": machineName}) + continue + } + + if as != state.Running { + out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is not running (will continue): (state={{.state}})`, out.V{"name": machineName, "state": as.String()}) + continue + } + + return ctrl } - return co + out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) + exit.Message(reason.GuestCpConfig, "Unable to find any healthy control-plane nodes") + return ClusterController{} } // ExampleCmd Return a minikube command containing the current profile name diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index c26f266e479b..4050de1abfc2 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -56,7 +56,7 @@ func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVe klog.Info("Caching tarball of preloaded images") err := download.Preload(k8sVersion, cRuntime, driverName) if err == nil { - klog.Infof("Finished verifying existence of preloaded tar for %s on %s", k8sVersion, cRuntime) + klog.Infof("Finished verifying existence of preloaded tar for %s on %s", k8sVersion, cRuntime) return // don't cache individual images if preload is successful. 
}
 klog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err)
diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go
index 273a59a2ad1e..d34bcf6bb486 100644
--- a/pkg/minikube/node/node.go
+++ b/pkg/minikube/node/node.go
@@ -20,15 +20,22 @@ import (
 "context"
 "fmt"
 "os/exec"
+ "strconv"
+ "strings"

+ "github.com/blang/semver/v4"
 "github.com/pkg/errors"
 "github.com/spf13/viper"
 v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/klog/v2"
 "k8s.io/minikube/pkg/kapi"
+ "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
 "k8s.io/minikube/pkg/minikube/config"
+ "k8s.io/minikube/pkg/minikube/cruntime"
 "k8s.io/minikube/pkg/minikube/machine"
+ "k8s.io/minikube/pkg/minikube/mustload"
+ "k8s.io/minikube/pkg/util"
 )

 // Add adds a new node config to an existing cluster.
@@ -51,11 +58,15 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
 }
 }

+ if n.ControlPlane && n.Port == 0 {
+ n.Port = cc.APIServerPort
+ }
+
 if err := config.SaveNode(cc, &n); err != nil {
 return errors.Wrap(err, "save node")
 }

- r, p, m, h, err := Provision(cc, &n, false, delOnFail)
+ r, p, m, h, err := Provision(cc, &n, delOnFail)
 if err != nil {
 return err
 }
@@ -69,46 +80,84 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
 ExistingAddons: nil,
 }

- _, err = Start(s, false)
+ _, err = Start(s)
 return err
}

-// drainNode drains then deletes (removes) node from cluster.
-func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
+// teardown drains, then resets, and finally deletes the node from the cluster.
+// ref: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#tear-down
+func teardown(cc config.ClusterConfig, name string) (*config.Node, error) {
+ // get runner for named node - has to be done before node is drained
 n, _, err := Retrieve(cc, name)
 if err != nil {
- return n, errors.Wrap(err, "retrieve")
+ return n, errors.Wrap(err, "retrieve node")
 }
-
 m := config.MachineName(cc, *n)
+
 api, err := machine.NewAPIClient()
 if err != nil {
- return n, err
+ return n, errors.Wrap(err, "get api client")
 }

- // grab control plane to use kubeconfig
- host, err := machine.LoadHost(api, cc.Name)
+ h, err := machine.LoadHost(api, m)
 if err != nil {
- return n, err
+ return n, errors.Wrap(err, "load host")
 }

- runner, err := machine.CommandRunner(host)
+ r, err := machine.CommandRunner(h)
 if err != nil {
- return n, err
+ return n, errors.Wrap(err, "get command runner")
 }

- // kubectl drain with extra options to prevent ending up stuck in the process
- // ref: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain
+ // get runner for healthy control-plane node
+ cpr := mustload.Healthy(cc.Name).CP.Runner
+
 kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion)
+
+ // kubectl drain node with extra options to prevent ending up stuck in the process
+ // ref: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain
+ // ref: https://github.com/kubernetes/kubernetes/pull/95076
 cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig", kubectl, "drain", m,
- "--force", "--grace-period=1", "--skip-wait-for-delete-timeout=1", "--disable-eviction", "--ignore-daemonsets", "--delete-emptydir-data", "--delete-local-data")
- if _, err := runner.RunCmd(cmd); err != nil {
- klog.Warningf("unable to drain node %q: %v", name, err)
+ "--force", "--grace-period=1", "--skip-wait-for-delete-timeout=1", "--disable-eviction", "--ignore-daemonsets", "--delete-emptydir-data")
+ if _, err := cpr.RunCmd(cmd); err != nil {
+ klog.Warningf("kubectl drain node %q failed (will continue): %v", m, err)
 } else {
- klog.Infof("successfully drained node %q", name)
+ klog.Infof("successfully drained node %q", m)
+ }
+
+ // kubeadm reset node to revert any changes made by previous kubeadm init/join
+ // it's to inform cluster of the node that is about to be removed and should be unregistered (eg, from etcd quorum, that would otherwise complain)
+ // ref: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
+ // avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error
+ // intentionally non-fatal on any error, propagate and check at the end of segment
+ var kerr error
+ var kv semver.Version
+ kv, kerr = util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
+ if kerr == nil {
+ var crt cruntime.Manager
+ crt, kerr = cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: r, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: kv})
+ if kerr == nil {
+ sp := crt.SocketPath()
+ // avoid warning/error:
+ // 'Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future.
+ // Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock".
+ // Please update your configuration!'
+ if !strings.HasPrefix(sp, "unix://") {
+ sp = "unix://" + sp
+ }
+
+ cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("KUBECONFIG=/var/lib/minikube/kubeconfig %s reset --force --ignore-preflight-errors=all --cri-socket=%s",
+ bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), sp))
+ if _, kerr = r.RunCmd(cmd); kerr == nil {
+ klog.Infof("successfully reset node %q", m)
+ }
+ }
+ }
+ if kerr != nil {
+ klog.Warningf("kubeadm reset node %q failed (will continue, but cluster might become unstable): %v", m, kerr)
 }

- // kubectl delete
+ // kubectl delete node
 client, err := kapi.Client(cc.Name)
 if err != nil {
 return n, err
@@ -118,17 +167,17 @@ func drainNode(cc config.ClusterConfig, name string) (*config.Node, error) {
 var grace *int64
 err = client.CoreV1().Nodes().Delete(context.Background(), m, v1.DeleteOptions{GracePeriodSeconds: grace})
 if err != nil {
- klog.Errorf("unable to delete node %q: %v", name, err)
+ klog.Errorf("kubectl delete node %q failed: %v", m, err)
 return n, err
 }
- klog.Infof("successfully deleted node %q", name)
+ klog.Infof("successfully deleted node %q", m)

 return n, nil
}

-// Delete calls drainNode to remove node from cluster and deletes the host.
+// Delete calls teardown to remove the node from the cluster and deletes the host.
 func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
- n, err := drainNode(cc, name)
+ n, err := teardown(cc, name)
 if err != nil {
 return n, err
 }
@@ -187,7 +236,26 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error {
 return config.SaveProfile(viper.GetString(config.ProfileName), cfg)
}

-// Name returns the appropriate name for the node given the current number of nodes
+// Name returns the appropriate name for the node given the node index.
 func Name(index int) string {
+ if index == 0 {
+ return ""
+ }
 return fmt.Sprintf("m%02d", index)
}
+
+// ID returns the appropriate node id from the node name.
+// ID of the first (primary control-plane) node (with empty name) is 1, so the next one would be "m02", etc.
+// E.g., "m05" should return 5, regardless of whether any preceding nodes were deleted.
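+// An illustrative mapping (editor's sketch; the values follow directly from the code below):
+//
+//	ID("")    == 1 // primary control-plane node
+//	ID("m02") == 2
+//	ID("m05") == 5 // even if "m03"/"m04" were deleted earlier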
+func ID(name string) (int, error) {
+ if name == "" {
+ return 1, nil
+ }
+
+ name = strings.TrimPrefix(name, "m")
+ i, err := strconv.Atoi(name)
+ if err != nil {
+ return -1, err
+ }
+ return i, nil
+}
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go
index baebf81b9baa..1cc6f7cea69e 100755
--- a/pkg/minikube/node/start.go
+++ b/pkg/minikube/node/start.go
@@ -91,7 +91,7 @@ type Starter struct {
 }

 // Start spins up a guest and starts the Kubernetes node.
-func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
+func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo
 var wg sync.WaitGroup
 stopk8s, err := handleNoKubernetes(starter)
 if err != nil {
@@ -125,34 +125,70 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 showVersionInfo(starter.Node.KubernetesVersion, cr)

- // Add "host.minikube.internal" DNS alias (intentionally non-fatal)
+ // add "host.minikube.internal" dns alias (intentionally non-fatal)
 hostIP, err := cluster.HostIP(starter.Host, starter.Cfg.Name)
 if err != nil {
 klog.Errorf("Unable to get host IP: %v", err)
 } else if err := machine.AddHostAlias(starter.Runner, constants.HostAlias, hostIP); err != nil {
- klog.Errorf("Unable to add host alias: %v", err)
+ klog.Errorf("Unable to add minikube host alias: %v", err)
 }

 var kcs *kubeconfig.Settings
 var bs bootstrapper.Bootstrapper
- if apiServer {
- kcs, bs, err = handleAPIServer(starter, cr, hostIP)
+ if config.IsPrimaryControlPlane(*starter.Node) {
+ // [re]start primary control-plane node
+ kcs, bs, err = startPrimaryControlPlane(starter, cr)
 if err != nil {
 return nil, err
 }
+ // configure CoreDNS concurrently, from the primary control-plane node only, and only on first node start
+ if !starter.PreExists {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // inject {"host.minikube.internal": hostIP} record into coredns for primary control-plane node host ip
+ if hostIP != nil {
+ if err := addCoreDNSEntry(starter.Runner, constants.HostAlias, hostIP.String(), *starter.Cfg); err != nil {
+ klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", constants.HostAlias, hostIP.String(), err)
+ out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods' access to the host IP")
+ }
+ }
+ // scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled
+ if !starter.Cfg.DisableOptimizations && !config.HA(*starter.Cfg) {
+ if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
+ klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
+ }
+ }
+ }()
+ }
 } else {
 bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner)
 if err != nil {
 return nil, errors.Wrap(err, "Failed to get bootstrapper")
 }

- if err = bs.SetupCerts(*starter.Cfg, *starter.Node); err != nil {
+ // for ha, use already running control-plane node to copy over certs to this secondary control-plane node
+ cpr := mustload.Running(starter.Cfg.Name).CP.Runner
+ if err = bs.SetupCerts(*starter.Cfg, *starter.Node, cpr); err != nil {
 return nil, errors.Wrap(err, "setting up certs")
 }

 if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
 return nil, errors.Wrap(err, "update node")
 }
+
+ // join cluster only on first node start
+ // except for vm driver in non-ha cluster - fallback to old behaviour
+ if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.HA(*starter.Cfg)) {
+ // make sure to use the command runner for the primary control plane to generate the join token
+ pcpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
+ if err != nil {
+ return nil, errors.Wrap(err, "get primary control-plane bootstrapper")
+ }
+ if err := joinCluster(starter, pcpBs, bs); err != nil {
+ return nil, errors.Wrap(err, "join node to cluster")
+ }
+ }
 }

 go configureMounts(&wg, *starter.Cfg)
@@ -186,45 +222,21 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 warnVirtualBox()
 }

- if apiServer {
- // special ops for none , like change minikube directory.
- // multinode super doesn't work on the none driver
- if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 {
- prepareNone()
- }
- } else {
- // Make sure to use the command runner for the control plane to generate the join token
- cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
- if err != nil {
- return nil, errors.Wrap(err, "getting control plane bootstrapper")
- }
-
- if err := joinCluster(starter, cpBs, bs); err != nil {
- return nil, errors.Wrap(err, "joining cp")
- }
-
- cnm, err := cni.New(starter.Cfg)
- if err != nil {
- return nil, errors.Wrap(err, "cni")
- }
-
- if err := cnm.Apply(cpr); err != nil {
- return nil, errors.Wrap(err, "cni apply")
- }
+ // special ops for "none" driver on control-plane node, like change minikube directory
+ if starter.Node.ControlPlane && driver.IsNone(starter.Cfg.Driver) {
+ prepareNone()
 }

- if !starter.Cfg.DisableOptimizations {
- // Scale down CoreDNS from default 2 to 1 replica.
- if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil {
- klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err)
+ // for an ha cluster, the primary control-plane node cannot fully come up on its own until a secondary control-plane node joins, so skip waiting for it
+ if config.HA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Node) {
+ klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node)
+ } else {
+ klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node)
+ if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
+ return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
 }
 }

- klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node)
- if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
- return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
- }
-
 klog.Infof("waiting for startup goroutines ...")
 wg.Wait()

@@ -260,23 +272,31 @@ func handleNoKubernetes(starter Starter) (bool, error) {
 return false, nil
 }

-// handleAPIServer handles starting the API server.
+// startPrimaryControlPlane starts the primary control-plane node.
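+// For an ha cluster it also derives the API server virtual IP. A rough sketch of
+// that step (the node IP and subnet are illustrative assumptions, not taken from
+// this change):
+//
+//	n, _ := network.Inspect("192.168.49.2")           // primary node IP on a /24 subnet
+//	cfg.KubernetesConfig.APIServerHAVIP = n.ClientMax // "192.168.49.254", last IP before broadcast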
+func startPrimaryControlPlane(starter Starter, cr cruntime.Manager) (*kubeconfig.Settings, bootstrapper.Bootstrapper, error) { + if !config.IsPrimaryControlPlane(*starter.Node) { + return nil, nil, fmt.Errorf("node not marked as primary control-plane") + } - // Must be written before bootstrap, otherwise health checks may flake due to stale IP. - kcs := setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name) - if err != nil { - return nil, nil, errors.Wrap(err, "Failed to setup kubeconfig") + if config.HA(*starter.Cfg) { + n, err := network.Inspect(starter.Node.IP) + if err != nil { + return nil, nil, errors.Wrapf(err, "inspect network") + } + // update cluster config + starter.Cfg.KubernetesConfig.APIServerHAVIP = n.ClientMax // last available ip from node's subnet, should've been reserved already } - // Setup kubeadm (must come after setupKubeconfig). - bs, err := setupKubeAdm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner) + // must be written before bootstrap, otherwise health checks may flake due to stale IP + kcs := setupKubeconfig(*starter.Host, *starter.Cfg, *starter.Node, starter.Cfg.Name) + + // setup kubeadm (must come after setupKubeconfig) + bs, err := setupKubeadm(starter.MachineAPI, *starter.Cfg, *starter.Node, starter.Runner) if err != nil { return nil, nil, errors.Wrap(err, "Failed to setup kubeadm") } - err = bs.StartCluster(*starter.Cfg) - if err != nil { + + if err := bs.StartCluster(*starter.Cfg); err != nil { ExitIfFatal(err, false) out.LogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, *starter.Cfg, starter.Runner)) return nil, bs, err @@ -287,51 +307,48 @@ func handleAPIServer(starter Starter, cr cruntime.Manager, hostIP net.IP) (*kube return nil, bs, errors.Wrap(err, "Failed kubeconfig update") } - // Not running this in a Go func can result in DNS answering taking up to 38 seconds, with the Go func it takes 6-10 seconds. - go func() { - // Inject {"host.minikube.internal": hostIP} record into CoreDNS. - if err := addCoreDNSEntry(starter.Runner, "host.minikube.internal", hostIP.String(), *starter.Cfg); err != nil { - klog.Warningf("Unable to inject {%q: %s} record into CoreDNS: %v", "host.minikube.internal", hostIP.String(), err) - out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP") - } - }() return kcs, bs, nil } // joinCluster adds new or prepares and then adds existing node to the cluster. func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrapper.Bootstrapper) error { start := time.Now() - klog.Infof("JoinCluster: %+v", starter.Cfg) + klog.Infof("joinCluster: %+v", starter.Cfg) defer func() { - klog.Infof("JoinCluster complete in %s", time.Since(start)) + klog.Infof("duration metric: took %s to joinCluster", time.Since(start)) }() - joinCmd, err := cpBs.GenerateToken(*starter.Cfg) - if err != nil { - return fmt.Errorf("error generating join token: %w", err) + role := "worker" + if starter.Node.ControlPlane { + role = "control-plane" } // avoid "error execution phase kubelet-start: a Node with name "" and status "Ready" already exists in the cluster. 
// You must delete the existing Node or change the name of this new joining Node" if starter.PreExists { - klog.Infof("removing existing worker node %q before attempting to rejoin cluster: %+v", starter.Node.Name, starter.Node) - if _, err := drainNode(*starter.Cfg, starter.Node.Name); err != nil { - klog.Errorf("error removing existing worker node before rejoining cluster, will continue anyway: %v", err) + klog.Infof("removing existing %s node %q before attempting to rejoin cluster: %+v", role, starter.Node.Name, starter.Node) + if _, err := teardown(*starter.Cfg, starter.Node.Name); err != nil { + klog.Errorf("error removing existing %s node %q before rejoining cluster, will continue anyway: %v", role, starter.Node.Name, err) } - klog.Infof("successfully removed existing worker node %q from cluster: %+v", starter.Node.Name, starter.Node) + klog.Infof("successfully removed existing %s node %q from cluster: %+v", role, starter.Node.Name, starter.Node) + } + + joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + if err != nil { + return fmt.Errorf("error generating join token: %w", err) } join := func() error { - klog.Infof("trying to join worker node %q to cluster: %+v", starter.Node.Name, starter.Node) + klog.Infof("trying to join %s node %q to cluster: %+v", role, starter.Node.Name, starter.Node) if err := bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { - klog.Errorf("worker node failed to join cluster, will retry: %v", err) + klog.Errorf("%s node failed to join cluster, will retry: %v", role, err) - // reset worker node to revert any changes made by previous kubeadm init/join - klog.Infof("resetting worker node %q before attempting to rejoin cluster...", starter.Node.Name) + // reset node to revert any changes made by previous kubeadm init/join + klog.Infof("resetting %s node %q before attempting to rejoin cluster...", role, starter.Node.Name) if _, err := starter.Runner.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s reset --force", bsutil.InvokeKubeadm(starter.Cfg.KubernetesConfig.KubernetesVersion)))); err != nil { klog.Infof("kubeadm reset failed, continuing anyway: %v", err) } else { - klog.Infof("successfully reset worker node %q", starter.Node.Name) + klog.Infof("successfully reset %s node %q", role, starter.Node.Name) } return err @@ -339,17 +356,17 @@ func joinCluster(starter Starter, cpBs bootstrapper.Bootstrapper, bs bootstrappe return nil } if err := retry.Expo(join, 10*time.Second, 3*time.Minute); err != nil { - return fmt.Errorf("error joining worker node to cluster: %w", err) + return fmt.Errorf("error joining %s node %q to cluster: %w", role, starter.Node.Name, err) } - if err := cpBs.ApplyNodeLabels(*starter.Cfg); err != nil { - return fmt.Errorf("error applying node label: %w", err) + if err := cpBs.LabelAndUntaintNode(*starter.Cfg, *starter.Node); err != nil { + return fmt.Errorf("error applying %s node %q label: %w", role, starter.Node.Name, err) } return nil } // Provision provisions the machine/container for the node -func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { +func Provision(cc *config.ClusterConfig, n *config.Node, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { register.Reg.SetStep(register.StartingNode) name := config.MachineName(*cc, *n) @@ -357,12 +374,14 @@ func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFa if cc.KubernetesConfig.KubernetesVersion == 
constants.NoKubernetesVersion { out.Step(style.ThumbsUp, "Starting minikube without Kubernetes in cluster {{.cluster}}", out.V{"cluster": cc.Name}) } else { - if apiServer { - out.Step(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) - } else { - out.Step(style.ThumbsUp, "Starting worker node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + role := "worker" + if n.ControlPlane { + role = "control-plane" } - + if config.IsPrimaryControlPlane(*n) { + role = "primary control-plane" + } + out.Step(style.ThumbsUp, "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster", out.V{"node": name, "role": role, "cluster": cc.Name}) } if driver.IsKIC(cc.Driver) { @@ -562,8 +581,8 @@ func waitForCRIVersion(runner cruntime.CommandRunner, socket string, wait int, i return retry.Expo(chkInfo, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second) } -// setupKubeAdm adds any requested files into the VM before Kubernetes is started -func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, r command.Runner) (bootstrapper.Bootstrapper, error) { +// setupKubeadm adds any requested files into the VM before Kubernetes is started. +func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, r command.Runner) (bootstrapper.Bootstrapper, error) { deleteOnFailure := viper.GetBool("delete-on-failure") bs, err := cluster.Bootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper), cfg, r) if err != nil { @@ -576,6 +595,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, for _, eo := range cfg.KubernetesConfig.ExtraOptions { out.Infof("{{.extra_option_component_name}}.{{.key}}={{.value}}", out.V{"extra_option_component_name": eo.Component, "key": eo.Key, "value": eo.Value}) } + // Loads cached images, generates config files, download binaries // update cluster and set up certs @@ -590,7 +610,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, return nil, err } - if err := bs.SetupCerts(cfg, n); err != nil { + if err := bs.SetupCerts(cfg, n, r); err != nil { if !deleteOnFailure { exit.Error(reason.GuestCert, "Failed to setup certs", err) } @@ -601,15 +621,22 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, return bs, nil } -func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clusterName string) *kubeconfig.Settings { - addr, err := apiServerURL(*h, *cc, *n) - if err != nil { - exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to get API Server URL: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)}) +// setupKubeconfig generates kubeconfig. 
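+// Sketch of the resulting server address (illustrative values; the ha branch uses the
+// cluster VIP, the non-ha branch uses the driver's control-plane endpoint):
+//
+//	addr := "https://" + net.JoinHostPort(host, strconv.Itoa(port)) // e.g. "https://192.168.49.254:8443"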
+func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings {
+ host := cc.KubernetesConfig.APIServerHAVIP
+ port := cc.APIServerPort
+ if !config.HA(cc) {
+ var err error
+ if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil {
+ exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)})
+ }
+ }
+ addr := "https://" + net.JoinHostPort(host, strconv.Itoa(port))
 if cc.KubernetesConfig.APIServerName != constants.APIServerName {
- addr = strings.ReplaceAll(addr, n.IP, cc.KubernetesConfig.APIServerName)
+ addr = strings.ReplaceAll(addr, host, cc.KubernetesConfig.APIServerName)
 }
+
 kcs := &kubeconfig.Settings{
 ClusterName: clusterName,
 Namespace: cc.KubernetesConfig.Namespace,
@@ -625,14 +652,6 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
 return kcs
 }

-func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) {
- hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, h.DriverName)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("https://" + net.JoinHostPort(hostname, strconv.Itoa(port))), nil
-}
-
 // StartMachine starts a VM
 func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) {
 m, err := machine.NewAPIClient()
diff --git a/pkg/minikube/tunnel/cluster_inspector.go b/pkg/minikube/tunnel/cluster_inspector.go
index 6f6f9f38edad..95aae6c459ae 100644
--- a/pkg/minikube/tunnel/cluster_inspector.go
+++ b/pkg/minikube/tunnel/cluster_inspector.go
@@ -94,7 +94,7 @@ func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, erro
 if ip == nil {
 return nil, fmt.Errorf("invalid IP for host %s", hostDriverIP)
 }
- dnsIP, err := util.GetDNSIP(ipNet.String())
+ dnsIP, err := util.DNSIP(ipNet.String())
 if err != nil {
 return nil, err
 }
diff --git a/pkg/minikube/tunnel/cluster_inspector_test.go b/pkg/minikube/tunnel/cluster_inspector_test.go
index 834bd8241d36..813a743305f9 100644
--- a/pkg/minikube/tunnel/cluster_inspector_test.go
+++ b/pkg/minikube/tunnel/cluster_inspector_test.go
@@ -84,7 +84,7 @@ func TestMinikubeCheckReturnsHostInformation(t *testing.T) {

 ip := net.ParseIP("1.2.3.4")
 _, ipNet, _ := net.ParseCIDR("96.0.0.0/12")
- dnsIP, err := util.GetDNSIP(ipNet.String())
+ dnsIP, err := util.DNSIP(ipNet.String())
 if err != nil {
 t.Errorf("getdnsIP: %v", err)
 }
diff --git a/pkg/minikube/tunnel/route_test.go b/pkg/minikube/tunnel/route_test.go
index 8173186777fb..41b7eb2dc75d 100644
--- a/pkg/minikube/tunnel/route_test.go
+++ b/pkg/minikube/tunnel/route_test.go
@@ -132,7 +132,7 @@ got
 func unsafeParseRoute(gatewayIP string, destCIDR string) *Route {
 ip := net.ParseIP(gatewayIP)
 _, ipNet, _ := net.ParseCIDR(destCIDR)
- dnsIP, _ := util.GetDNSIP(ipNet.String())
+ dnsIP, _ := util.DNSIP(ipNet.String())

 expectedRoute := &Route{
 Gateway: ip,
diff --git a/pkg/minikube/vmpath/constants.go b/pkg/minikube/vmpath/constants.go
index 60b259825f62..d6dc1b592eb3 100644
--- a/pkg/minikube/vmpath/constants.go
+++ b/pkg/minikube/vmpath/constants.go
@@ -25,6 +25,8 @@ const (
 GuestEphemeralDir = "/var/tmp/minikube"
 // GuestPersistentDir is the path where persistent data should be stored within the VM (not tmpfs)
 GuestPersistentDir = "/var/lib/minikube"
+ // GuestBackupDir is the path where persistent 
backup data should be stored within the VM (not tmpfs) + GuestBackupDir = GuestPersistentDir + "/backup" // GuestKubernetesCertsDir are where Kubernetes certificates are stored GuestKubernetesCertsDir = GuestPersistentDir + "/certs" // GuestCertAuthDir is where system CA certificates are installed to diff --git a/pkg/network/network.go b/pkg/network/network.go index 616f75f0c4b4..58f6fbddd0a7 100644 --- a/pkg/network/network.go +++ b/pkg/network/network.go @@ -36,9 +36,9 @@ type Parameters struct { Prefix int // network prefix length (number of leading ones in network mask) CIDR string // CIDR format ('a.b.c.d/n') Gateway string // taken from network interface address or assumed as first network IP address from given addr - ClientMin string // second IP address - ClientMax string // last IP address before broadcast - Broadcast string // last IP address + ClientMin string // first available client IP address after gateway + ClientMax string // last available client IP address before broadcast + Broadcast string // last network IP address IsPrivate bool // whether the IP is private or not Interface reservation mutex.Releaser // subnet reservation has lifespan of the process: "If a process dies while the mutex is held, the mutex is automatically released." @@ -95,11 +95,10 @@ func lookupInInterfaces(ip net.IP) (*Parameters, *net.IPNet, error) { return nil, nil, nil } -// inspect initialises IPv4 network parameters struct from given address addr. +// Inspect initialises IPv4 network parameters struct from given address addr. // addr can be single address (like "192.168.17.42"), network address (like "192.168.17.0") or in CIDR form (like "192.168.17.42/24 or "192.168.17.0/24"). // If addr belongs to network of local network interface, parameters will also contain info about that network interface. 
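+// Illustration (editor's example, addresses assumed): for addr "192.168.17.0/24" the
+// returned Parameters would hold roughly Gateway "192.168.17.1", ClientMin "192.168.17.2",
+// ClientMax "192.168.17.254" and Broadcast "192.168.17.255".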
-var inspect = func(addr string) (*Parameters, error) { - +var Inspect = func(addr string) (*Parameters, error) { // extract ip from addr ip, network, err := ParseAddr(addr) if err != nil { @@ -156,7 +155,7 @@ var inspect = func(addr string) (*Parameters, error) { n.ClientMin = min.String() max := make(net.IP, 4) - binary.BigEndian.PutUint32(max, broadcastIP-1) // clients-from: last network IP address before broadcast + binary.BigEndian.PutUint32(max, broadcastIP-1) // clients-to: last network IP address before broadcast n.ClientMax = max.String() return n, nil @@ -193,7 +192,7 @@ func IsBuiltinQEMU(network string) bool { func FreeSubnet(startSubnet string, step, tries int) (*Parameters, error) { currSubnet := startSubnet for try := 0; try < tries; try++ { - n, err := inspect(currSubnet) + n, err := Inspect(currSubnet) if err != nil { return nil, err } diff --git a/pkg/network/network_test.go b/pkg/network/network_test.go index ad1eddead432..2639f5192185 100644 --- a/pkg/network/network_test.go +++ b/pkg/network/network_test.go @@ -64,12 +64,12 @@ func TestFreeSubnet(t *testing.T) { t.Run("FirstSubnetIPV6NetworkFound", func(t *testing.T) { count := 0 - originalInspect := inspect + originalInspect := Inspect defer func() { - inspect = originalInspect + Inspect = originalInspect }() - inspect = func(addr string) (*Parameters, error) { + Inspect = func(addr string) (*Parameters, error) { count++ p := &Parameters{IP: addr, IsPrivate: true} if count == 1 { diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 6ea1932786a2..661988c47db5 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -23,6 +23,7 @@ import ( "os/exec" "path" "path/filepath" + "slices" "strings" "text/template" "time" @@ -83,7 +84,7 @@ func configureAuth(p miniProvisioner) error { klog.Infof("configureAuth start") start := time.Now() defer func() { - klog.Infof("duration metric: configureAuth took %s", time.Since(start)) + klog.Infof("duration metric: took %s to configureAuth", time.Since(start)) }() driver := p.GetDriver() @@ -109,6 +110,10 @@ func configureAuth(p miniProvisioner) error { hosts := authOptions.ServerCertSANs // The Host IP is always added to the certificate's SANs list hosts = append(hosts, ip, hostIP, "localhost", "127.0.0.1", "minikube", machineName) + // eliminate duplicates in 'hosts' + slices.Sort(hosts) + hosts = slices.Compact(hosts) + klog.Infof("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s", authOptions.ServerCertPath, authOptions.CaCertPath, diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 5020b7931330..db1a39c0568e 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -22,14 +22,8 @@ import ( "github.com/pkg/errors" ) -// These constants are used by both minikube -const ( - APIServerPort = 8443 - DefaultDNSDomain = "cluster.local" -) - -// DefaultV114AdmissionControllers are admission controllers we default to in v1.14.x -var DefaultV114AdmissionControllers = []string{ +// DefaultAdmissionControllers are admission controllers we default to +var DefaultAdmissionControllers = []string{ "NamespaceLifecycle", "LimitRanger", "ServiceAccount", @@ -41,11 +35,8 @@ var DefaultV114AdmissionControllers = []string{ "ResourceQuota", } -// DefaultLegacyAdmissionControllers are admission controllers we include with Kubernetes <1.14.0 -var DefaultLegacyAdmissionControllers = append([]string{"Initializers"}, DefaultV114AdmissionControllers...) 
- -// GetServiceClusterIP returns the first IP of the ServiceCIDR -func GetServiceClusterIP(serviceCIDR string) (net.IP, error) { +// ServiceClusterIP returns the first IP of the ServiceCIDR +func ServiceClusterIP(serviceCIDR string) (net.IP, error) { ip, _, err := net.ParseCIDR(serviceCIDR) if err != nil { return nil, errors.Wrap(err, "parsing default service cidr") @@ -55,8 +46,8 @@ func GetServiceClusterIP(serviceCIDR string) (net.IP, error) { return ip, nil } -// GetDNSIP returns x.x.x.10 of the service CIDR -func GetDNSIP(serviceCIDR string) (net.IP, error) { +// DNSIP returns x.x.x.10 of the service CIDR +func DNSIP(serviceCIDR string) (net.IP, error) { ip, _, err := net.ParseCIDR(serviceCIDR) if err != nil { return nil, errors.Wrap(err, "parsing default service cidr") @@ -66,7 +57,7 @@ func GetDNSIP(serviceCIDR string) (net.IP, error) { return ip, nil } -// GetAlternateDNS returns a list of alternate names for a domain -func GetAlternateDNS(domain string) []string { +// AlternateDNS returns a list of alternate names for a domain +func AlternateDNS(domain string) []string { return []string{"kubernetes.default.svc." + domain, "kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"} } diff --git a/pkg/util/constants_test.go b/pkg/util/constants_test.go index 6c9722566251..c5bbc1e5c5a9 100644 --- a/pkg/util/constants_test.go +++ b/pkg/util/constants_test.go @@ -31,7 +31,7 @@ func TestGetServiceClusterIP(t *testing.T) { } for _, tt := range testData { - ip, err := GetServiceClusterIP(tt.serviceCIRD) + ip, err := ServiceClusterIP(tt.serviceCIRD) if err != nil && !tt.err { t.Fatalf("GetServiceClusterIP() err = %v", err) } @@ -57,7 +57,7 @@ func TestGetDNSIP(t *testing.T) { } for _, tt := range testData { - ip, err := GetDNSIP(tt.serviceCIRD) + ip, err := DNSIP(tt.serviceCIRD) if err != nil && !tt.err { t.Fatalf("GetDNSIP() err = %v", err) } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index c11a228c1a27..f971f695b32b 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -645,8 +645,8 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) { if err != nil { t.Fatalf("error reading cluster config before soft start: %v", err) } - if beforeCfg.Config.KubernetesConfig.NodePort != apiPortTest { - t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort) + if beforeCfg.Config.APIServerPort != apiPortTest { + t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.APIServerPort) } // docs: Run `minikube start` again as a soft start @@ -664,8 +664,8 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) { t.Errorf("error reading cluster config after soft start: %v", err) } - if afterCfg.Config.KubernetesConfig.NodePort != apiPortTest { - t.Errorf("expected node port in the config not change after soft start. exepceted node port to be %d but got %d.", apiPortTest, afterCfg.Config.KubernetesConfig.NodePort) + if afterCfg.Config.APIServerPort != apiPortTest { + t.Errorf("expected node port in the config not to change after soft start. 
expected node port to be %d but got %d.", apiPortTest, afterCfg.Config.APIServerPort) } } diff --git a/test/integration/functional_test_tunnel_test.go b/test/integration/functional_test_tunnel_test.go index 63939d2fb11f..7a8238de8ab0 100644 --- a/test/integration/functional_test_tunnel_test.go +++ b/test/integration/functional_test_tunnel_test.go @@ -100,8 +100,8 @@ func checkDNSForward(t *testing.T) { } } -// getKubeDNSIP returns kube-dns ClusterIP -func getKubeDNSIP(t *testing.T, profile string) string { +// kubeDNSIP returns kube-dns ClusterIP +func kubeDNSIP(t *testing.T, profile string) string { // Load ClusterConfig c, err := config.Load(profile) if err != nil { @@ -113,7 +113,7 @@ func getKubeDNSIP(t *testing.T, profile string) string { t.Errorf("failed to parse service CIDR: %v", err) } // Get kube-dns ClusterIP - ip, err := util.GetDNSIP(ipNet.String()) + ip, err := util.DNSIP(ipNet.String()) if err != nil { t.Errorf("failed to get kube-dns IP: %v", err) } @@ -312,7 +312,7 @@ func validateDNSDig(ctx context.Context, t *testing.T, profile string) { checkRoutePassword(t) checkDNSForward(t) - ip := getKubeDNSIP(t, profile) + ip := kubeDNSIP(t, profile) dnsIP := fmt.Sprintf("@%s", ip) // Check if the dig DNS lookup works toward kube-dns IP @@ -375,7 +375,7 @@ func validateAccessDNS(_ context.Context, t *testing.T, profile string) { got := []byte{} url := fmt.Sprintf("http://%s", domain) - ip := getKubeDNSIP(t, profile) + ip := kubeDNSIP(t, profile) dnsIP := fmt.Sprintf("%s:53", ip) // Set kube-dns dial diff --git a/test/integration/ha_test.go b/test/integration/ha_test.go new file mode 100644 index 000000000000..d5a88d2a1177 --- /dev/null +++ b/test/integration/ha_test.go @@ -0,0 +1,627 @@ +//go:build integration + +/* +Copyright 2024 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "context" + "encoding/json" + "fmt" + "net" + "os/exec" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "k8s.io/minikube/cmd/minikube/cmd" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/util/retry" +) + +// TestHA tests all ha cluster functionality +func TestHA(t *testing.T) { + if NoneDriver() { + t.Skip("none driver does not support multinode/ha") + } + + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + t.Skip("kic containers are not supported on docker's azure") + } + } + + type validatorFunc func(context.Context, *testing.T, string) + profile := UniqueProfileName("ha") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer CleanupWithLogs(t, profile, cancel) + + t.Run("serial", func(t *testing.T) { + tests := []struct { + name string + validator validatorFunc + }{ + {"StartCluster", validateHAStartCluster}, + {"DeployApp", validateHADeployApp}, + {"PingHostFromPods", validateHAPingHostFromPods}, + {"AddWorkerNode", validateHAAddWorkerNode}, + {"NodeLabels", validateHANodeLabels}, + {"HAppyAfterClusterStart", validateHAStatusHAppy}, + {"CopyFile", validateHACopyFile}, + {"StopSecondaryNode", validateHAStopSecondaryNode}, + {"DegradedAfterControlPlaneNodeStop", validateHAStatusDegraded}, + {"RestartSecondaryNode", validateHARestartSecondaryNode}, + {"HAppyAfterSecondaryNodeRestart", validateHAStatusHAppy}, + {"RestartClusterKeepsNodes", validateHARestartClusterKeepsNodes}, + {"DeleteSecondaryNode", validateHADeleteSecondaryNode}, + {"DegradedAfterSecondaryNodeDelete", validateHAStatusDegraded}, + {"StopCluster", validateHAStopCluster}, + {"RestartCluster", validateHARestartCluster}, + {"DegradedAfterClusterRestart", validateHAStatusDegraded}, + {"AddSecondaryNode", validateHAAddSecondaryNode}, + {"HAppyAfterSecondaryNodeAdd", validateHAStatusHAppy}, + } + for _, tc := range tests { + tc := tc + if ctx.Err() == context.DeadlineExceeded { + t.Fatalf("Unable to run more tests (deadline exceeded)") + } + t.Run(tc.name, func(t *testing.T) { + defer PostMortemLogs(t, profile) + tc.validator(ctx, t, profile) + }) + } + }) +} + +// validateHAStartCluster ensures ha cluster can start. +func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) { + // start ha cluster + startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--ha", "-v=7", "--alsologtostderr"}, StartArgs()...) + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("failed to fresh-start ha cluster. args %q : %v", rr.Command(), err) + } + + // ensure minikube status shows 3 operational control-plane nodes + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + if err != nil { + t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 3 { + t.Errorf("status says not all three control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says not all three hosts are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says not all three kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Running") != 3 { + t.Errorf("status says not all three apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +// validateHADeployApp deploys an app to ha cluster and ensures all nodes can serve traffic. +func validateHADeployApp(ctx context.Context, t *testing.T, profile string) { + // Create a deployment for app + _, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/ha/ha-pod-dns-test.yaml")) + if err != nil { + t.Errorf("failed to create busybox deployment to ha cluster") + } + + _, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/busybox")) + if err != nil { + t.Errorf("failed to deploy busybox to ha cluster") + } + + // resolve Pod IPs + resolvePodIPs := func() error { + rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].status.podIP}'")) + if err != nil { + err := fmt.Errorf("failed to retrieve Pod IPs (may be temporary): %v", err) + t.Logf(err.Error()) + return err + } + podIPs := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ") + if len(podIPs) != 3 { + err := fmt.Errorf("expected 3 Pod IPs but got %d (may be temporary), output: %q", len(podIPs), rr.Output()) + t.Logf(err.Error()) + return err + } else if podIPs[0] == podIPs[1] || podIPs[0] == podIPs[2] || podIPs[1] == podIPs[2] { + err := fmt.Errorf("expected 3 different pod IPs but got %s and %s (may be temporary), output: %q", podIPs[0], podIPs[1], rr.Output()) + t.Logf(err.Error()) + return err + } + return nil + } + if err := retry.Expo(resolvePodIPs, 1*time.Second, Seconds(120)); err != nil { + t.Errorf("failed to resolve pod IPs: %v", err) + } + + // get Pod names + rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'")) + if err != nil { + t.Errorf("failed get Pod names") + } + podNames := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ") + + // verify all Pods could resolve a public DNS + for _, name := range podNames { + _, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.io")) + if err != nil { + t.Errorf("Pod %s could not resolve 'kubernetes.io': %v", name, err) + } + } + + // verify all Pods could resolve "kubernetes.default" + // this one is also checked by k8s e2e node conformance tests: + // https://github.com/kubernetes/kubernetes/blob/f137c4777095b3972e2dd71a01365d47be459389/test/e2e_node/environment/conformance.go#L125-L179 + for _, name := range podNames { + _, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default")) + if err != nil { + t.Errorf("Pod %s could not resolve 'kubernetes.default': %v", name, err) + } + } 
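+	// editor's note: "kubernetes.default" is resolved via the pod's /etc/resolv.conf
+	// search domains (typically "default.svc.cluster.local svc.cluster.local cluster.local"),
+	// so a failure here usually points at CoreDNS or kubelet DNS config rather than the app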
+
+
+ // verify all pods could resolve a local service.
+ for _, name := range podNames {
+ _, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "nslookup", "kubernetes.default.svc.cluster.local"))
+ if err != nil {
+ t.Errorf("Pod %s could not resolve local service (kubernetes.default.svc.cluster.local): %v", name, err)
+ }
+ }
+}
+
+// validateHAPingHostFromPods uses the app previously deployed by validateHADeployApp to verify that its pods, located on different nodes, can resolve "host.minikube.internal".
+func validateHAPingHostFromPods(ctx context.Context, t *testing.T, profile string) {
+ // get Pod names
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "get", "pods", "-o", "jsonpath='{.items[*].metadata.name}'"))
+ if err != nil {
+ t.Fatalf("failed to get Pod names: %v", err)
+ }
+ podNames := strings.Split(strings.Trim(rr.Stdout.String(), "'"), " ")
+
+ for _, name := range podNames {
+ // get host.minikube.internal ip as resolved by nslookup
+ out, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "sh", "-c", "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"))
+ if err != nil {
+ t.Errorf("Pod %s could not resolve 'host.minikube.internal': %v", name, err)
+ continue
+ }
+ hostIP := net.ParseIP(strings.TrimSpace(out.Stdout.String()))
+ if hostIP == nil {
+ t.Fatalf("minikube host ip is nil: %s", out.Output())
+ }
+ // try pinging host from pod
+ ping := fmt.Sprintf("ping -c 1 %s", hostIP)
+ if _, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "exec", name, "--", "sh", "-c", ping)); err != nil {
+ t.Errorf("Failed to ping host (%s) from pod (%s): %v", hostIP, name, err)
+ }
+ }
+}
+
+// validateHAAddWorkerNode uses the minikube node add command to add a worker node to an existing ha cluster.
+func validateHAAddWorkerNode(ctx context.Context, t *testing.T, profile string) {
+ // add a node to the current ha cluster
+ addArgs := []string{"node", "add", "-p", profile, "-v=7", "--alsologtostderr"}
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+ if err != nil {
+ t.Fatalf("failed to add worker node to current ha cluster. args %q : %v", rr.Command(), err)
+ }
+
+ // ensure minikube status shows 3 operational control-plane nodes and 1 worker node
+ rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr"))
+ if err != nil {
+ t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+ }
+ if strings.Count(rr.Stdout.String(), "type: Control Plane") != 3 {
+ t.Errorf("status says not all three control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "host: Running") != 4 {
+ t.Errorf("status says not all four hosts are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "kubelet: Running") != 4 {
+ t.Errorf("status says not all four kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "apiserver: Running") != 3 {
+ t.Errorf("status says not all three apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+}
+
+// validateHANodeLabels checks that all node labels were configured correctly. 
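+// The minikube.k8s.io/* labels are applied by minikube itself when a node joins
+// (e.g., illustratively, "minikube.k8s.io/primary=true" on the primary control-plane
+// node), so a missing label points at minikube's post-join labelling step.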
+func validateHANodeLabels(ctx context.Context, t *testing.T, profile string) {
+ // docs: Get the node labels from the cluster with `kubectl get nodes`
+ rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "-o", "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"))
+ if err != nil {
+ t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
+ }
+
+ nodeLabelsList := []map[string]string{}
+ fixedString := strings.Replace(rr.Stdout.String(), ",]", "]", 1)
+ err = json.Unmarshal([]byte(fixedString), &nodeLabelsList)
+ if err != nil {
+ t.Errorf("failed to decode json from label list: args %q: %v", rr.Command(), err)
+ }
+
+ // docs: check that all node labels match the expected minikube labels: `minikube.k8s.io/*`
+ expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name", "minikube.k8s.io/primary"}
+
+ for _, nodeLabels := range nodeLabelsList {
+ for _, el := range expectedLabels {
+ if _, ok := nodeLabels[el]; !ok {
+ t.Errorf("expected to have label %q in node labels but got: %s", el, rr.Output())
+ }
+ }
+ }
+}
+
+// validateHAStatusHAppy ensures the minikube profile list outputs correctly for a healthy ha cluster.
+func validateHAStatusHAppy(ctx context.Context, t *testing.T, profile string) {
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
+ if err != nil {
+ t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err)
+ }
+
+ var jsonObject map[string][]config.Profile
+ err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
+ if err != nil {
+ t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err)
+ }
+
+ validProfiles := jsonObject["valid"]
+ var profileObject *config.Profile
+ for _, obj := range validProfiles {
+ if obj.Name == profile {
+ profileObject = &obj
+ break
+ }
+ }
+
+ if profileObject == nil {
+ t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
+ } else {
+ if expected, numNodes := 4, len(profileObject.Config.Nodes); numNodes != expected {
+ t.Errorf("expected profile %q in json of 'profile list' to include %d nodes but have %d nodes. got *%q*. args: %q", profile, expected, numNodes, rr.Stdout.String(), rr.Command())
+ }
+
+ if expected, status := "HAppy", profileObject.Status; status != expected {
+ t.Errorf("expected profile %q in json of 'profile list' to have %q status but have %q status. got *%q*. args: %q", profile, expected, status, rr.Stdout.String(), rr.Command())
+ }
+ }
+
+ if invalidPs, ok := jsonObject["invalid"]; ok {
+ for _, ps := range invalidPs {
+ if strings.Contains(ps.Name, profile) {
+ t.Errorf("expected the json of 'profile list' to not include profile or node in invalid profile but got *%q*. args: %q", rr.Stdout.String(), rr.Command())
+ }
+ }
+ }
+}
+
+// validateHACopyFile ensures minikube cp works with ha clusters.
+func validateHACopyFile(ctx context.Context, t *testing.T, profile string) {
+ if NoneDriver() {
+ t.Skipf("skipping: cp is unsupported by none driver")
+ }
+
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--output", "json", "-v=7", "--alsologtostderr"))
+ if err != nil && rr.ExitCode != 7 {
+ t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err)
+ }
+
+ var statuses []cmd.Status
+ if err = json.Unmarshal(rr.Stdout.Bytes(), &statuses); err != nil {
+ t.Errorf("failed to decode json from status: args %q: %v", rr.Command(), err)
+ }
+
+ tmpDir := t.TempDir()
+
+ srcPath := cpTestLocalPath()
+ dstPath := cpTestMinikubePath()
+
+ for _, n := range statuses {
+ // copy local to node
+ testCpCmd(ctx, t, profile, "", srcPath, n.Name, dstPath)
+
+ // copy back from node to local
+ tmpPath := filepath.Join(tmpDir, fmt.Sprintf("cp-test_%s.txt", n.Name))
+ testCpCmd(ctx, t, profile, n.Name, dstPath, "", tmpPath)
+
+ // copy node to node
+ for _, n2 := range statuses {
+ if n.Name == n2.Name {
+ continue
+ }
+ fp := path.Join("/home/docker", fmt.Sprintf("cp-test_%s_%s.txt", n.Name, n2.Name))
+ testCpCmd(ctx, t, profile, n.Name, dstPath, n2.Name, fp)
+ }
+ }
+}
+
+// validateHAStopSecondaryNode tests the ha cluster by stopping a secondary control-plane node using the minikube node stop command.
+func validateHAStopSecondaryNode(ctx context.Context, t *testing.T, profile string) {
+ // run minikube node stop on secondary control-plane node
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", SecondNodeName, "-v=7", "--alsologtostderr"))
+ if err != nil {
+ t.Errorf("secondary control-plane node stop returned an error. args %q: %v", rr.Command(), err)
+ }
+
+ // ensure minikube status shows 3 running nodes and 1 stopped node
+ rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr"))
+ // exit code 7 means a host is stopped, which we are expecting
+ if err != nil && rr.ExitCode != 7 {
+ t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+ }
+ if strings.Count(rr.Stdout.String(), "type: Control Plane") != 3 {
+ t.Errorf("status says not all three control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
+ t.Errorf("status says not all three hosts are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
+ t.Errorf("status says not all three kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+ if strings.Count(rr.Stdout.String(), "apiserver: Running") != 2 {
+ t.Errorf("status says not two apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String())
+ }
+}
+
+// validateHAStatusDegraded ensures the minikube profile list reports a Degraded status for the ha cluster.
+func validateHAStatusDegraded(ctx context.Context, t *testing.T, profile string) {
+ rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
+ if err != nil {
+ t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err)
+ }
+
+ var jsonObject map[string][]config.Profile
+ err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
+ if err != nil {
+ t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err)
+ }
+
+ validProfiles := jsonObject["valid"]
+ var profileObject *config.Profile
+ for _, obj := range validProfiles {
+ if obj.Name == profile {
+ profileObject = &obj
+ break
+ }
+ }
+
+ if profileObject == nil {
+ t.Errorf("expected the json of 'profile list' to include %q but got *%q*. 
args: %q", profile, rr.Stdout.String(), rr.Command()) + } else if expected, status := "Degraded", profileObject.Status; status != expected { + t.Errorf("expected profile %q in json of 'profile list' to have %q status but have %q status. got *%q*. args: %q", profile, expected, status, rr.Stdout.String(), rr.Command()) + } +} + +// validateHARestartSecondaryNode tests the minikube node start command on existing stopped secondary node. +func validateHARestartSecondaryNode(ctx context.Context, t *testing.T, profile string) { + // start stopped node(s) back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", SecondNodeName, "-v=7", "--alsologtostderr")) + if err != nil { + t.Logf(rr.Stderr.String()) + t.Errorf("secondary control-plane node start returned an error. args %q: %v", rr.Command(), err) + } + + // ensure minikube status shows all 4 nodes running, waiting for ha cluster/apiservers to stabilise + minikubeStatus := func() error { + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + return err + } + if err := retry.Expo(minikubeStatus, 1*time.Second, 60*time.Second); err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 3 { + t.Errorf("status says not all three control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 4 { + t.Errorf("status says not all four hosts are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 4 { + t.Errorf("status says not all four kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Running") != 3 { + t.Errorf("status says not all three apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + // ensure kubectl can connect correctly + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to kubectl get nodes. args %q : %v", rr.Command(), err) + } +} + +// validateHARestartClusterKeepsNodes restarts minikube cluster and checks if the reported node list is unchanged. +func validateHARestartClusterKeepsNodes(ctx context.Context, t *testing.T, profile string) { + rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile, "-v=7", "--alsologtostderr")) + if err != nil { + t.Errorf("failed to run node list. args %q : %v", rr.Command(), err) + } + nodeList := rr.Stdout.String() + + _, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "-v=7", "--alsologtostderr")) + if err != nil { + t.Errorf("failed to run minikube stop. args %q : %v", rr.Command(), err) + } + + _, err = Run(t, exec.CommandContext(ctx, Target(), "start", "-p", profile, "--wait=true", "-v=7", "--alsologtostderr")) + if err != nil { + t.Errorf("failed to run minikube start. args %q : %v", rr.Command(), err) + } + + rr, err = Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile)) + if err != nil { + t.Errorf("failed to run node list. args %q : %v", rr.Command(), err) + } + + restartedNodeList := rr.Stdout.String() + if nodeList != restartedNodeList { + t.Fatalf("reported node list is not the same after restart. 
Before restart: %s\nAfter restart: %s", nodeList, restartedNodeList) + } +} + +// validateHADeleteSecondaryNode tests the minikube node delete command on a secondary control-plane node. +// note: currently, the 'minikube status' subcommand relies on the primary control-plane node, and storage-provisioner runs only on the primary control-plane node. +func validateHADeleteSecondaryNode(ctx context.Context, t *testing.T, profile string) { + // delete the other secondary control-plane node + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", ThirdNodeName, "-v=7", "--alsologtostderr")) + if err != nil { + t.Errorf("node delete returned an error. args %q: %v", rr.Command(), err) + } + + // ensure status is back down to 3 hosts + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 2 { + t.Errorf("status says not two control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says not three hosts are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says not three kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Running") != 2 { + t.Errorf("status says not two apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + // ensure kubectl knows the node is gone + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "NotReady") > 0 { + t.Errorf("expected 3 nodes to be Ready, got %v", rr.Output()) + } + + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "True") != 3 { + t.Errorf("expected 3 nodes Ready status to be True, got %v", rr.Output()) + } +} + +// validateHAStopCluster runs minikube stop on a ha cluster. +func validateHAStopCluster(ctx context.Context, t *testing.T, profile string) { + // Run minikube stop on the cluster + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop", "-v=7", "--alsologtostderr")) + if err != nil { + t.Errorf("failed to stop cluster. args %q: %v", rr.Command(), err) + } + + // ensure minikube status shows all 3 nodes stopped + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + // exit code 7 means a host is stopped, which we are expecting + if err != nil && rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 2 { + t.Errorf("status says not two control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 0 { + t.Errorf("status says there are running hosts: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 3 { + t.Errorf("status says not three kubelets are stopped: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Stopped") != 2 { + t.Errorf("status says not two apiservers are stopped: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +// validateHARestartCluster verifies a soft restart on a ha cluster works. +func validateHARestartCluster(ctx context.Context, t *testing.T, profile string) { + // restart cluster with minikube start + startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=7", "--alsologtostderr"}, StartArgs()...) + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) + } + + // ensure minikube status shows all 3 nodes running + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 2 { + t.Errorf("status says not two control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says not three hosts are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says not three kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Running") != 2 { + t.Errorf("status says not two apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + // ensure kubectl reports that all nodes are ready + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "NotReady") > 0 { + t.Errorf("expected 3 nodes to be Ready, got %v", rr.Output()) + } + + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "True") != 3 { + t.Errorf("expected 3 nodes Ready status to be True, got %v", rr.Output()) + } +} + +// validateHAAddSecondaryNode uses the minikube node add command to add a secondary control-plane node to an existing ha cluster. +func validateHAAddSecondaryNode(ctx context.Context, t *testing.T, profile string) { + // add a node to the current ha cluster + addArgs := []string{"node", "add", "-p", profile, "--control-plane", "-v=7", "--alsologtostderr"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) + if err != nil { + t.Fatalf("failed to add control-plane node to current ha cluster. 
args %q : %v", rr.Command(), err) + } + + // ensure minikube status shows 3 operational control-plane nodes and 1 worker node + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "type: Control Plane") != 3 { + t.Errorf("status says not all three control-plane nodes are present: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "host: Running") != 4 { + t.Errorf("status says not all four hosts are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 4 { + t.Errorf("status says not all four kubelets are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + if strings.Count(rr.Stdout.String(), "apiserver: Running") != 3 { + t.Errorf("status says not all three apiservers are running: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index e5a115b444e5..3800f4b4f9b4 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -41,6 +41,16 @@ func TestMultiNode(t *testing.T) { t.Skip("none driver does not support multinode") } + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + t.Skip("kic containers are not supported on docker's azure") + } + } + type validatorFunc func(context.Context, *testing.T, string) profile := UniqueProfileName("multinode") ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) @@ -165,7 +175,7 @@ func validateProfileListWithMultiNode(ctx context.Context, t *testing.T, profile } } -// validateProfileListWithMultiNode make sure minikube profile list outputs correct with multinode clusters +// validateCopyFileWithMultiNode make sure minikube cp works with multinode clusters. func validateCopyFileWithMultiNode(ctx context.Context, t *testing.T, profile string) { if NoneDriver() { t.Skipf("skipping: cp is unsupported by none driver") @@ -268,26 +278,19 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) // validateStartNodeAfterStop tests the minikube node start command on an existing stopped node func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) - if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") - } - } - // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName, "--alsologtostderr")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName, "-v=7", "--alsologtostderr")) if err != nil { t.Logf(rr.Stderr.String()) t.Errorf("node start returned an error. 
args %q: %v", rr.Command(), err) } // Make sure minikube status shows 3 running hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - if err != nil { + minikubeStatus := func() error { + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) + return err + } + if err := retry.Expo(minikubeStatus, 1*time.Second, 60*time.Second); err != nil { t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) } @@ -341,7 +344,7 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str // Run minikube stop on the cluster rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop")) if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + t.Errorf("failed to stop cluster. args %q: %v", rr.Command(), err) } // Run status to see the stopped hosts @@ -368,15 +371,6 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str // validateRestartMultiNodeCluster verifies a soft restart on a multinode cluster works func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) - if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") - } - } // Restart a full cluster with minikube start startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -418,10 +412,10 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile // validateDeleteNodeFromMultiNode tests the minikube node delete command func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { - // Start the node back up + // Delete a node from the current cluster rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", ThirdNodeName)) if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + t.Errorf("node delete returned an error. 
args %q: %v", rr.Command(), err) } // Make sure status is back down to 2 hosts @@ -438,16 +432,6 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) } - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "volume", "ls")) - if err != nil { - t.Errorf("failed to run %q : %v", rr.Command(), err) - } - if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) { - t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String()) - } - } - // Make sure kubectl knows the node is gone rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) if err != nil { diff --git a/test/integration/testdata/ha/ha-pod-dns-test.yaml b/test/integration/testdata/ha/ha-pod-dns-test.yaml new file mode 100644 index 000000000000..91c63a8164f1 --- /dev/null +++ b/test/integration/testdata/ha/ha-pod-dns-test.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: busybox + labels: + app: busybox +spec: + replicas: 3 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + # flaky nslookup in busybox versions newer than 1.28: + # https://github.com/docker-library/busybox/issues/48 + # note: registry.k8s.io/e2e-test-images/agnhost:2.32 + # has similar issues (ie, resolves but returns exit code 1) + image: gcr.io/k8s-minikube/busybox:1.28 + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + restartPolicy: Always + affinity: + # ⬇⬇⬇ This ensures pods will land on separate hosts + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: [{ key: app, operator: In, values: [busybox] }] + topologyKey: "kubernetes.io/hostname" From bcdfc03020e51673413dde02eedc2cba5d92bf9c Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Tue, 9 Jan 2024 23:37:46 +0000 Subject: [PATCH 02/41] workaround for kube-vip and k8s v1.29+ --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 10 +++++++++- pkg/minikube/cluster/ha/kube-vip/kube-vip.go | 17 +++++++++++------ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 69bd5b0f3c94..394f48bc039b 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -951,7 +951,15 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru } // deploy kube-vip for ha cluster if config.HA(cfg) { - kubevipCfg, err := kubevip.Configure(cfg) + // workaround for kube-vip + // only applicable for k8s v1.29+ during primary control-plane node's kubeadm init (ie, first boot) + // TODO (prezha): remove when fixed upstream - ref: https://github.com/kube-vip/kube-vip/issues/684#issuecomment-1864855405 + kv, err := semver.ParseTolerant(cfg.KubernetesConfig.KubernetesVersion) + if err != nil { + return errors.Wrapf(err, "parsing kubernetes version %q", cfg.KubernetesConfig.KubernetesVersion) + } + workaround := kv.GTE(semver.Version{Major: 1, Minor: 29}) && config.IsPrimaryControlPlane(n) && len(config.ControlPlanes(cfg)) == 1 + kubevipCfg, err := kubevip.Configure(cfg, workaround) if err != nil { klog.Errorf("couldn't generate kube-vip config, this might cause issues (will continue): %v", err) } else { diff --git a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go 
b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go index 521a2814a876..49960baaa7c9 100644 --- a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go +++ b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go @@ -91,21 +91,26 @@ spec: hostNetwork: true volumes: - hostPath: - path: /etc/kubernetes/admin.conf + path: "{{ .AdminConf }}" name: kubeconfig status: {} `)) // Configure takes last client ip address in cluster nodes network subnet as vip address and generates kube-vip.yaml file. -func Configure(cc config.ClusterConfig) ([]byte, error) { +func Configure(cc config.ClusterConfig, workaround bool) ([]byte, error) { klog.Info("generating kube-vip config ...") params := struct { - VIP string - Port int + VIP string + Port int + AdminConf string }{ - VIP: cc.KubernetesConfig.APIServerHAVIP, - Port: cc.APIServerPort, + VIP: cc.KubernetesConfig.APIServerHAVIP, + Port: cc.APIServerPort, + AdminConf: "/etc/kubernetes/admin.conf", + } + if workaround { + params.AdminConf = "/etc/kubernetes/super-admin.conf" } b := bytes.Buffer{} From f650994b396dc9f49f01f775cffc478fbf5eacc9 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 10 Jan 2024 22:45:05 +0000 Subject: [PATCH 03/41] use path and path_filepath according to os https://pkg.go.dev/path: "The path package should only be used for paths separated by forward slashes, such as the paths in URLs. This package does not deal with Windows paths with drive letters or backslashes; to manipulate operating system paths, use the path/filepath package." - user os can be eg, windows => use 'filepath' to reference local paths - kic/iso use linux os => use 'path' to reference "internal" paths (independently of user's os) --- pkg/minikube/bootstrapper/certs.go | 36 +++++++++++++++++------------- pkg/minikube/machine/machine.go | 4 ++-- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index ef1eb80dd5b0..974602a92610 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -24,11 +24,14 @@ import ( "net" "os" "os/exec" - "path/filepath" "slices" "strings" "time" + // WARNING: use path for kic/iso and path/filepath for user os + "path" + "path/filepath" + "github.com/juju/mutex/v2" "github.com/otiai10/copy" "github.com/pkg/errors" @@ -95,6 +98,7 @@ func SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd command.Runner, }() for _, c := range xfer { + // note: src(c) is user os' path, dst is kic/iso (linux) path certFile, err := assets.NewFileAsset(c, vmpath.GuestKubernetesCertsDir, filepath.Base(c), properPerms(c)) if err != nil { return errors.Wrapf(err, "create cert file asset for %s", c) @@ -109,7 +113,8 @@ func SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd command.Runner, for src, dst := range caCerts { // note: these are all public certs, so should be world-readeable - certFile, err := assets.NewFileAsset(src, filepath.Dir(dst), filepath.Base(dst), "0644") + // note: src is user os' path, dst is kic/iso (linux) path + certFile, err := assets.NewFileAsset(src, path.Dir(dst), path.Base(dst), "0644") if err != nil { return errors.Wrapf(err, "create ca cert file asset for %s", src) } @@ -147,9 +152,9 @@ func SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd command.Runner, kcs := &kubeconfig.Settings{ ClusterName: n.Name, ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), - ClientCertificate: filepath.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), - ClientKey: 
filepath.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), - CertificateAuthority: filepath.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), + ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), + ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), + CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), ExtensionContext: kubeconfig.NewExtension(), ExtensionCluster: kubeconfig.NewExtension(), KeepContext: false, @@ -390,7 +395,7 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, shared shared // renewExpiredKubeadmCerts checks if kubeadm certs already exists and are still valid, then renews them if needed. // if certs don't exist already (eg, kubeadm hasn't run yet), then checks are skipped. func renewExpiredKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error { - if _, err := cmd.RunCmd(exec.Command("stat", filepath.Join(vmpath.GuestPersistentDir, "certs", "apiserver-kubelet-client.crt"))); err != nil { + if _, err := cmd.RunCmd(exec.Command("stat", path.Join(vmpath.GuestPersistentDir, "certs", "apiserver-kubelet-client.crt"))); err != nil { klog.Infof("'apiserver-kubelet-client' cert doesn't exist, likely first start: %v", err) return nil } @@ -405,7 +410,7 @@ func renewExpiredKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error certPath = append(certPath, "etcd") } certPath = append(certPath, strings.TrimPrefix(cert, "etcd-")+".crt") - if !isKubeadmCertValid(cmd, filepath.Join(certPath...)) { + if !isKubeadmCertValid(cmd, path.Join(certPath...)) { expiredCerts = true } } @@ -413,7 +418,7 @@ func renewExpiredKubeadmCerts(cmd command.Runner, cc config.ClusterConfig) error return nil } out.WarningT("kubeadm certificates have expired. Generating new ones...") - kubeadmPath := filepath.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion) + kubeadmPath := path.Join(vmpath.GuestPersistentDir, "binaries", cc.KubernetesConfig.KubernetesVersion) bashCmd := fmt.Sprintf("sudo env PATH=\"%s:$PATH\" kubeadm certs renew all --config %s", kubeadmPath, constants.KubeadmYamlPath) if _, err := cmd.RunCmd(exec.Command("/bin/bash", "-c", bashCmd)); err != nil { return errors.Wrap(err, "kubeadm certs renew") @@ -450,6 +455,7 @@ func isValidPEMCertificate(filePath string) (bool, error) { // minikube root CA is also included but libmachine certificates (ca.pem/cert.pem) are excluded. 
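// editor's illustration (assumed example paths, not part of the original patch): the map built below is
// keyed by a user-os path and valued by a guest (linux) path, e.g. on Windows roughly:
//   C:\Users\me\.minikube\certs\corp-ca.crt -> path.Join(vmpath.GuestCertAuthDir, "corp-ca.pem")
// which is why keys are handled with path/filepath and values with path throughout this function.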
func collectCACerts() (map[string]string, error) { localPath := localpath.MiniPath() + // note: certFiles map's key is user os' path, whereas map's value is kic/iso (linux) path certFiles := map[string]string{} dirs := []string{filepath.Join(localPath, "certs"), filepath.Join(localPath, "files", "etc", "ssl", "certs")} @@ -485,7 +491,7 @@ func collectCACerts() (map[string]string, error) { if validPem { filename := filepath.Base(hostpath) dst := fmt.Sprintf("%s.%s", strings.TrimSuffix(filename, ext), "pem") - certFiles[hostpath] = filepath.Join(vmpath.GuestCertAuthDir, dst) + certFiles[hostpath] = path.Join(vmpath.GuestCertAuthDir, dst) } } return nil @@ -502,7 +508,7 @@ func collectCACerts() (map[string]string, error) { } // include minikube CA - certFiles[localpath.CACert()] = filepath.Join(vmpath.GuestCertAuthDir, "minikubeCA.pem") + certFiles[localpath.CACert()] = path.Join(vmpath.GuestCertAuthDir, "minikubeCA.pem") filtered := map[string]string{} for k, v := range certFiles { @@ -544,8 +550,8 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { } for _, caCertFile := range caCerts { - dstFilename := filepath.Base(caCertFile) - certStorePath := filepath.Join(vmpath.GuestCertStoreDir, dstFilename) + dstFilename := path.Base(caCertFile) + certStorePath := path.Join(vmpath.GuestCertStoreDir, dstFilename) cmd := fmt.Sprintf("test -s %s && ln -fs %s %s", caCertFile, caCertFile, certStorePath) if _, err := cr.RunCmd(exec.Command("sudo", "/bin/bash", "-c", cmd)); err != nil { @@ -560,7 +566,7 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { if err != nil { return errors.Wrapf(err, "calculate hash for cacert %s", caCertFile) } - subjectHashLink := filepath.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) + subjectHashLink := path.Join(vmpath.GuestCertStoreDir, fmt.Sprintf("%s.0", subjectHash)) // NOTE: This symlink may exist, but point to a missing file cmd = fmt.Sprintf("test -L %s || ln -fs %s %s", subjectHashLink, certStorePath, subjectHashLink) @@ -614,7 +620,7 @@ func isValid(certPath, keyPath string) bool { } if cert.NotAfter.Before(time.Now()) { - out.WarningT("Certificate {{.certPath}} has expired. Generating a new one...", out.V{"certPath": filepath.Base(certPath)}) + out.WarningT("Certificate {{.certPath}} has expired. 
Generating a new one...", out.V{"certPath": path.Base(certPath)}) klog.Infof("cert expired %s: expiration: %s, now: %s", certPath, cert.NotAfter, time.Now()) os.Remove(certPath) os.Remove(keyPath) @@ -636,7 +642,7 @@ func isKubeadmCertValid(cmd command.Runner, certPath string) bool { func properPerms(cert string) string { perms := "0644" - ext := strings.ToLower(filepath.Ext(cert)) + ext := strings.ToLower(path.Ext(cert)) if ext == ".key" || ext == ".pem" { perms = "0600" } diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 8ee223c934a0..92329914e042 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -19,7 +19,7 @@ package machine import ( "fmt" "os/exec" - "path/filepath" + "path" "strings" "time" @@ -202,7 +202,7 @@ func restore(h host.Host) error { if len(dst) == 0 { continue } - src := filepath.Join(vmpath.GuestBackupDir, dst) + src := path.Join(vmpath.GuestBackupDir, dst) if _, err := r.RunCmd(exec.Command("sudo", "cp", "--archive", "--update", "--force", src, "/")); err != nil { errs = append(errs, errors.Errorf("failed to copy %q to %q (will continue): %v", src, dst, err)) } From d45ff8f1eae37b3e55b844f09757f7b38fd1eae9 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 10 Jan 2024 23:12:20 +0000 Subject: [PATCH 04/41] resolve merge conflict due to rebase-needed --- cmd/minikube/cmd/start_flags.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index bf29a790f8b0..024da82cad9d 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -605,7 +605,6 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, rtime str CNI: getCNIConfig(cmd), }, MultiNodeRequested: viper.GetInt(nodes) > 1 || viper.GetBool(ha), - AutoPauseInterval: viper.GetDuration(autoPauseInterval), GPUs: viper.GetString(gpus), } cc.VerifyComponents = interpretWaitFlag(*cmd) From d002316736c2d2d2ab9fd818687dd624fabf52cd Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Thu, 11 Jan 2024 00:33:11 +0000 Subject: [PATCH 05/41] fix k8s node labels for "none" driver --- pkg/minikube/bootstrapper/certs.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 11 ++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 974602a92610..d6a7683a37b8 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -620,7 +620,7 @@ func isValid(certPath, keyPath string) bool { } if cert.NotAfter.Before(time.Now()) { - out.WarningT("Certificate {{.certPath}} has expired. Generating a new one...", out.V{"certPath": path.Base(certPath)}) + out.WarningT("Certificate {{.certPath}} has expired. 
Generating a new one...", out.V{"certPath": filepath.Base(certPath)}) klog.Infof("cert expired %s: expiration: %s, now: %s", certPath, cert.NotAfter, time.Now()) os.Remove(certPath) os.Remove(keyPath) @@ -642,7 +642,7 @@ func isKubeadmCertValid(cmd command.Runner, certPath string) bool { func properPerms(cert string) string { perms := "0644" - ext := strings.ToLower(path.Ext(cert)) + ext := strings.ToLower(filepath.Ext(cert)) if ext == ".key" || ext == ".pem" { perms = "0600" } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 394f48bc039b..4318256a43d9 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "net" + "os" "os/exec" "path" "runtime" @@ -1058,10 +1059,18 @@ func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.No ctx, cancel := context.WithTimeout(context.Background(), applyTimeoutSeconds*time.Second) defer cancel() + // node name is usually based on profile/cluster name, except for "none" driver where it assumes hostname + nodeName := config.MachineName(cfg, n) + if driver.IsNone(cfg.Driver) { + if n, err := os.Hostname(); err == nil { + nodeName = n + } + } + // example: // sudo /var/lib/minikube/binaries//kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes test-357 minikube.k8s.io/version= minikube.k8s.io/commit=aa91f39ffbcf27dcbb93c4ff3f457c54e585cf4a-dirty minikube.k8s.io/name=p1 minikube.k8s.io/updated_at=2020_02_20T12_05_35_0700 cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), - "label", "--overwrite", "nodes", config.MachineName(cfg, n), createdAtLbl, verLbl, commitLbl, profileNameLbl, primaryLbl) + "label", "--overwrite", "nodes", nodeName, createdAtLbl, verLbl, commitLbl, profileNameLbl, primaryLbl) if _, err := k.c.RunCmd(cmd); err != nil { if ctx.Err() == context.DeadlineExceeded { return errors.Wrapf(err, "timeout apply node labels") From fb7ae16d9f27e24592db68ad1d1b1dccc95492cf Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sat, 13 Jan 2024 16:35:39 +0000 Subject: [PATCH 06/41] ensure override with --kubernetes-version is respected --- cmd/minikube/cmd/start.go | 5 ++++- cmd/minikube/cmd/start_flags.go | 2 -- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 8b9a694d53b7..c8188b30c7d0 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -334,8 +334,9 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * rtime := getContainerRuntime(existing) cc, n, err := generateClusterConfig(cmd, existing, k8sVersion, rtime, driverName) if err != nil { - return node.Starter{}, errors.Wrap(err, "Failed to generate config") + return node.Starter{}, errors.Wrap(err, "Failed to generate cluster config") } + klog.Infof("cluster config:\n%+v", cc) if firewall.IsBootpdBlocked(cc) { if err := firewall.UnblockBootpd(); err != nil { @@ -1699,6 +1700,8 @@ func configureNodes(cc config.ClusterConfig, existing *config.ClusterConfig) (co if err != nil { return cc, config.Node{}, errors.Wrapf(err, "failed getting control-plane node") } + pcp.KubernetesVersion = kv + pcp.ContainerRuntime = cr return cc, pcp, nil } diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 024da82cad9d..9ca85cbbd079 100644 --- a/cmd/minikube/cmd/start_flags.go +++ 
b/cmd/minikube/cmd/start_flags.go @@ -319,8 +319,6 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k } } - klog.Infof("config:\n%+v", cc) - r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime}) if err != nil { return cc, config.Node{}, errors.Wrap(err, "new runtime manager") From dad37da3721683d80757ea9b3c101407a60e7222 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 14 Jan 2024 16:07:27 +0000 Subject: [PATCH 07/41] improve specific error message handling in mustload package --- pkg/minikube/mustload/mustload.go | 114 ++++++++++++++++++------------ 1 file changed, 68 insertions(+), 46 deletions(-) diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index f50996562f71..5090698d87bc 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -80,65 +80,82 @@ func Partial(name string, miniHome ...string) (libmachine.API, *config.ClusterCo return api, cc } -// Running is a cmd-friendly way to load a running cluster +// Running is a cmd-friendly way to load a running cluster. func Running(name string) ClusterController { - ctrls, err := running(name, true) - if err != nil { - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exit.Message(reason.GuestCpConfig, "Unable to get running control-plane nodes") - } - - if len(ctrls) == 0 { - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exit.Message(reason.GuestCpConfig, "Unable to find any running control-plane nodes") + if r := running(name, true); r != nil { + return r[0] } - return ctrls[0] + return ClusterController{} } -// running returns first or all running ClusterControllers found or an error. -func running(name string, first bool) ([]ClusterController, error) { +// running returns the first or all running ClusterControllers found, or exits with a specific error if none are found.
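+// A rough call-flow sketch (editorial illustration, not part of the original patch):
+//
+//	Running(name) -> running(name, true)  -> first usable control-plane, or exit with an action tip
+//	Healthy(name) -> running(name, false) -> all usable control-planes, then per-node apiserver checks
+//
+// Failures on non-final candidates only warn and move on; a failure on the last candidate exits
+// with a reason-specific code.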
+func running(name string, first bool) []ClusterController { api, cc := Partial(name) cps := config.ControlPlanes(*cc) if len(cps) == 0 { - return nil, fmt.Errorf("unable to find any control-plane nodes") + out.Styled(style.Shrug, "Unable to find any control-plane nodes") + exitTip("delete", name, reason.ExControlPlaneNotFound) } running := []ClusterController{} - for _, cp := range cps { + for i, cp := range cps { + // control flow depending on if we have any other control-plane nodes to try in case of an error + last := i == len(cps)-1 + machineName := config.MachineName(*cc, cp) status, err := machine.Status(api, machineName) if err != nil { - out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} status (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + if last { + exit.Message(reason.GuestStatus, `Unable to get control-plane node {{.name}} host status: {{.err}}`, out.V{"name": machineName, "err": err}) + } + out.WarningT(`Unable to get control-plane node {{.name}} host status (will try others): {{.err}}`, out.V{"name": machineName, "err": err}) continue } if status == state.None.String() { - out.Styled(style.Shrug, `The control-plane node {{.name}} does not exist (will continue)`, out.V{"name": machineName}) + if last { + out.Styled(style.Shrug, `The control-plane node {{.name}} host does not exist`, out.V{"name": machineName}) + exitTip("start", name, reason.ExGuestNotFound) + } + out.WarningT(`The control-plane node {{.name}} host does not exist (will try others)`, out.V{"name": machineName}) continue } if status != state.Running.String() { - out.Styled(style.Shrug, `The control-plane node {{.name}} is not running (will continue): state={{.state}}`, out.V{"name": machineName, "state": status}) + if last { + out.Styled(style.Shrug, `The control-plane node {{.name}} host is not running: state={{.state}}`, out.V{"name": machineName, "state": status}) + exitTip("start", name, reason.ExGuestNotRunning) + } + out.WarningT(`The control-plane node {{.name}} host is not running (will try others): state={{.state}}`, out.V{"name": machineName, "state": status}) continue } host, err := machine.LoadHost(api, machineName) if err != nil { - out.Styled(style.Shrug, `Unable to load control-plane node {{.name}} host (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + if last { + exit.Message(reason.GuestLoadHost, `Unable to load control-plane node {{.name}} host: {{.err}}`, out.V{"name": machineName, "err": err}) + } + out.WarningT(`Unable to load control-plane node {{.name}} host (will try others): {{.err}}`, out.V{"name": machineName, "err": err}) continue } cr, err := machine.CommandRunner(host) if err != nil { - out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} command runner (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + if last { + exit.Message(reason.InternalCommandRunner, `Unable to get control-plane node {{.name}} host command runner: {{.err}}`, out.V{"name": machineName, "err": err}) + } + out.WarningT(`Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}`, out.V{"name": machineName, "err": err}) continue } hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) if err != nil { - out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} endpoint (will continue): {{.err}}`, out.V{"name": machineName, "err": err}) + if last { + exit.Message(reason.DrvCPEndpoint, `Unable to get control-plane node {{.name}} endpoint: {{.err}}`, 
out.V{"name": machineName, "err": err}) + } + out.WarningT(`Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}`, out.V{"name": machineName, "err": err}) continue } @@ -153,52 +170,64 @@ func running(name string, first bool) ([]ClusterController, error) { IP: ip, Port: port, }}) + if first { - return running, nil + break } } - return running, nil + return running } -// Healthy is a cmd-friendly way to load a healthy cluster +// Healthy is a cmd-friendly way to load a healthy cluster. func Healthy(name string) ClusterController { - ctrls, err := running(name, false) - if err != nil { - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exit.Message(reason.GuestCpConfig, "Unable to get running control-plane nodes") - } + ctrls := running(name, false) - if len(ctrls) == 0 { - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exit.Message(reason.GuestCpConfig, "Unable to find any running control-plane nodes") - } + for i, ctrl := range ctrls { + // control flow depending on if we have any other cluster controllers to try in case of an error + last := i == len(ctrls)-1 - for _, ctrl := range ctrls { machineName := config.MachineName(*ctrl.Config, *ctrl.CP.Node) as, err := kverify.APIServerStatus(ctrl.CP.Runner, ctrl.CP.Hostname, ctrl.CP.Port) if err != nil { - out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} apiserver status: {{.error}}`, out.V{"name": machineName, "error": err}) + if last { + out.Styled(style.Shrug, `Unable to get control-plane node {{.name}} apiserver status: {{.error}}`, out.V{"name": machineName, "error": err}) + exitTip("delete", name, reason.ExControlPlaneError) + } + out.WarningT(`Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}`, out.V{"name": machineName, "error": err}) continue } if as == state.Paused { - out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is paused (will continue)`, out.V{"name": machineName}) + if last { + out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is paused`, out.V{"name": machineName}) + exitTip("unpause", name, reason.ExControlPlaneNotRunning) + } + out.WarningT(`The control-plane node {{.name}} apiserver is paused (will try others)`, out.V{"name": machineName}) continue } if as != state.Running { - out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is not running (will continue): (state={{.state}})`, out.V{"name": machineName, "state": as.String()}) + if last { + out.Styled(style.Shrug, `The control-plane node {{.name}} apiserver is not running: (state={{.state}})`, out.V{"name": machineName, "state": as.String()}) + exitTip("start", name, reason.ExControlPlaneNotRunning) + } + out.WarningT(`The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})`, out.V{"name": machineName, "state": as.String()}) continue } return ctrl } - out.WarningT(`This is unusual - you may want to investigate using "{{.command}}"`, out.V{"command": ExampleCmd(name, "logs")}) - exit.Message(reason.GuestCpConfig, "Unable to find any healthy control-plane nodes") return ClusterController{} } +// exitTip returns an action tip and exits +func exitTip(action string, profile string, code int) { + command := ExampleCmd(profile, action) + out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command}) + 
exit.Code(code) +} + // ExampleCmd Return a minikube command containing the current profile name func ExampleCmd(cname string, action string) string { if cname != constants.DefaultClusterName { @@ -206,10 +235,3 @@ func ExampleCmd(cname string, action string) string { } return fmt.Sprintf("minikube %s", action) } - -// exitTip returns an action tip and exits -func exitTip(action string, profile string, code int) { - command := ExampleCmd(profile, action) - out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command}) - exit.Code(code) -} From 99ea346caa99c480193f1ff0c5e3a52e4775b15e Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 14 Jan 2024 19:55:15 +0000 Subject: [PATCH 08/41] allow "cluster does not require reconfiguration" msg to passthrough --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 4318256a43d9..16b80bba7a0b 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -619,14 +619,18 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro conf := constants.KubeadmYamlPath // check whether or not the cluster needs to be reconfigured - // except for vm driver in non-ha cluster - fallback to old behaviour - if config.HA(cfg) || !driver.IsVM(cfg.Driver) { - rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")) - if err == nil { - // DANGER: This log message is hard-coded in an integration test! - klog.Infof("The running cluster does not require reconfiguration: %s", host) + if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err == nil { + // DANGER: This log message is hard-coded in an integration test! 
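+		// editor's note: the check above is, in effect, `sudo diff -u <conf> <conf>.new`, where diff
+		// exits 0 (so err == nil) only when the previously applied kubeadm config and the freshly
+		// rendered one are identical - i.e. the running cluster needs no reconfiguration.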
+ klog.Infof("The running cluster does not require reconfiguration: %s", host) + // taking a shortcut, as the cluster seems to be properly configured + // except for vm driver in non-ha cluster - fallback to old behaviour + // here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha cluster with vm driver + // where such cluster needs to be reconfigured b/c of (currently) ephemeral config, but then also, + // starting already started such cluster (hard to know w/o investing that time) will fallthrough the same path and reconfigure cluster + if config.HA(cfg) || !driver.IsVM(cfg.Driver) { return nil } + } else { klog.Infof("detected kubeadm config drift (will reconfigure cluster from new %s):\n%s", conf, rr.Output()) } From 32b2a5fece3308ee5469fc0bf0007c33b5e4c18a Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Mon, 15 Jan 2024 01:57:10 +0000 Subject: [PATCH 09/41] workaround for "none" driver and IsPrimaryControlPlane() --- pkg/addons/addons_storage_classes.go | 2 +- pkg/minikube/bootstrapper/certs.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 12 ++++++------ pkg/minikube/config/profile.go | 6 ++++-- pkg/minikube/node/start.go | 8 ++++---- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/pkg/addons/addons_storage_classes.go b/pkg/addons/addons_storage_classes.go index b30f2eb4f306..b4ef3cdf9c6b 100644 --- a/pkg/addons/addons_storage_classes.go +++ b/pkg/addons/addons_storage_classes.go @@ -50,7 +50,7 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st defer api.Close() pcp, err := config.ControlPlane(*cc) - if err != nil || !config.IsPrimaryControlPlane(pcp) { + if err != nil || !config.IsPrimaryControlPlane(*cc, pcp) { return errors.Wrap(err, "get primary control-plane node") } if !machine.IsRunning(api, config.MachineName(*cc, pcp)) { diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index d6a7683a37b8..99e400a1f0c4 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -124,7 +124,7 @@ func SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCmd command.Runner, if n.ControlPlane { // copy essential certs from primary control-plane node to secondaries // ref: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#manual-certs - if !config.IsPrimaryControlPlane(n) { + if !config.IsPrimaryControlPlane(k8s, n) { pcpCerts := []struct { srcDir string srcFile string diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 16b80bba7a0b..3ec47e172f6f 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -596,7 +596,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro } pcp, err := config.ControlPlane(cfg) - if err != nil || !config.IsPrimaryControlPlane(pcp) { + if err != nil || !config.IsPrimaryControlPlane(cfg, pcp) { return errors.Wrap(err, "get primary control-plane node") } @@ -911,7 +911,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { } pcp, err := config.ControlPlane(cfg) - if err != nil || !config.IsPrimaryControlPlane(pcp) { + if err != nil || !config.IsPrimaryControlPlane(cfg, pcp) { return errors.Wrap(err, "get primary control-plane node") } @@ -947,7 +947,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru if n.ControlPlane { // for primary control-plane node only, generate 
kubeadm config based on current params // on node restart, it will be checked against later if anything needs changing - if config.IsPrimaryControlPlane(n) { + if config.IsPrimaryControlPlane(cfg, n) { kubeadmCfg, err := bsutil.GenerateKubeadmYAML(cfg, n, r) if err != nil { return errors.Wrap(err, "generating kubeadm cfg") @@ -963,7 +963,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru if err != nil { return errors.Wrapf(err, "parsing kubernetes version %q", cfg.KubernetesConfig.KubernetesVersion) } - workaround := kv.GTE(semver.Version{Major: 1, Minor: 29}) && config.IsPrimaryControlPlane(n) && len(config.ControlPlanes(cfg)) == 1 + workaround := kv.GTE(semver.Version{Major: 1, Minor: 29}) && config.IsPrimaryControlPlane(cfg, n) && len(config.ControlPlanes(cfg)) == 1 kubevipCfg, err := kubevip.Configure(cfg, workaround) if err != nil { klog.Errorf("couldn't generate kube-vip config, this might cause issues (will continue): %v", err) @@ -1056,7 +1056,7 @@ func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.No // ensure that "primary" label is applied only to the 1st node in the cluster (used eg for placing ingress there) // this is used to uniquely distinguish that from other nodes in multi-master/multi-control-plane cluster config primaryLbl := "minikube.k8s.io/primary=false" - if config.IsPrimaryControlPlane(n) { + if config.IsPrimaryControlPlane(cfg, n) { primaryLbl = "minikube.k8s.io/primary=true" } @@ -1083,7 +1083,7 @@ func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.No } // primary control-plane and worker nodes should be untainted by default - if n.ControlPlane && !config.IsPrimaryControlPlane(n) { + if n.ControlPlane && !config.IsPrimaryControlPlane(cfg, n) { // example: // sudo /var/lib/minikube/binaries//kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes test-357 node-role.kubernetes.io/control-plane:NoSchedule- cmd := exec.CommandContext(ctx, "sudo", kubectlPath(cfg), fmt.Sprintf("--kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "kubeconfig")), diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 11eca01487bd..50111ca5bd83 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -54,8 +54,10 @@ func ControlPlanes(cc ClusterConfig) []Node { } // IsPrimaryControlPlane returns if node is primary control-plane node. 
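// editor's sketch of the changed semantics (illustrative node names, not from the patch):
//   cc := ClusterConfig{Nodes: []Node{{Name: "m01"}, {Name: "m02"}}}
//   IsPrimaryControlPlane(cc, cc.Nodes[0]) // true: positionally the first node in the config
//   IsPrimaryControlPlane(cc, cc.Nodes[1]) // false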
-func IsPrimaryControlPlane(node Node) bool { - return node.ControlPlane && node.Name == "" +func IsPrimaryControlPlane(cc ClusterConfig, node Node) bool { + // TODO (prezha): find where, for "none" driver, we set first (ie, primary control-plane) node name to "m01" - that should not happen but it's happening before pr #17909 + // return node.ControlPlane && node.Name == "" + return cc.Nodes != nil && cc.Nodes[0].Name == node.Name } // IsValid checks if the profile has the essential info needed for a profile diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1cc6f7cea69e..a4804cc6a84f 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -135,7 +135,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo var kcs *kubeconfig.Settings var bs bootstrapper.Bootstrapper - if config.IsPrimaryControlPlane(*starter.Node) { + if config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { // [re]start primary control-plane node kcs, bs, err = startPrimaryControlPlane(starter, cr) if err != nil { @@ -228,7 +228,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo } // for ha cluster, primary control-plane node will not come up alone until secondary joins - if config.HA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Node) { + if config.HA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node) } else { klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node) @@ -274,7 +274,7 @@ func handleNoKubernetes(starter Starter) (bool, error) { // startPrimaryControlPlane starts control-plane node. func startPrimaryControlPlane(starter Starter, cr cruntime.Manager) (*kubeconfig.Settings, bootstrapper.Bootstrapper, error) { - if !config.IsPrimaryControlPlane(*starter.Node) { + if !config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { return nil, nil, fmt.Errorf("node not marked as primary control-plane") } @@ -378,7 +378,7 @@ func Provision(cc *config.ClusterConfig, n *config.Node, delOnFail bool) (comman if n.ControlPlane { role = "control-plane" } - if config.IsPrimaryControlPlane(*n) { + if config.IsPrimaryControlPlane(*cc, *n) { role = "primary control-plane" } out.Step(style.ThumbsUp, "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster", out.V{"node": name, "role": role, "cluster": cc.Name}) From 6f580e8a51b8762753300fb4ec05826e8e7b6e74 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Tue, 16 Jan 2024 00:24:06 +0000 Subject: [PATCH 10/41] fix PingHostFromPods integration test by adding NET_RAW capabilities --- test/integration/testdata/ha/ha-pod-dns-test.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/testdata/ha/ha-pod-dns-test.yaml b/test/integration/testdata/ha/ha-pod-dns-test.yaml index 91c63a8164f1..12eaf9c410be 100644 --- a/test/integration/testdata/ha/ha-pod-dns-test.yaml +++ b/test/integration/testdata/ha/ha-pod-dns-test.yaml @@ -25,6 +25,9 @@ spec: - sleep - "3600" imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: ["NET_RAW"] restartPolicy: Always affinity: # ⬇⬇⬇ This ensures pods will land on separate hosts From 782d55bd51f0fc8fc0b339836a47ec51ee23bb62 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 31 Jan 2024 01:08:01 +0000 Subject: [PATCH 11/41] Update pkg/minikube/bootstrapper/bsutil/kubeadm.go spowelljr: Use variable to shorten long lines Co-authored-by: Steven 
Powell <44844360+spowelljr@users.noreply.github.com> --- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 87a3b51a658e..5edf5ffae1e6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -89,10 +89,11 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana // ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options // ref: https://github.com/kubernetes/kubernetes/issues/118787 if version.GTE(semver.MustParse("1.27.0")) { - kubeletConfigOpts["containerRuntimeEndpoint"] = k8s.ExtraOptions.Get("container-runtime-endpoint", Kubelet) - if kubeletConfigOpts["containerRuntimeEndpoint"] == "" { - kubeletConfigOpts["containerRuntimeEndpoint"] = r.KubeletOptions()["container-runtime-endpoint"] + runtimeEndpoint := k8s.ExtraOptions.Get("container-runtime-endpoint", Kubelet) + if runtimeEndpoint == "" { + runtimeEndpoint = r.KubeletOptions()["container-runtime-endpoint"] } + kubeletConfigOpts["containerRuntimeEndpoint"] = runtimeEndpoint } // set hairpin mode to hairpin-veth to achieve hairpin NAT, because promiscuous-bridge assumes the existence of a container bridge named cbr0 // ref: https://kubernetes.io/docs/tasks/debug/debug-application/debug-service/#a-pod-fails-to-reach-itself-via-the-service-ip From 9d5dfafe7cba78a5558d065db6ea1f1f5f4aeeda Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 31 Jan 2024 01:08:36 +0000 Subject: [PATCH 12/41] Update pkg/addons/addons_storage_classes.go spowelljr: reduce calling that function twice Co-authored-by: Steven Powell <44844360+spowelljr@users.noreply.github.com> --- pkg/addons/addons_storage_classes.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/addons/addons_storage_classes.go b/pkg/addons/addons_storage_classes.go index b4ef3cdf9c6b..a9212943431e 100644 --- a/pkg/addons/addons_storage_classes.go +++ b/pkg/addons/addons_storage_classes.go @@ -53,8 +53,9 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st if err != nil || !config.IsPrimaryControlPlane(*cc, pcp) { return errors.Wrap(err, "get primary control-plane node") } - if !machine.IsRunning(api, config.MachineName(*cc, pcp)) { - klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", config.MachineName(*cc, pcp), name, val) + machineName := config.MachineName(*cc, pcp) + if !machine.IsRunning(api, machineName) { + klog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", machineName, name, val) return EnableOrDisableAddon(cc, name, val) } From 2ec34729add3b6ecef8c02750849c36f2f0f88d4 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 31 Jan 2024 01:09:54 +0000 Subject: [PATCH 13/41] Update cmd/minikube/cmd/config/profile_list.go spowelljr: set status to as.String() just once Co-authored-by: Steven Powell <44844360+spowelljr@users.noreply.github.com> --- cmd/minikube/cmd/config/profile_list.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index b9d93f3c6409..12ba0fd7dd9f 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -166,13 +166,12 @@ func profileStatus(p *config.Profile, api libmachine.API) string { klog.Warningf("error loading profile (will 
continue): apiserver status for %s: %v", machineName, err) continue } + status = as.String() if as != state.Running { klog.Warningf("error loading profile (will continue): apiserver %s is not running: %q", machineName, hs) - status = as.String() continue } - status = state.Running.String() healthyCPs++ } From 3a0ada1f6d553bf0a3f1a720585d945de20cf972 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 31 Jan 2024 01:20:13 +0000 Subject: [PATCH 14/41] spowelljr: rename HA() to IsHA() --- cmd/minikube/cmd/config/profile_list.go | 4 ++-- cmd/minikube/cmd/node_add.go | 2 +- cmd/minikube/cmd/status.go | 3 +-- pkg/minikube/bootstrapper/certs.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 6 +++--- pkg/minikube/config/config.go | 4 ++-- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/node/start.go | 10 +++++----- 8 files changed, 16 insertions(+), 17 deletions(-) diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index 12ba0fd7dd9f..bbb33db2cfb4 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -175,7 +175,7 @@ func profileStatus(p *config.Profile, api libmachine.API) string { healthyCPs++ } - if config.HA(*p.Config) { + if config.IsHA(*p.Config) { switch { case healthyCPs < 2: return state.Stopped.String() @@ -204,7 +204,7 @@ func profilesToTableData(profiles []*config.Profile) [][]string { for _, p := range profiles { cpIP := p.Config.KubernetesConfig.APIServerHAVIP cpPort := p.Config.APIServerPort - if !config.HA(*p.Config) { + if !config.IsHA(*p.Config) { cp, err := config.ControlPlane(*p.Config) if err != nil { exit.Error(reason.GuestCpConfig, "error getting control-plane node", err) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 73f8fa62694d..5cad0e77177b 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -50,7 +50,7 @@ var nodeAddCmd = &cobra.Command{ out.FailureT("none driver does not support multi-node clusters") } - if cpNode && !config.HA(*cc) { + if cpNode && !config.IsHA(*cc) { out.FailureT("Adding a control-plane node to a non-HA cluster is not currently supported. 
Please first delete the cluster and use 'minikube start --ha' to create new one.") } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index f63a77cd40a8..8249b903d1f3 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -403,10 +403,9 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St if cc.Addons["auto-pause"] { hostname, _, port, err = driver.AutoPauseProxyEndpoint(&cc, &n, host.DriverName) } else { - if config.HA(cc) { + if config.IsHA(cc) { hostname = cc.KubernetesConfig.APIServerHAVIP port = cc.APIServerPort - err = nil // checked below } else { hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 99e400a1f0c4..23f2cb3788ec 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -268,7 +268,7 @@ func generateProfileCerts(cfg config.ClusterConfig, n config.Node, shared shared for _, n := range config.ControlPlanes(cfg) { apiServerIPs = append(apiServerIPs, net.ParseIP(n.IP)) } - if config.HA(cfg) { + if config.IsHA(cfg) { apiServerIPs = append(apiServerIPs, net.ParseIP(cfg.KubernetesConfig.APIServerHAVIP)) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 3ec47e172f6f..ff9157488787 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -627,7 +627,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro // here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha cluster with vm driver // where such cluster needs to be reconfigured b/c of (currently) ephemeral config, but then also, // starting already started such cluster (hard to know w/o investing that time) will fallthrough the same path and reconfigure cluster - if config.HA(cfg) || !driver.IsVM(cfg.Driver) { + if config.IsHA(cfg) || !driver.IsVM(cfg.Driver) { return nil } } else { @@ -955,7 +955,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640")) } // deploy kube-vip for ha cluster - if config.HA(cfg) { + if config.IsHA(cfg) { // workaround for kube-vip // only applicable for k8s v1.29+ during primary control-plane node's kubeadm init (ie, first boot) // TODO (prezha): remove when fixed upstream - ref: https://github.com/kube-vip/kube-vip/issues/684#issuecomment-1864855405 @@ -998,7 +998,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru // add "control-plane.minikube.internal" dns alias // note: needs to be called after APIServerHAVIP is set (in startPrimaryControlPlane()) and before kubeadm kicks off cpIP := cfg.KubernetesConfig.APIServerHAVIP - if !config.HA(cfg) { + if !config.IsHA(cfg) { cp, err := config.ControlPlane(cfg) if err != nil { return errors.Wrap(err, "get control-plane node") diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index b3741af96974..68f33c00028e 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -247,8 +247,8 @@ func MultiNode(cc ClusterConfig) bool { return viper.GetInt("nodes") > 1 } -// HA returns true if HA is requested. -func HA(cc ClusterConfig) bool { +// IsHA returns true if HA is requested. 
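+// Editorial sketch (flag name assumed from this PR, not verified here) of the two ways HA is detected:
+//
+//	minikube start --ha        // explicit request on cluster creation, read back via viper.GetBool(ha)
+//	len(ControlPlanes(cc)) > 1 // the existing cluster already has multiple control-plane nodes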
+func IsHA(cc ClusterConfig) bool { if len(ControlPlanes(cc)) > 1 { return true } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 27610458448c..29c2f2deb567 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -94,7 +94,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos // we deliberately aim to restore backed up machine config early, // so that remaining code logic can amend files as needed, // it's intentionally non-fatal in case of any error - if driver.IsVM(h.DriverName) && config.HA(*cc) { + if driver.IsVM(h.DriverName) && config.IsHA(*cc) { if err := restore(*h); err != nil { klog.Warningf("cannot read backup folder, skipping restore: %v", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index a4804cc6a84f..6ec58cd00751 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -154,7 +154,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo } } // scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled - if !starter.Cfg.DisableOptimizations && !config.HA(*starter.Cfg) { + if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) { if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) } @@ -179,7 +179,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo // join cluster only on first node start // except for vm driver in non-ha cluster - fallback to old behaviour - if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.HA(*starter.Cfg)) { + if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.IsHA(*starter.Cfg)) { // make sure to use the command runner for the primary control plane to generate the join token pcpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { @@ -228,7 +228,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo } // for ha cluster, primary control-plane node will not come up alone until secondary joins - if config.HA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { + if config.IsHA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node) } else { klog.Infof("Will wait %s for node %+v", viper.GetDuration(waitTimeout), starter.Node) @@ -278,7 +278,7 @@ func startPrimaryControlPlane(starter Starter, cr cruntime.Manager) (*kubeconfig return nil, nil, fmt.Errorf("node not marked as primary control-plane") } - if config.HA(*starter.Cfg) { + if config.IsHA(*starter.Cfg) { n, err := network.Inspect(starter.Node.IP) if err != nil { return nil, nil, errors.Wrapf(err, "inspect network") @@ -625,7 +625,7 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings { host := cc.KubernetesConfig.APIServerHAVIP port := cc.APIServerPort - if !config.HA(cc) { + if !config.IsHA(cc) { var err error if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil { exit.Message(reason.DrvCPEndpoint, 
fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)}) From dda8db0d021bfc373f11b75f77c6701bc2b43a67 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 31 Jan 2024 01:33:46 +0000 Subject: [PATCH 15/41] spowelljr: prevent user from modifying apiserver port --- cmd/minikube/cmd/start_flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 9ca85cbbd079..da2e5300e801 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -747,7 +747,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC out.WarningT("Changing the HA mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") } - if cmd.Flags().Changed(apiServerPort) && viper.GetBool(ha) { + if cmd.Flags().Changed(apiServerPort) && config.IsHA(*existing) { out.WarningT("Changing the apiserver port of an existing minikube ha cluster is not currently supported. Please first delete the cluster.") } else { updateIntFromFlag(cmd, &cc.APIServerPort, apiServerPort) From 22603d178f0627e374b83b43b13996c9893288ef Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 7 Feb 2024 00:23:15 +0000 Subject: [PATCH 16/41] use forwarded apiserver port for drivers that need it --- cmd/minikube/cmd/status.go | 7 +++---- pkg/minikube/node/start.go | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 8249b903d1f3..c8319405fbbd 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -403,10 +403,9 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St if cc.Addons["auto-pause"] { hostname, _, port, err = driver.AutoPauseProxyEndpoint(&cc, &n, host.DriverName) } else { - if config.IsHA(cc) { - hostname = cc.KubernetesConfig.APIServerHAVIP - port = cc.APIServerPort - } else { + hostname = cc.KubernetesConfig.APIServerHAVIP + port = cc.APIServerPort + if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) { hostname, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) } } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 6ec58cd00751..0d0e18efd80c 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -625,7 +625,7 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings { host := cc.KubernetesConfig.APIServerHAVIP port := cc.APIServerPort - if !config.IsHA(cc) { + if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) { var err error if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil { exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)}) From 2f040dcf24203298116fc1828fd4d2899cf26fe9 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Wed, 7 Feb 2024 01:30:09 +0000 Subject: [PATCH 17/41] bump kube-vip to v0.7.0 --- pkg/minikube/cluster/ha/kube-vip/kube-vip.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go index 49960baaa7c9..8921ee7c92a8 100644 ---
a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go +++ b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go @@ -72,7 +72,7 @@ spec: value: {{ .VIP }} - name: prometheus_server value: :2112 - image: ghcr.io/kube-vip/kube-vip:v0.6.4 + image: ghcr.io/kube-vip/kube-vip:v0.7.0 imagePullPolicy: IfNotPresent name: kube-vip resources: {} From 54c6e698a707c8427acdb4bbe66d5806713e0beb Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 25 Feb 2024 20:00:08 +0000 Subject: [PATCH 18/41] add "(multi-control plane)" after each reference to "ha" --- cmd/minikube/cmd/node_add.go | 4 +- cmd/minikube/cmd/start_flags.go | 12 +++--- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 8 ++-- pkg/minikube/config/config.go | 2 +- pkg/minikube/machine/fix.go | 2 +- pkg/minikube/node/start.go | 10 ++--- test/integration/ha_test.go | 42 ++++++++++---------- 7 files changed, 40 insertions(+), 40 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 5cad0e77177b..bb014ec28811 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -51,7 +51,7 @@ var nodeAddCmd = &cobra.Command{ } if cpNode && !config.IsHA(*cc) { - out.FailureT("Adding a control-plane node to a non-HA cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") + out.FailureT("Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") } roles := []string{} @@ -106,7 +106,7 @@ var nodeAddCmd = &cobra.Command{ } func init() { - nodeAddCmd.Flags().BoolVar(&cpNode, "control-plane", false, "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA clusters.") + nodeAddCmd.Flags().BoolVar(&cpNode, "control-plane", false, "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.") nodeAddCmd.Flags().BoolVar(&workerNode, "worker", true, "If set, added node will be available as worker. Defaults to true.") nodeAddCmd.Flags().BoolVar(&deleteNodeOnFailure, "delete-on-failure", false, "If set, delete the current cluster if start fails and try again. Defaults to false.") diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index da2e5300e801..45bdf48dc318 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -190,7 +190,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.") startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") - startCmd.Flags().Bool(ha, false, "Create Highly Available Cluster with a minimum of three control-plane nodes that will also be marked for work.") + startCmd.Flags().Bool(ha, false, "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.") startCmd.Flags().IntP(nodes, "n", 1, "The total number of nodes to spin up. Defaults to 1.") startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. 
Defaults to true.") startCmd.Flags().Bool(noKubernetes, false, "If set, minikube VM/container will start without starting or configuring Kubernetes. (only works on new clusters)") @@ -666,20 +666,20 @@ func addFeatureGate(featureGates, s string) string { return strings.Join(split, ",") } -// validateHANodeCount ensures correct total number of nodes in HA cluster. +// validateHANodeCount ensures correct total number of nodes in ha (multi-control plane) cluster. func validateHANodeCount(cmd *cobra.Command) { if !viper.GetBool(ha) { return } - // set total number of nodes in ha cluster to 3, if not otherwise defined by user + // set total number of nodes in ha (multi-control plane) cluster to 3, if not otherwise defined by user if !cmd.Flags().Changed(nodes) { viper.Set(nodes, 3) } // respect user preference, if correct if cmd.Flags().Changed(nodes) && viper.GetInt(nodes) < 3 { - exit.Message(reason.Usage, "HA clusters require 3 or more control-plane nodes") + exit.Message(reason.Usage, "HA (multi-control plane) clusters require 3 or more control-plane nodes") } } @@ -744,11 +744,11 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC } if cmd.Flags().Changed(ha) { - out.WarningT("Changing the HA mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") + out.WarningT("Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.") } if cmd.Flags().Changed(apiServerPort) && config.IsHA(*existing) { - out.WarningT("Changing the apiserver port of an existing minikube ha cluster is not currently supported. Please first delete the cluster.") + out.WarningT("Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.") } else { updateIntFromFlag(cmd, &cc.APIServerPort, apiServerPort) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index ff9157488787..c72ff85e3304 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -623,8 +623,8 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro // DANGER: This log message is hard-coded in an integration test! 
klog.Infof("The running cluster does not require reconfiguration: %s", host) // taking a shortcut, as the cluster seems to be properly configured - // except for vm driver in non-ha cluster - fallback to old behaviour - // here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha cluster with vm driver + // except for vm driver in non-ha (non-multi-control plane) cluster - fallback to old behaviour + // here we're making a tradeoff to avoid significant (10sec) waiting on restarting stopped non-ha (non-multi-control plane) cluster with vm driver // where such cluster needs to be reconfigured b/c of (currently) ephemeral config, but then also, // starting already started such cluster (hard to know w/o investing that time) will fallthrough the same path and reconfigure cluster if config.IsHA(cfg) || !driver.IsVM(cfg.Driver) { @@ -954,7 +954,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru } files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, constants.KubeadmYamlPath+".new", "0640")) } - // deploy kube-vip for ha cluster + // deploy kube-vip for ha (multi-control plane) cluster if config.IsHA(cfg) { // workaround for kube-vip // only applicable for k8s v1.29+ during primary control-plane node's kubeadm init (ie, first boot) @@ -1043,7 +1043,7 @@ func (k *Bootstrapper) LabelAndUntaintNode(cfg config.ClusterConfig, n config.No return k.labelAndUntaintNode(cfg, n) } -// labelAndUntaintNode applies minikube labels to node and removes NoSchedule taints that might be set to secondary control-plane nodes by default in ha cluster. +// labelAndUntaintNode applies minikube labels to node and removes NoSchedule taints that might be set to secondary control-plane nodes by default in ha (multi-control plane) cluster. func (k *Bootstrapper) labelAndUntaintNode(cfg config.ClusterConfig, n config.Node) error { // time node was created. time format is based on ISO 8601 (RFC 3339) // converting - and : to _ because of Kubernetes label restriction diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 68f33c00028e..1010d1fc1382 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -247,7 +247,7 @@ func MultiNode(cc ClusterConfig) bool { return viper.GetInt("nodes") > 1 } -// IsHA returns true if HA is requested. +// IsHA returns true if ha (multi-control plane) cluster is requested. 
func IsHA(cc ClusterConfig) bool { if len(ControlPlanes(cc)) > 1 { return true diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 29c2f2deb567..fc66e2320490 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -90,7 +90,7 @@ func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*hos return h, errors.Wrap(err, "post-start") } - // on vm node restart and for ha topology only (for now), + // on vm node restart and for ha (multi-control plane) topology only (for now), // we deliberately aim to restore backed up machine config early, // so that remaining code logic can amend files as needed, // it's intentionally non-fatal in case of any error diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 0d0e18efd80c..c382a4304fd5 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -153,7 +153,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo out.Err("Failed to inject host.minikube.internal into CoreDNS, this will limit the pods access to the host IP") } } - // scale down CoreDNS from default 2 to 1 replica only for non-ha cluster and if optimisation is not disabled + // scale down CoreDNS from default 2 to 1 replica only for non-ha (non-multi-control plane) cluster and if optimisation is not disabled if !starter.Cfg.DisableOptimizations && !config.IsHA(*starter.Cfg) { if err := kapi.ScaleDeployment(starter.Cfg.Name, meta.NamespaceSystem, kconst.CoreDNSDeploymentName, 1); err != nil { klog.Errorf("Unable to scale down deployment %q in namespace %q to 1 replica: %v", kconst.CoreDNSDeploymentName, meta.NamespaceSystem, err) @@ -167,7 +167,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo return nil, errors.Wrap(err, "Failed to get bootstrapper") } - // for ha, use already running control-plane node to copy over certs to this secondary control-plane node + // for ha (multi-control plane) cluster, use already running control-plane node to copy over certs to this secondary control-plane node cpr := mustload.Running(starter.Cfg.Name).CP.Runner if err = bs.SetupCerts(*starter.Cfg, *starter.Node, cpr); err != nil { return nil, errors.Wrap(err, "setting up certs") @@ -178,7 +178,7 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo } // join cluster only on first node start - // except for vm driver in non-ha cluster - fallback to old behaviour + // except for vm driver in non-ha (non-multi-control plane) cluster - fallback to old behaviour if !starter.PreExists || (driver.IsVM(starter.Cfg.Driver) && !config.IsHA(*starter.Cfg)) { // make sure to use the command runner for the primary control plane to generate the join token pcpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) @@ -227,9 +227,9 @@ func Start(starter Starter) (*kubeconfig.Settings, error) { // nolint:gocyclo prepareNone() } - // for ha cluster, primary control-plane node will not come up alone until secondary joins + // for ha (multi-control plane) cluster, primary control-plane node will not come up alone until secondary joins if config.IsHA(*starter.Cfg) && config.IsPrimaryControlPlane(*starter.Cfg, *starter.Node) { - klog.Infof("HA cluster: will skip waiting for primary control-plane node %+v", starter.Node) + klog.Infof("HA (multi-control plane) cluster: will skip waiting for primary control-plane node %+v", starter.Node) } else { klog.Infof("Will wait %s for node %+v", 
viper.GetDuration(waitTimeout), starter.Node) if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { diff --git a/test/integration/ha_test.go b/test/integration/ha_test.go index d5a88d2a1177..2b37b9858c6d 100644 --- a/test/integration/ha_test.go +++ b/test/integration/ha_test.go @@ -35,10 +35,10 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -// TestHA tests all ha cluster functionality +// TestHA tests all ha (multi-control plane) cluster functionality func TestHA(t *testing.T) { if NoneDriver() { - t.Skip("none driver does not support multinode/ha") + t.Skip("none driver does not support multinode/ha(multi-control plane) cluster") } if DockerDriver() { @@ -94,13 +94,13 @@ func TestHA(t *testing.T) { }) } -// validateHAStartCluster ensures ha cluster can start. +// validateHAStartCluster ensures ha (multi-control plane) cluster can start. func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) { - // start ha cluster + // start ha (multi-control plane) cluster startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--ha", "-v=7", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed to fresh-start ha cluster. args %q : %v", rr.Command(), err) + t.Fatalf("failed to fresh-start ha (multi-control plane) cluster. args %q : %v", rr.Command(), err) } // ensure minikube status shows 3 operational control-plane nodes @@ -122,17 +122,17 @@ func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) { } } -// validateHADeployApp deploys an app to ha cluster and ensures all nodes can serve traffic. +// validateHADeployApp deploys an app to ha (multi-control plane) cluster and ensures all nodes can serve traffic. func validateHADeployApp(ctx context.Context, t *testing.T, profile string) { // Create a deployment for app _, err := Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "apply", "-f", "./testdata/ha/ha-pod-dns-test.yaml")) if err != nil { - t.Errorf("failed to create busybox deployment to ha cluster") + t.Errorf("failed to create busybox deployment to ha (multi-control plane) cluster") } _, err = Run(t, exec.CommandContext(ctx, Target(), "kubectl", "-p", profile, "--", "rollout", "status", "deployment/busybox")) if err != nil { - t.Errorf("failed to deploy busybox to ha cluster") + t.Errorf("failed to deploy busybox to ha (multi-control plane) cluster") } // resolve Pod IPs @@ -221,13 +221,13 @@ func validateHAPingHostFromPods(ctx context.Context, t *testing.T, profile strin } } -// validateHAAddWorkerNode uses the minikube node add command to add a worker node to an existing ha cluster. +// validateHAAddWorkerNode uses the minikube node add command to add a worker node to an existing ha (multi-control plane) cluster. func validateHAAddWorkerNode(ctx context.Context, t *testing.T, profile string) { - // add a node to the current ha cluster + // add a node to the current ha (multi-control plane) cluster addArgs := []string{"node", "add", "-p", profile, "-v=7", "--alsologtostderr"} rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { - t.Fatalf("failed to add worker node to current ha cluster. args %q : %v", rr.Command(), err) + t.Fatalf("failed to add worker node to current ha (multi-control plane) cluster. 
args %q : %v", rr.Command(), err) } // ensure minikube status shows 3 operational control-plane nodes and 1 worker node @@ -276,7 +276,7 @@ func validateHANodeLabels(ctx context.Context, t *testing.T, profile string) { } } -// validateHAStatusHAppy ensures minikube profile list outputs correct with ha clusters. +// validateHAStatusHAppy ensures minikube profile list outputs correct with ha (multi-control plane) clusters. func validateHAStatusHAppy(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { @@ -317,7 +317,7 @@ func validateHAStatusHAppy(ctx context.Context, t *testing.T, profile string) { } } -// validateHACopyFile ensures minikube cp works with ha clusters. +// validateHACopyFile ensures minikube cp works with ha (multi-control plane) clusters. func validateHACopyFile(ctx context.Context, t *testing.T, profile string) { if NoneDriver() { t.Skipf("skipping: cp is unsupported by none driver") @@ -357,7 +357,7 @@ func validateHACopyFile(ctx context.Context, t *testing.T, profile string) { } } -// validateHAStopSecondaryNode tests ha cluster by stopping a secondary control-plane node using minikube node stop command. +// validateHAStopSecondaryNode tests ha (multi-control plane) cluster by stopping a secondary control-plane node using minikube node stop command. func validateHAStopSecondaryNode(ctx context.Context, t *testing.T, profile string) { // run minikube node stop on secondary control-plane node rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", SecondNodeName, "-v=7", "--alsologtostderr")) @@ -385,7 +385,7 @@ func validateHAStopSecondaryNode(ctx context.Context, t *testing.T, profile stri } } -// validateHAStatusDegraded ensures minikube profile list outputs correct with ha clusters. +// validateHAStatusDegraded ensures minikube profile list outputs correct with ha (multi-control plane) clusters. func validateHAStatusDegraded(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { @@ -423,7 +423,7 @@ func validateHARestartSecondaryNode(ctx context.Context, t *testing.T, profile s t.Errorf("secondary control-plane node start returned an error. args %q: %v", rr.Command(), err) } - // ensure minikube status shows all 4 nodes running, waiting for ha cluster/apiservers to stabilise + // ensure minikube status shows all 4 nodes running, waiting for ha (multi-control plane) cluster/apiservers to stabilise minikubeStatus := func() error { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-v=7", "--alsologtostderr")) return err @@ -525,7 +525,7 @@ func validateHADeleteSecondaryNode(ctx context.Context, t *testing.T, profile st } } -// validateHAStopCluster runs minikube stop on a ha cluster. +// validateHAStopCluster runs minikube stop on a ha (multi-control plane) cluster. func validateHAStopCluster(ctx context.Context, t *testing.T, profile string) { // Run minikube stop on the cluster rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop", "-v=7", "--alsologtostderr")) @@ -553,7 +553,7 @@ func validateHAStopCluster(ctx context.Context, t *testing.T, profile string) { } } -// validateHARestartCluster verifies a soft restart on a ha cluster works. +// validateHARestartCluster verifies a soft restart on a ha (multi-control plane) cluster works. 
func validateHARestartCluster(ctx context.Context, t *testing.T, profile string) { // restart cluster with minikube start startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=7", "--alsologtostderr"}, StartArgs()...) @@ -598,13 +598,13 @@ func validateHARestartCluster(ctx context.Context, t *testing.T, profile string) } } -// validateHAAddSecondaryNode uses the minikube node add command to add a secondary control-plane node to an existing ha cluster. +// validateHAAddSecondaryNode uses the minikube node add command to add a secondary control-plane node to an existing ha (multi-control plane) cluster. func validateHAAddSecondaryNode(ctx context.Context, t *testing.T, profile string) { - // add a node to the current ha cluster + // add a node to the current ha (multi-control plane) cluster addArgs := []string{"node", "add", "-p", profile, "--control-plane", "-v=7", "--alsologtostderr"} rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { - t.Fatalf("failed to add control-plane node to current ha cluster. args %q : %v", rr.Command(), err) + t.Fatalf("failed to add control-plane node to current ha (multi-control plane) cluster. args %q : %v", rr.Command(), err) } // ensure minikube status shows 3 operational control-plane nodes and 1 worker node From 88f327710cefb5e3a0f37ffb9a2db8ef3cb89962 Mon Sep 17 00:00:00 2001 From: Steven Powell Date: Wed, 28 Feb 2024 15:46:47 -0800 Subject: [PATCH 19/41] testing: Update oldest supported K8s version to v1.20 --- pkg/minikube/constants/constants.go | 2 +- test/integration/ingress_addon_legacy_test.go | 82 ------------------- 2 files changed, 1 insertion(+), 83 deletions(-) delete mode 100644 test/integration/ingress_addon_legacy_test.go diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 7852cc028c35..404a2359f4f2 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -39,7 +39,7 @@ const ( // NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go NewestKubernetesVersion = "v1.29.0-rc.2" // OldestKubernetesVersion is the oldest Kubernetes version to test against - OldestKubernetesVersion = "v1.16.0" + OldestKubernetesVersion = "v1.20.0" // NoKubernetesVersion is the version used when users does NOT want to install kubernetes NoKubernetesVersion = "v0.0.0" diff --git a/test/integration/ingress_addon_legacy_test.go b/test/integration/ingress_addon_legacy_test.go deleted file mode 100644 index 3bbc847bb9b0..000000000000 --- a/test/integration/ingress_addon_legacy_test.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build integration - -/* -Copyright 2021 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integration - -import ( - "context" - "os/exec" - "testing" -) - -// TestIngressAddonLegacy tests ingress and ingress-dns addons with legacy k8s version <1.19 -func TestIngressAddonLegacy(t *testing.T) { - if NoneDriver() { - t.Skipf("skipping: none driver does not support ingress") - } - - profile := UniqueProfileName("ingress-addon-legacy") - ctx, cancel := context.WithTimeout(context.Background(), Minutes(10)) - defer Cleanup(t, profile, cancel) - - t.Run("StartLegacyK8sCluster", func(t *testing.T) { - args := append([]string{"start", "-p", profile, "--kubernetes-version=v1.18.20", "--memory=4096", "--wait=true", "--alsologtostderr", "-v=5"}, StartArgs()...) - rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) - if err != nil { - t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) - } - }) - - t.Run("serial", func(t *testing.T) { - tests := []struct { - name string - validator validateFunc - }{ - {"ValidateIngressAddonActivation", validateIngressAddonActivation}, - {"ValidateIngressDNSAddonActivation", validateIngressDNSAddonActivation}, - {"ValidateIngressAddons", validateIngressAddon}, - } - for _, tc := range tests { - tc := tc - if ctx.Err() == context.DeadlineExceeded { - t.Fatalf("Unable to run more tests (deadline exceeded)") - } - t.Run(tc.name, func(t *testing.T) { - tc.validator(ctx, t, profile) - }) - } - }) -} - -// validateIngressAddonActivation tests ingress addon activation -func validateIngressAddonActivation(ctx context.Context, t *testing.T, profile string) { - defer PostMortemLogs(t, profile) - - if _, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "ingress", "--alsologtostderr", "-v=5")); err != nil { - t.Errorf("failed to enable ingress addon: %v", err) - } -} - -// validateIngressDNSAddonActivation tests ingress-dns addon activation -func validateIngressDNSAddonActivation(ctx context.Context, t *testing.T, profile string) { - defer PostMortemLogs(t, profile) - - if _, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "ingress-dns", "--alsologtostderr", "-v=5")); err != nil { - t.Errorf("failed to enable ingress-dns addon: %v", err) - } -} From 2696a6da12966b5660a512b1c43982feda183f37 Mon Sep 17 00:00:00 2001 From: Steven Powell Date: Thu, 29 Feb 2024 09:39:57 -0800 Subject: [PATCH 20/41] CI: Fix path to CNI Plugins file --- hack/update/get_version/get_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/update/get_version/get_version.go b/hack/update/get_version/get_version.go index 5f994dc86a0f..eb72c405ad94 100644 --- a/hack/update/get_version/get_version.go +++ b/hack/update/get_version/get_version.go @@ -36,7 +36,7 @@ var dependencies = map[string]dependency{ "buildkit": {"deploy/iso/minikube-iso/arch/x86_64/package/buildkit-bin/buildkit-bin.mk", `BUILDKIT_BIN_VERSION = (.*)`}, "calico": {"pkg/minikube/bootstrapper/images/images.go", `calicoVersion = "(.*)"`}, "cloud-spanner": {addonsFile, `cloud-spanner-emulator/emulator:(.*)@`}, - "cni-plugins": {"deploy/iso/minikube-iso/arch/x86_64/package/cni-plugins/cni-plugins.mk", `CNI_PLUGINS_VERSION = (.*)`}, + "cni-plugins": {"deploy/iso/minikube-iso/arch/x86_64/package/cni-plugins/cni-plugins-latest.mk", `CNI_PLUGINS_VERSION = (.*)`}, "containerd": {"deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd-bin.mk", `CONTAINERD_BIN_VERSION = (.*)`}, "cri-dockerd": {dockerfile, `CRI_DOCKERD_VERSION="(.*)"`}, "cri-o": 
{"deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk", `CRIO_BIN_VERSION = (.*)`}, From 84bb58f811b1d92400b6d25b21fa242a4024bf82 Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Sat, 2 Mar 2024 00:04:37 +0000 Subject: [PATCH 21/41] Update yearly leaderboard --- .../en/docs/contrib/leaderboard/2024.html | 84 +++++++++++-------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/site/content/en/docs/contrib/leaderboard/2024.html b/site/content/en/docs/contrib/leaderboard/2024.html index 9d6fb96402fb..e42dbb1e766b 100644 --- a/site/content/en/docs/contrib/leaderboard/2024.html +++ b/site/content/en/docs/contrib/leaderboard/2024.html @@ -87,7 +87,7 @@

kubernetes/minikube

-
2024-01-01 — 2024-01-31
+
2024-01-01 — 2024-02-29

Reviewers

@@ -103,9 +103,9 @@

Most Influential

function drawreviewCounts() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Merged PRs reviewed', type: 'number'}, { role: 'annotation' }], + ["medyagh", 3, "3"], + ["spowelljr", 2, "2"], ["afbjorklund", 1, "1"], - ["spowelljr", 1, "1"], - ["medyagh", 1, "1"], ]); @@ -138,9 +138,9 @@

Most Helpful

function drawreviewWords() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of words written in merged PRs', type: 'number'}, { role: 'annotation' }], - ["spowelljr", 118, "118"], + ["spowelljr", 786, "786"], + ["medyagh", 37, "37"], ["afbjorklund", 16, "16"], - ["medyagh", 11, "11"], ]); @@ -173,9 +173,9 @@

Most Demanding

function drawreviewComments() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Review Comments in merged PRs', type: 'number'}, { role: 'annotation' }], - ["medyagh", 1, "1"], + ["medyagh", 2, "2"], + ["spowelljr", 1, "1"], ["afbjorklund", 0, "0"], - ["spowelljr", 0, "0"], ]); @@ -212,13 +212,19 @@

Most Active

function drawprCounts() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Pull Requests Merged', type: 'number'}, { role: 'annotation' }], - ["spowelljr", 15, "15"], + ["spowelljr", 20, "20"], ["prezha", 6, "6"], + ["jeffmaury", 3, "3"], ["sandipanpanda", 2, "2"], + ["travier", 2, "2"], + ["ComradeProgrammer", 1, "1"], + ["syxunion", 1, "1"], + ["Juneezee", 1, "1"], + ["mahmut-Abi", 1, "1"], ["ph-ngn", 1, "1"], ["MarcusDunn", 1, "1"], - ["jeffmaury", 1, "1"], ["qlijin", 1, "1"], + ["coderrick", 1, "1"], ]); @@ -251,12 +257,18 @@

Big Movers

function drawprDeltas() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: 'Lines of code (delta)', type: 'number'}, { role: 'annotation' }], - ["spowelljr", 760, "760"], + ["spowelljr", 932, "932"], + ["travier", 586, "586"], ["prezha", 163, "163"], ["sandipanpanda", 95, "95"], + ["ComradeProgrammer", 12, "12"], + ["coderrick", 5, "5"], ["ph-ngn", 4, "4"], + ["Juneezee", 4, "4"], ["qlijin", 2, "2"], ["MarcusDunn", 2, "2"], + ["mahmut-Abi", 0, "0"], + ["syxunion", 0, "0"], ["jeffmaury", 0, "0"], ]); @@ -290,12 +302,18 @@

Most difficult to review

function drawprSize() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: 'Average PR size (added+changed)', type: 'number'}, { role: 'annotation' }], + ["travier", 123, "123"], ["sandipanpanda", 44, "44"], - ["spowelljr", 31, "31"], + ["spowelljr", 30, "30"], ["prezha", 14, "14"], + ["ComradeProgrammer", 7, "7"], + ["coderrick", 5, "5"], ["ph-ngn", 2, "2"], + ["Juneezee", 2, "2"], ["MarcusDunn", 1, "1"], ["qlijin", 1, "1"], + ["mahmut-Abi", 0, "0"], + ["syxunion", 0, "0"], ["jeffmaury", 0, "0"], ]); @@ -333,21 +351,21 @@

Most Active

function drawcomments() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of comments', type: 'number'}, { role: 'annotation' }], - ["caerulescens", 42, "42"], - ["afbjorklund", 17, "17"], + ["caerulescens", 46, "46"], + ["afbjorklund", 26, "26"], + ["T-Lakshmi", 15, "15"], + ["spowelljr", 11, "11"], ["liangyuanpeng", 10, "10"], - ["spowelljr", 10, "10"], - ["prezha", 3, "3"], + ["kundan2707", 6, "6"], + ["prezha", 4, "4"], + ["medyagh", 4, "4"], + ["64J0", 3, "3"], + ["vaibhav2107", 3, "3"], + ["leeseoungsuk1", 3, "3"], ["mazzystr", 2, "2"], - ["medyagh", 2, "2"], - ["logopk", 2, "2"], - ["iamprakash89", 2, "2"], + ["maksymilian-mulawa-form3", 2, "2"], ["jusito", 2, "2"], - ["kundan2707", 2, "2"], - ["yamaken1343", 1, "1"], - ["chronicc", 1, "1"], - ["lb7", 1, "1"], - ["pshiki", 1, "1"], + ["metalcycling", 2, "2"], ]); @@ -380,21 +398,21 @@

Most Helpful

function drawcommentWords() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of words (excludes authored)', type: 'number'}, { role: 'annotation' }], - ["caerulescens", 1607, "1607"], + ["caerulescens", 1996, "1996"], + ["adrian-moisa", 1688, "1688"], ["karthick-dkk", 1285, "1285"], - ["afbjorklund", 712, "712"], - ["spowelljr", 391, "391"], + ["afbjorklund", 921, "921"], + ["kundan2707", 652, "652"], + ["spowelljr", 432, "432"], ["jusito", 387, "387"], ["adolphTech", 277, "277"], + ["prezha", 251, "251"], ["mazzystr", 243, "243"], + ["gufertum", 219, "219"], ["iamprakash89", 176, "176"], - ["prezha", 160, "160"], + ["T-Lakshmi", 148, "148"], + ["64J0", 138, "138"], ["liangyuanpeng", 112, "112"], - ["fredjeck", 109, "109"], - ["yamaken1343", 71, "71"], - ["medyagh", 61, "61"], - ["pshiki", 60, "60"], - ["friedrich", 52, "52"], ]); @@ -427,7 +445,7 @@

Top Closers

function drawissueCloser() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of issues closed (excludes authored)', type: 'number'}, { role: 'annotation' }], - ["spowelljr", 18, "18"], + ["spowelljr", 19, "19"], ]); From f34ef00756fe22573d82feee98eaea72a792bbb2 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 3 Mar 2024 02:01:34 +0000 Subject: [PATCH 22/41] bump kube-vip to v0.7.1 --- pkg/minikube/cluster/ha/kube-vip/kube-vip.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go index 8921ee7c92a8..3d36358bd533 100644 --- a/pkg/minikube/cluster/ha/kube-vip/kube-vip.go +++ b/pkg/minikube/cluster/ha/kube-vip/kube-vip.go @@ -52,12 +52,12 @@ spec: value: eth0 - name: vip_cidr value: "32" + - name: dns_mode + value: first - name: cp_enable value: "true" - name: cp_namespace value: kube-system - - name: vip_ddns - value: "false" - name: vip_leaderelection value: "true" - name: vip_leasename @@ -72,7 +72,7 @@ spec: value: {{ .VIP }} - name: prometheus_server value: :2112 - image: ghcr.io/kube-vip/kube-vip:v0.7.0 + image: ghcr.io/kube-vip/kube-vip:v0.7.1 imagePullPolicy: IfNotPresent name: kube-vip resources: {} From ca3119b1120c1b622eb56cdb50a01b33009fcdf9 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Sun, 3 Mar 2024 23:57:04 +0000 Subject: [PATCH 23/41] Revert "Update go from 1.21.6 to" This reverts commit 0919c459ba7ee155be6204da4f1de5ec2d24c247. --- .github/workflows/build.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/functional_verified.yml | 2 +- .github/workflows/leaderboard.yml | 2 +- .github/workflows/master.yml | 2 +- .github/workflows/minikube-image-benchmark.yml | 2 +- .github/workflows/pr.yml | 2 +- .github/workflows/sync-minikube.yml | 2 +- .github/workflows/time-to-k8s-public-chart.yml | 2 +- .github/workflows/time-to-k8s.yml | 2 +- .github/workflows/translations.yml | 2 +- .github/workflows/update-buildkit-version.yml | 2 +- .github/workflows/update-calico-version.yml | 2 +- .github/workflows/update-cloud-spanner-emulator-version.yml | 2 +- .github/workflows/update-cni-plugins-version.yml | 2 +- .github/workflows/update-containerd-version.yml | 2 +- .github/workflows/update-cri-dockerd-version.yml | 2 +- .github/workflows/update-cri-o-version.yml | 2 +- .github/workflows/update-crictl-version.yml | 2 +- .github/workflows/update-docker-buildx-version.yml | 2 +- .github/workflows/update-docker-version.yml | 2 +- .github/workflows/update-docsy-version.yml | 2 +- .github/workflows/update-flannel-version.yml | 2 +- .github/workflows/update-gcp-auth-version.yml | 2 +- .github/workflows/update-gh-version.yml | 2 +- .github/workflows/update-go-github-version.yml | 2 +- .github/workflows/update-golang-version.yml | 2 +- .github/workflows/update-golint-version.yml | 2 +- .github/workflows/update-gopogh-version.yml | 2 +- .github/workflows/update-gotestsum-version.yml | 2 +- .github/workflows/update-hugo-version.yml | 2 +- .github/workflows/update-ingress-version.yml | 2 +- .github/workflows/update-inspektor-gadget-version.yml | 2 +- .github/workflows/update-iso-image-versions.yml | 2 +- .github/workflows/update-istio-operator.yml | 2 +- .github/workflows/update-k8s-versions.yml | 2 +- .github/workflows/update-kindnetd-version.yml | 2 +- .github/workflows/update-kong-ingress-controller-version.yml | 2 +- .github/workflows/update-kong-version.yml | 2 +- .github/workflows/update-kubeadm-constants.yml | 2 +- 
.github/workflows/update-kubectl-version.yml | 2 +- .github/workflows/update-kubernetes-versions-list.yml | 2 +- .github/workflows/update-metrics-server-version.yml | 2 +- .github/workflows/update-nerdctl-version.yml | 2 +- .github/workflows/update-nerdctld-version.yml | 2 +- .github/workflows/update-nvidia-device-plugin-version.yml | 2 +- .github/workflows/update-registry-version.yml | 2 +- .github/workflows/update-runc-version.yml | 2 +- .github/workflows/update-site-node-version.yml | 2 +- .github/workflows/update-ubuntu-version.yml | 2 +- .github/workflows/yearly-leaderboard.yml | 2 +- Makefile | 2 +- deploy/addons/auto-pause/Dockerfile | 2 +- deploy/iso/minikube-iso/go.hash | 1 - deploy/kicbase/Dockerfile | 2 +- go.mod | 2 +- hack/jenkins/installers/check_install_golang.ps1 | 2 +- hack/jenkins/installers/check_install_golang.sh | 2 +- 58 files changed, 57 insertions(+), 58 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0dfacae04ec0..ba0ec32b9182 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ee5dded9b6db..b11071987961 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -6,7 +6,7 @@ on: - master env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/functional_verified.yml b/.github/workflows/functional_verified.yml index 5e2988a92d7f..4b60e1d7463b 100644 --- a/.github/workflows/functional_verified.yml +++ b/.github/workflows/functional_verified.yml @@ -22,7 +22,7 @@ on: - deleted env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/leaderboard.yml b/.github/workflows/leaderboard.yml index 32be3ab0224f..14aa2eca67b6 100644 --- a/.github/workflows/leaderboard.yml +++ b/.github/workflows/leaderboard.yml @@ -6,7 +6,7 @@ on: - 'v*-beta.*' env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index fca2785b8fca..26a5c8daf348 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -14,7 +14,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/minikube-image-benchmark.yml b/.github/workflows/minikube-image-benchmark.yml index 2ae77d8eb4bd..e546b86bbeb1 100644 --- a/.github/workflows/minikube-image-benchmark.yml +++ b/.github/workflows/minikube-image-benchmark.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 79de759981c4..c3e167ea3db3 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -12,7 +12,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/sync-minikube.yml b/.github/workflows/sync-minikube.yml index 2eb5eb3a39d0..bbe294b484c8 100644 --- a/.github/workflows/sync-minikube.yml +++ 
b/.github/workflows/sync-minikube.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/time-to-k8s-public-chart.yml b/.github/workflows/time-to-k8s-public-chart.yml index 2777cef53602..a321b0c6a833 100644 --- a/.github/workflows/time-to-k8s-public-chart.yml +++ b/.github/workflows/time-to-k8s-public-chart.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/time-to-k8s.yml b/.github/workflows/time-to-k8s.yml index f68ccd1c254a..ec3887b1b3cc 100644 --- a/.github/workflows/time-to-k8s.yml +++ b/.github/workflows/time-to-k8s.yml @@ -5,7 +5,7 @@ on: types: [released] env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/translations.yml b/.github/workflows/translations.yml index f60b0379e5df..a75a501e887c 100644 --- a/.github/workflows/translations.yml +++ b/.github/workflows/translations.yml @@ -6,7 +6,7 @@ on: - "translations/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-buildkit-version.yml b/.github/workflows/update-buildkit-version.yml index 89fd3caff8ab..a3891d21f3a6 100644 --- a/.github/workflows/update-buildkit-version.yml +++ b/.github/workflows/update-buildkit-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 3" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-calico-version.yml b/.github/workflows/update-calico-version.yml index f9cbfe4e9e29..e654b7eb722e 100644 --- a/.github/workflows/update-calico-version.yml +++ b/.github/workflows/update-calico-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-cloud-spanner-emulator-version.yml b/.github/workflows/update-cloud-spanner-emulator-version.yml index 282a067525f3..56a2d497bf5d 100644 --- a/.github/workflows/update-cloud-spanner-emulator-version.yml +++ b/.github/workflows/update-cloud-spanner-emulator-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-cni-plugins-version.yml b/.github/workflows/update-cni-plugins-version.yml index 3bb59ba9b371..76415249ef5e 100644 --- a/.github/workflows/update-cni-plugins-version.yml +++ b/.github/workflows/update-cni-plugins-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-containerd-version.yml b/.github/workflows/update-containerd-version.yml index ee7d9f3365f6..6970980875ec 100644 --- a/.github/workflows/update-containerd-version.yml +++ b/.github/workflows/update-containerd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-cri-dockerd-version.yml b/.github/workflows/update-cri-dockerd-version.yml index 
0be9601d6b61..c9eb62dd760a 100644 --- a/.github/workflows/update-cri-dockerd-version.yml +++ b/.github/workflows/update-cri-dockerd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-cri-o-version.yml b/.github/workflows/update-cri-o-version.yml index e269c7e693c3..e03f5d72be72 100644 --- a/.github/workflows/update-cri-o-version.yml +++ b/.github/workflows/update-cri-o-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 5" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-crictl-version.yml b/.github/workflows/update-crictl-version.yml index 1100787f665f..09d50cb044a2 100644 --- a/.github/workflows/update-crictl-version.yml +++ b/.github/workflows/update-crictl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 3" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-docker-buildx-version.yml b/.github/workflows/update-docker-buildx-version.yml index 7b3a56a6e41a..2786d3421118 100644 --- a/.github/workflows/update-docker-buildx-version.yml +++ b/.github/workflows/update-docker-buildx-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-docker-version.yml b/.github/workflows/update-docker-version.yml index 599fb266e9e9..a6f029092841 100644 --- a/.github/workflows/update-docker-version.yml +++ b/.github/workflows/update-docker-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 4" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-docsy-version.yml b/.github/workflows/update-docsy-version.yml index 0b72ea804cd0..858195f343e7 100644 --- a/.github/workflows/update-docsy-version.yml +++ b/.github/workflows/update-docsy-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-flannel-version.yml b/.github/workflows/update-flannel-version.yml index f5daa0d3ed91..3a761bac8e11 100644 --- a/.github/workflows/update-flannel-version.yml +++ b/.github/workflows/update-flannel-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-gcp-auth-version.yml b/.github/workflows/update-gcp-auth-version.yml index 60e9a4772387..cbea8ad351e8 100644 --- a/.github/workflows/update-gcp-auth-version.yml +++ b/.github/workflows/update-gcp-auth-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-gh-version.yml b/.github/workflows/update-gh-version.yml index 71d634ff0042..e0d7d00b2bac 100644 --- a/.github/workflows/update-gh-version.yml +++ b/.github/workflows/update-gh-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-go-github-version.yml 
b/.github/workflows/update-go-github-version.yml index c2b997128dcb..b9b15ffad2c1 100644 --- a/.github/workflows/update-go-github-version.yml +++ b/.github/workflows/update-go-github-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-golang-version.yml b/.github/workflows/update-golang-version.yml index 4d69bc1a8e50..e5bd60aa4bf1 100644 --- a/.github/workflows/update-golang-version.yml +++ b/.github/workflows/update-golang-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 9 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-golint-version.yml b/.github/workflows/update-golint-version.yml index 9cc8229ad4fa..5984310d6146 100644 --- a/.github/workflows/update-golint-version.yml +++ b/.github/workflows/update-golint-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-gopogh-version.yml b/.github/workflows/update-gopogh-version.yml index ce24edf62e56..7a151294a88a 100644 --- a/.github/workflows/update-gopogh-version.yml +++ b/.github/workflows/update-gopogh-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 9 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-gotestsum-version.yml b/.github/workflows/update-gotestsum-version.yml index ea6d29c3d959..85d5cbb86dd7 100644 --- a/.github/workflows/update-gotestsum-version.yml +++ b/.github/workflows/update-gotestsum-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-hugo-version.yml b/.github/workflows/update-hugo-version.yml index 2b59c4a0a1fe..e09ac2dc96fc 100644 --- a/.github/workflows/update-hugo-version.yml +++ b/.github/workflows/update-hugo-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-ingress-version.yml b/.github/workflows/update-ingress-version.yml index 376a70390c02..110e98bad196 100644 --- a/.github/workflows/update-ingress-version.yml +++ b/.github/workflows/update-ingress-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-inspektor-gadget-version.yml b/.github/workflows/update-inspektor-gadget-version.yml index d10ce11ac5ae..b389bb22339f 100644 --- a/.github/workflows/update-inspektor-gadget-version.yml +++ b/.github/workflows/update-inspektor-gadget-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-iso-image-versions.yml b/.github/workflows/update-iso-image-versions.yml index 4d3790b373ea..07f41eb4990a 100644 --- a/.github/workflows/update-iso-image-versions.yml +++ b/.github/workflows/update-iso-image-versions.yml @@ -3,7 +3,7 @@ on: workflow_dispatch: env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: 
'1.21.6' permissions: contents: read jobs: diff --git a/.github/workflows/update-istio-operator.yml b/.github/workflows/update-istio-operator.yml index 443d3f07deee..1a34a861b0ad 100644 --- a/.github/workflows/update-istio-operator.yml +++ b/.github/workflows/update-istio-operator.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-k8s-versions.yml b/.github/workflows/update-k8s-versions.yml index a072e8c5190b..13e17c21790b 100644 --- a/.github/workflows/update-k8s-versions.yml +++ b/.github/workflows/update-k8s-versions.yml @@ -6,7 +6,7 @@ on: - cron: "0 8 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-kindnetd-version.yml b/.github/workflows/update-kindnetd-version.yml index 1f37b3006f5f..8b5ba11cb9f2 100644 --- a/.github/workflows/update-kindnetd-version.yml +++ b/.github/workflows/update-kindnetd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read jobs: diff --git a/.github/workflows/update-kong-ingress-controller-version.yml b/.github/workflows/update-kong-ingress-controller-version.yml index 187364053f32..38922a1b7e85 100644 --- a/.github/workflows/update-kong-ingress-controller-version.yml +++ b/.github/workflows/update-kong-ingress-controller-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-kong-version.yml b/.github/workflows/update-kong-version.yml index a48f00c52b1b..049d3ce9819d 100644 --- a/.github/workflows/update-kong-version.yml +++ b/.github/workflows/update-kong-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-kubeadm-constants.yml b/.github/workflows/update-kubeadm-constants.yml index 21f3eae45caa..e7f052924c40 100644 --- a/.github/workflows/update-kubeadm-constants.yml +++ b/.github/workflows/update-kubeadm-constants.yml @@ -6,7 +6,7 @@ on: - cron: "0 6 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-kubectl-version.yml b/.github/workflows/update-kubectl-version.yml index a93e6c1f7a54..4313dd03dcd6 100644 --- a/.github/workflows/update-kubectl-version.yml +++ b/.github/workflows/update-kubectl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-kubernetes-versions-list.yml b/.github/workflows/update-kubernetes-versions-list.yml index 9e4b3fbb0400..06e66f0677bd 100644 --- a/.github/workflows/update-kubernetes-versions-list.yml +++ b/.github/workflows/update-kubernetes-versions-list.yml @@ -6,7 +6,7 @@ on: - cron: "0 6 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-metrics-server-version.yml b/.github/workflows/update-metrics-server-version.yml index 71360e542d32..836a5fabcd37 100644 --- a/.github/workflows/update-metrics-server-version.yml 
+++ b/.github/workflows/update-metrics-server-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-nerdctl-version.yml b/.github/workflows/update-nerdctl-version.yml index b88fd1710bef..4d672f0dda17 100644 --- a/.github/workflows/update-nerdctl-version.yml +++ b/.github/workflows/update-nerdctl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-nerdctld-version.yml b/.github/workflows/update-nerdctld-version.yml index 00e449bd0354..2d531798e380 100644 --- a/.github/workflows/update-nerdctld-version.yml +++ b/.github/workflows/update-nerdctld-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-nvidia-device-plugin-version.yml b/.github/workflows/update-nvidia-device-plugin-version.yml index a29c97dd6534..48204a1f4c89 100644 --- a/.github/workflows/update-nvidia-device-plugin-version.yml +++ b/.github/workflows/update-nvidia-device-plugin-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-registry-version.yml b/.github/workflows/update-registry-version.yml index 6a273f20cf68..ec19ca1505e0 100644 --- a/.github/workflows/update-registry-version.yml +++ b/.github/workflows/update-registry-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-runc-version.yml b/.github/workflows/update-runc-version.yml index ad6a1bdb9ee7..53ba1a80610b 100644 --- a/.github/workflows/update-runc-version.yml +++ b/.github/workflows/update-runc-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 2" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-site-node-version.yml b/.github/workflows/update-site-node-version.yml index fc63cf127d7e..9db39d6508bf 100644 --- a/.github/workflows/update-site-node-version.yml +++ b/.github/workflows/update-site-node-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/update-ubuntu-version.yml b/.github/workflows/update-ubuntu-version.yml index b2c7a400d10b..9597500d761a 100644 --- a/.github/workflows/update-ubuntu-version.yml +++ b/.github/workflows/update-ubuntu-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/.github/workflows/yearly-leaderboard.yml b/.github/workflows/yearly-leaderboard.yml index fda7f91c220c..2851e8964968 100644 --- a/.github/workflows/yearly-leaderboard.yml +++ b/.github/workflows/yearly-leaderboard.yml @@ -6,7 +6,7 @@ on: - cron: "0 0 2 * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.22.0' + GO_VERSION: '1.21.6' permissions: contents: read diff --git a/Makefile b/Makefile index 2e693e78a16e..f7ae3f5a1087 100644 --- a/Makefile +++ 
b/Makefile
@@ -35,7 +35,7 @@ RPM_REVISION ?= 0
 
 # used by hack/jenkins/release_build_and_upload.sh and KVM_BUILD_IMAGE, see also BUILD_IMAGE below
 # update this only by running `make update-golang-version`
-GO_VERSION ?= 1.22.0
+GO_VERSION ?= 1.21.6
 
 # update this only by running `make update-golang-version`
 GO_K8S_VERSION_PREFIX ?= v1.30.0
diff --git a/deploy/addons/auto-pause/Dockerfile b/deploy/addons/auto-pause/Dockerfile
index b435b6fea6a6..7e58dcb0430c 100644
--- a/deploy/addons/auto-pause/Dockerfile
+++ b/deploy/addons/auto-pause/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.22.0 AS builder
+FROM golang:1.21.6 AS builder
 WORKDIR /app
 COPY go.mod go.sum ./
 RUN go mod download
diff --git a/deploy/iso/minikube-iso/go.hash b/deploy/iso/minikube-iso/go.hash
index d9eb5bd9dd19..8f5e95d339ef 100644
--- a/deploy/iso/minikube-iso/go.hash
+++ b/deploy/iso/minikube-iso/go.hash
@@ -24,4 +24,3 @@ sha256 186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21
 sha256 47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz
 sha256 285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz
 sha256 124926a62e45f78daabbaedb9c011d97633186a33c238ffc1e25320c02046248 go1.21.6.src.tar.gz
-sha256 4d196c3d41a0d6c1dfc64d04e3cc1f608b0c436bd87b7060ce3e23234e1f4d5c go1.22.0.src.tar.gz
diff --git a/deploy/kicbase/Dockerfile b/deploy/kicbase/Dockerfile
index 04133ffb6d87..dcfd305ac201 100644
--- a/deploy/kicbase/Dockerfile
+++ b/deploy/kicbase/Dockerfile
@@ -21,7 +21,7 @@
 # this ARG needs to be global to use it in `FROM` & is updated for new versions of ubuntu:jammy-*
 ARG UBUNTU_JAMMY_IMAGE="ubuntu:jammy-20240212"
 # multi-stage docker build so we can build auto-pause for arm64
-FROM golang:1.22.0 as auto-pause
+FROM golang:1.21.6 as auto-pause
 WORKDIR /src
 # auto-pause depends on core minikube code so we need to pass the whole source code as the context
 # copy in the minimal amount of source code possible
diff --git a/go.mod b/go.mod
index e542dbfe5406..3a5e626b3d4d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module k8s.io/minikube
 
-go 1.22
+go 1.21
 
 require (
 	cloud.google.com/go/storage v1.38.0
diff --git a/hack/jenkins/installers/check_install_golang.ps1 b/hack/jenkins/installers/check_install_golang.ps1
index 6454d54791b8..08b0239c3938 100644
--- a/hack/jenkins/installers/check_install_golang.ps1
+++ b/hack/jenkins/installers/check_install_golang.ps1
@@ -31,7 +31,7 @@ AddToPathIfMissing -pathToAdd "C:\Program Files\Go\bin" -scope "Machine"
 AddToPathIfMissing -pathToAdd "$HOME\go\bin" -scope "User"
 
 # Download Go
-$GoVersion = "1.22.0"
+$GoVersion = "1.21.6"
 $CurrentGo = go version
 if ((!$?) -or ($CurrentGo -NotLike "*$GoVersion*")) {
     (New-Object Net.WebClient).DownloadFile("https://go.dev/dl/go$GoVersion.windows-amd64.zip", "$env:TEMP\golang.zip")
diff --git a/hack/jenkins/installers/check_install_golang.sh b/hack/jenkins/installers/check_install_golang.sh
index 444aae347c84..c3b730ee4224 100755
--- a/hack/jenkins/installers/check_install_golang.sh
+++ b/hack/jenkins/installers/check_install_golang.sh
@@ -22,7 +22,7 @@ if (($# < 1)); then
   exit 1
 fi
 
-VERSION_TO_INSTALL=1.22.0
+VERSION_TO_INSTALL=1.21.6
 INSTALL_PATH=${1}
 
 function current_arch() {

From 46ca79954fc6d493339dacea5a1d75a070e25bb6 Mon Sep 17 00:00:00 2001
From: Predrag Rogic
Date: Mon, 4 Mar 2024 01:33:00 +0000
Subject: [PATCH 24/41] Reapply "Update go from 1.21.6 to 1.22.0"

This reverts commit ca3119b1120c1b622eb56cdb50a01b33009fcdf9.
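As an aside: the Makefile comment above notes that all of these pinned copies of the Go version (Makefile, Dockerfiles, CI workflows, go.mod) are kept in lockstep only via `make update-golang-version`. A minimal sketch, not part of this series, of how the go.mod side of such a check could be done programmatically with golang.org/x/mod/modfile (the same module that a later patch in this series bumps); the expected version here is a hypothetical placeholder:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	// Hypothetical target; the go directive carries no patch level,
	// unlike GO_VERSION in the Makefile (e.g. 1.22 vs 1.22.0).
	const want = "1.22"

	data, err := os.ReadFile("go.mod")
	if err != nil {
		fmt.Fprintln(os.Stderr, "read go.mod:", err)
		os.Exit(1)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse go.mod:", err)
		os.Exit(1)
	}
	got := ""
	if f.Go != nil { // the go directive is optional in a module file
		got = f.Go.Version
	}
	if got != want {
		fmt.Fprintf(os.Stderr, "go directive is %q, want %q\n", got, want)
		os.Exit(1)
	}
	fmt.Println("go.mod go directive matches", want)
}
```

A real lockstep check would also compare the Makefile's GO_VERSION and the golang base-image tags in the Dockerfiles, which do carry the patch level.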
--- .github/workflows/build.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/functional_verified.yml | 2 +- .github/workflows/leaderboard.yml | 2 +- .github/workflows/master.yml | 2 +- .github/workflows/minikube-image-benchmark.yml | 2 +- .github/workflows/pr.yml | 2 +- .github/workflows/sync-minikube.yml | 2 +- .github/workflows/time-to-k8s-public-chart.yml | 2 +- .github/workflows/time-to-k8s.yml | 2 +- .github/workflows/translations.yml | 2 +- .github/workflows/update-buildkit-version.yml | 2 +- .github/workflows/update-calico-version.yml | 2 +- .github/workflows/update-cloud-spanner-emulator-version.yml | 2 +- .github/workflows/update-cni-plugins-version.yml | 2 +- .github/workflows/update-containerd-version.yml | 2 +- .github/workflows/update-cri-dockerd-version.yml | 2 +- .github/workflows/update-cri-o-version.yml | 2 +- .github/workflows/update-crictl-version.yml | 2 +- .github/workflows/update-docker-buildx-version.yml | 2 +- .github/workflows/update-docker-version.yml | 2 +- .github/workflows/update-docsy-version.yml | 2 +- .github/workflows/update-flannel-version.yml | 2 +- .github/workflows/update-gcp-auth-version.yml | 2 +- .github/workflows/update-gh-version.yml | 2 +- .github/workflows/update-go-github-version.yml | 2 +- .github/workflows/update-golang-version.yml | 2 +- .github/workflows/update-golint-version.yml | 2 +- .github/workflows/update-gopogh-version.yml | 2 +- .github/workflows/update-gotestsum-version.yml | 2 +- .github/workflows/update-hugo-version.yml | 2 +- .github/workflows/update-ingress-version.yml | 2 +- .github/workflows/update-inspektor-gadget-version.yml | 2 +- .github/workflows/update-iso-image-versions.yml | 2 +- .github/workflows/update-istio-operator.yml | 2 +- .github/workflows/update-k8s-versions.yml | 2 +- .github/workflows/update-kindnetd-version.yml | 2 +- .github/workflows/update-kong-ingress-controller-version.yml | 2 +- .github/workflows/update-kong-version.yml | 2 +- .github/workflows/update-kubeadm-constants.yml | 2 +- .github/workflows/update-kubectl-version.yml | 2 +- .github/workflows/update-kubernetes-versions-list.yml | 2 +- .github/workflows/update-metrics-server-version.yml | 2 +- .github/workflows/update-nerdctl-version.yml | 2 +- .github/workflows/update-nerdctld-version.yml | 2 +- .github/workflows/update-nvidia-device-plugin-version.yml | 2 +- .github/workflows/update-registry-version.yml | 2 +- .github/workflows/update-runc-version.yml | 2 +- .github/workflows/update-site-node-version.yml | 2 +- .github/workflows/update-ubuntu-version.yml | 2 +- .github/workflows/yearly-leaderboard.yml | 2 +- Makefile | 2 +- deploy/addons/auto-pause/Dockerfile | 2 +- deploy/iso/minikube-iso/go.hash | 1 + deploy/kicbase/Dockerfile | 2 +- go.mod | 2 +- hack/jenkins/installers/check_install_golang.ps1 | 2 +- hack/jenkins/installers/check_install_golang.sh | 2 +- 58 files changed, 58 insertions(+), 57 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ba0ec32b9182..0dfacae04ec0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b11071987961..ee5dded9b6db 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -6,7 +6,7 @@ on: - master env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' 
permissions: contents: read diff --git a/.github/workflows/functional_verified.yml b/.github/workflows/functional_verified.yml index 4b60e1d7463b..5e2988a92d7f 100644 --- a/.github/workflows/functional_verified.yml +++ b/.github/workflows/functional_verified.yml @@ -22,7 +22,7 @@ on: - deleted env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/leaderboard.yml b/.github/workflows/leaderboard.yml index 14aa2eca67b6..32be3ab0224f 100644 --- a/.github/workflows/leaderboard.yml +++ b/.github/workflows/leaderboard.yml @@ -6,7 +6,7 @@ on: - 'v*-beta.*' env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 26a5c8daf348..fca2785b8fca 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -14,7 +14,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/minikube-image-benchmark.yml b/.github/workflows/minikube-image-benchmark.yml index e546b86bbeb1..2ae77d8eb4bd 100644 --- a/.github/workflows/minikube-image-benchmark.yml +++ b/.github/workflows/minikube-image-benchmark.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index c3e167ea3db3..79de759981c4 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -12,7 +12,7 @@ on: - "!deploy/iso/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/sync-minikube.yml b/.github/workflows/sync-minikube.yml index bbe294b484c8..2eb5eb3a39d0 100644 --- a/.github/workflows/sync-minikube.yml +++ b/.github/workflows/sync-minikube.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/time-to-k8s-public-chart.yml b/.github/workflows/time-to-k8s-public-chart.yml index a321b0c6a833..2777cef53602 100644 --- a/.github/workflows/time-to-k8s-public-chart.yml +++ b/.github/workflows/time-to-k8s-public-chart.yml @@ -6,7 +6,7 @@ on: - cron: "0 2,14 * * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/time-to-k8s.yml b/.github/workflows/time-to-k8s.yml index ec3887b1b3cc..f68ccd1c254a 100644 --- a/.github/workflows/time-to-k8s.yml +++ b/.github/workflows/time-to-k8s.yml @@ -5,7 +5,7 @@ on: types: [released] env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/translations.yml b/.github/workflows/translations.yml index a75a501e887c..f60b0379e5df 100644 --- a/.github/workflows/translations.yml +++ b/.github/workflows/translations.yml @@ -6,7 +6,7 @@ on: - "translations/**" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-buildkit-version.yml b/.github/workflows/update-buildkit-version.yml index a3891d21f3a6..89fd3caff8ab 100644 --- a/.github/workflows/update-buildkit-version.yml +++ 
b/.github/workflows/update-buildkit-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 3" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-calico-version.yml b/.github/workflows/update-calico-version.yml index e654b7eb722e..f9cbfe4e9e29 100644 --- a/.github/workflows/update-calico-version.yml +++ b/.github/workflows/update-calico-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-cloud-spanner-emulator-version.yml b/.github/workflows/update-cloud-spanner-emulator-version.yml index 56a2d497bf5d..282a067525f3 100644 --- a/.github/workflows/update-cloud-spanner-emulator-version.yml +++ b/.github/workflows/update-cloud-spanner-emulator-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-cni-plugins-version.yml b/.github/workflows/update-cni-plugins-version.yml index 76415249ef5e..3bb59ba9b371 100644 --- a/.github/workflows/update-cni-plugins-version.yml +++ b/.github/workflows/update-cni-plugins-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-containerd-version.yml b/.github/workflows/update-containerd-version.yml index 6970980875ec..ee7d9f3365f6 100644 --- a/.github/workflows/update-containerd-version.yml +++ b/.github/workflows/update-containerd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-cri-dockerd-version.yml b/.github/workflows/update-cri-dockerd-version.yml index c9eb62dd760a..0be9601d6b61 100644 --- a/.github/workflows/update-cri-dockerd-version.yml +++ b/.github/workflows/update-cri-dockerd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-cri-o-version.yml b/.github/workflows/update-cri-o-version.yml index e03f5d72be72..e269c7e693c3 100644 --- a/.github/workflows/update-cri-o-version.yml +++ b/.github/workflows/update-cri-o-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 5" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-crictl-version.yml b/.github/workflows/update-crictl-version.yml index 09d50cb044a2..1100787f665f 100644 --- a/.github/workflows/update-crictl-version.yml +++ b/.github/workflows/update-crictl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 3" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-docker-buildx-version.yml b/.github/workflows/update-docker-buildx-version.yml index 2786d3421118..7b3a56a6e41a 100644 --- a/.github/workflows/update-docker-buildx-version.yml +++ b/.github/workflows/update-docker-buildx-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git 
a/.github/workflows/update-docker-version.yml b/.github/workflows/update-docker-version.yml index a6f029092841..599fb266e9e9 100644 --- a/.github/workflows/update-docker-version.yml +++ b/.github/workflows/update-docker-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 4" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-docsy-version.yml b/.github/workflows/update-docsy-version.yml index 858195f343e7..0b72ea804cd0 100644 --- a/.github/workflows/update-docsy-version.yml +++ b/.github/workflows/update-docsy-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-flannel-version.yml b/.github/workflows/update-flannel-version.yml index 3a761bac8e11..f5daa0d3ed91 100644 --- a/.github/workflows/update-flannel-version.yml +++ b/.github/workflows/update-flannel-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-gcp-auth-version.yml b/.github/workflows/update-gcp-auth-version.yml index cbea8ad351e8..60e9a4772387 100644 --- a/.github/workflows/update-gcp-auth-version.yml +++ b/.github/workflows/update-gcp-auth-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-gh-version.yml b/.github/workflows/update-gh-version.yml index e0d7d00b2bac..71d634ff0042 100644 --- a/.github/workflows/update-gh-version.yml +++ b/.github/workflows/update-gh-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-go-github-version.yml b/.github/workflows/update-go-github-version.yml index b9b15ffad2c1..c2b997128dcb 100644 --- a/.github/workflows/update-go-github-version.yml +++ b/.github/workflows/update-go-github-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-golang-version.yml b/.github/workflows/update-golang-version.yml index e5bd60aa4bf1..4d69bc1a8e50 100644 --- a/.github/workflows/update-golang-version.yml +++ b/.github/workflows/update-golang-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 9 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-golint-version.yml b/.github/workflows/update-golint-version.yml index 5984310d6146..9cc8229ad4fa 100644 --- a/.github/workflows/update-golint-version.yml +++ b/.github/workflows/update-golint-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-gopogh-version.yml b/.github/workflows/update-gopogh-version.yml index 7a151294a88a..ce24edf62e56 100644 --- a/.github/workflows/update-gopogh-version.yml +++ b/.github/workflows/update-gopogh-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 9 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: 
contents: read diff --git a/.github/workflows/update-gotestsum-version.yml b/.github/workflows/update-gotestsum-version.yml index 85d5cbb86dd7..ea6d29c3d959 100644 --- a/.github/workflows/update-gotestsum-version.yml +++ b/.github/workflows/update-gotestsum-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-hugo-version.yml b/.github/workflows/update-hugo-version.yml index e09ac2dc96fc..2b59c4a0a1fe 100644 --- a/.github/workflows/update-hugo-version.yml +++ b/.github/workflows/update-hugo-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-ingress-version.yml b/.github/workflows/update-ingress-version.yml index 110e98bad196..376a70390c02 100644 --- a/.github/workflows/update-ingress-version.yml +++ b/.github/workflows/update-ingress-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-inspektor-gadget-version.yml b/.github/workflows/update-inspektor-gadget-version.yml index b389bb22339f..d10ce11ac5ae 100644 --- a/.github/workflows/update-inspektor-gadget-version.yml +++ b/.github/workflows/update-inspektor-gadget-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-iso-image-versions.yml b/.github/workflows/update-iso-image-versions.yml index 07f41eb4990a..4d3790b373ea 100644 --- a/.github/workflows/update-iso-image-versions.yml +++ b/.github/workflows/update-iso-image-versions.yml @@ -3,7 +3,7 @@ on: workflow_dispatch: env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read jobs: diff --git a/.github/workflows/update-istio-operator.yml b/.github/workflows/update-istio-operator.yml index 1a34a861b0ad..443d3f07deee 100644 --- a/.github/workflows/update-istio-operator.yml +++ b/.github/workflows/update-istio-operator.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-k8s-versions.yml b/.github/workflows/update-k8s-versions.yml index 13e17c21790b..a072e8c5190b 100644 --- a/.github/workflows/update-k8s-versions.yml +++ b/.github/workflows/update-k8s-versions.yml @@ -6,7 +6,7 @@ on: - cron: "0 8 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-kindnetd-version.yml b/.github/workflows/update-kindnetd-version.yml index 8b5ba11cb9f2..1f37b3006f5f 100644 --- a/.github/workflows/update-kindnetd-version.yml +++ b/.github/workflows/update-kindnetd-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read jobs: diff --git a/.github/workflows/update-kong-ingress-controller-version.yml b/.github/workflows/update-kong-ingress-controller-version.yml index 38922a1b7e85..187364053f32 100644 --- a/.github/workflows/update-kong-ingress-controller-version.yml +++ 
b/.github/workflows/update-kong-ingress-controller-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-kong-version.yml b/.github/workflows/update-kong-version.yml index 049d3ce9819d..a48f00c52b1b 100644 --- a/.github/workflows/update-kong-version.yml +++ b/.github/workflows/update-kong-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-kubeadm-constants.yml b/.github/workflows/update-kubeadm-constants.yml index e7f052924c40..21f3eae45caa 100644 --- a/.github/workflows/update-kubeadm-constants.yml +++ b/.github/workflows/update-kubeadm-constants.yml @@ -6,7 +6,7 @@ on: - cron: "0 6 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-kubectl-version.yml b/.github/workflows/update-kubectl-version.yml index 4313dd03dcd6..a93e6c1f7a54 100644 --- a/.github/workflows/update-kubectl-version.yml +++ b/.github/workflows/update-kubectl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-kubernetes-versions-list.yml b/.github/workflows/update-kubernetes-versions-list.yml index 06e66f0677bd..9e4b3fbb0400 100644 --- a/.github/workflows/update-kubernetes-versions-list.yml +++ b/.github/workflows/update-kubernetes-versions-list.yml @@ -6,7 +6,7 @@ on: - cron: "0 6 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-metrics-server-version.yml b/.github/workflows/update-metrics-server-version.yml index 836a5fabcd37..71360e542d32 100644 --- a/.github/workflows/update-metrics-server-version.yml +++ b/.github/workflows/update-metrics-server-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-nerdctl-version.yml b/.github/workflows/update-nerdctl-version.yml index 4d672f0dda17..b88fd1710bef 100644 --- a/.github/workflows/update-nerdctl-version.yml +++ b/.github/workflows/update-nerdctl-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-nerdctld-version.yml b/.github/workflows/update-nerdctld-version.yml index 2d531798e380..00e449bd0354 100644 --- a/.github/workflows/update-nerdctld-version.yml +++ b/.github/workflows/update-nerdctld-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-nvidia-device-plugin-version.yml b/.github/workflows/update-nvidia-device-plugin-version.yml index 48204a1f4c89..a29c97dd6534 100644 --- a/.github/workflows/update-nvidia-device-plugin-version.yml +++ b/.github/workflows/update-nvidia-device-plugin-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git 
a/.github/workflows/update-registry-version.yml b/.github/workflows/update-registry-version.yml index ec19ca1505e0..6a273f20cf68 100644 --- a/.github/workflows/update-registry-version.yml +++ b/.github/workflows/update-registry-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-runc-version.yml b/.github/workflows/update-runc-version.yml index 53ba1a80610b..ad6a1bdb9ee7 100644 --- a/.github/workflows/update-runc-version.yml +++ b/.github/workflows/update-runc-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 2" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-site-node-version.yml b/.github/workflows/update-site-node-version.yml index 9db39d6508bf..fc63cf127d7e 100644 --- a/.github/workflows/update-site-node-version.yml +++ b/.github/workflows/update-site-node-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/update-ubuntu-version.yml b/.github/workflows/update-ubuntu-version.yml index 9597500d761a..b2c7a400d10b 100644 --- a/.github/workflows/update-ubuntu-version.yml +++ b/.github/workflows/update-ubuntu-version.yml @@ -6,7 +6,7 @@ on: - cron: "0 10 * * 1" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/.github/workflows/yearly-leaderboard.yml b/.github/workflows/yearly-leaderboard.yml index 2851e8964968..fda7f91c220c 100644 --- a/.github/workflows/yearly-leaderboard.yml +++ b/.github/workflows/yearly-leaderboard.yml @@ -6,7 +6,7 @@ on: - cron: "0 0 2 * *" env: GOPROXY: https://proxy.golang.org - GO_VERSION: '1.21.6' + GO_VERSION: '1.22.0' permissions: contents: read diff --git a/Makefile b/Makefile index f7ae3f5a1087..2e693e78a16e 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ RPM_REVISION ?= 0 # used by hack/jenkins/release_build_and_upload.sh and KVM_BUILD_IMAGE, see also BUILD_IMAGE below # update this only by running `make update-golang-version` -GO_VERSION ?= 1.21.6 +GO_VERSION ?= 1.22.0 # update this only by running `make update-golang-version` GO_K8S_VERSION_PREFIX ?= v1.30.0 diff --git a/deploy/addons/auto-pause/Dockerfile b/deploy/addons/auto-pause/Dockerfile index 7e58dcb0430c..b435b6fea6a6 100644 --- a/deploy/addons/auto-pause/Dockerfile +++ b/deploy/addons/auto-pause/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.6 AS builder +FROM golang:1.22.0 AS builder WORKDIR /app COPY go.mod go.sum ./ RUN go mod download diff --git a/deploy/iso/minikube-iso/go.hash b/deploy/iso/minikube-iso/go.hash index 8f5e95d339ef..d9eb5bd9dd19 100644 --- a/deploy/iso/minikube-iso/go.hash +++ b/deploy/iso/minikube-iso/go.hash @@ -24,3 +24,4 @@ sha256 186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21 sha256 47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz sha256 285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz sha256 124926a62e45f78daabbaedb9c011d97633186a33c238ffc1e25320c02046248 go1.21.6.src.tar.gz +sha256 4d196c3d41a0d6c1dfc64d04e3cc1f608b0c436bd87b7060ce3e23234e1f4d5c go1.22.0.src.tar.gz diff --git a/deploy/kicbase/Dockerfile b/deploy/kicbase/Dockerfile index dcfd305ac201..04133ffb6d87 100644 --- a/deploy/kicbase/Dockerfile +++ 
b/deploy/kicbase/Dockerfile
@@ -21,7 +21,7 @@
 # this ARG needs to be global to use it in `FROM` & is updated for new versions of ubuntu:jammy-*
 ARG UBUNTU_JAMMY_IMAGE="ubuntu:jammy-20240212"
 # multi-stage docker build so we can build auto-pause for arm64
-FROM golang:1.21.6 as auto-pause
+FROM golang:1.22.0 as auto-pause
 WORKDIR /src
 # auto-pause depends on core minikube code so we need to pass the whole source code as the context
 # copy in the minimal amount of source code possible
diff --git a/go.mod b/go.mod
index 3a5e626b3d4d..e542dbfe5406 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module k8s.io/minikube
 
-go 1.21
+go 1.22
 
 require (
 	cloud.google.com/go/storage v1.38.0
diff --git a/hack/jenkins/installers/check_install_golang.ps1 b/hack/jenkins/installers/check_install_golang.ps1
index 08b0239c3938..6454d54791b8 100644
--- a/hack/jenkins/installers/check_install_golang.ps1
+++ b/hack/jenkins/installers/check_install_golang.ps1
@@ -31,7 +31,7 @@ AddToPathIfMissing -pathToAdd "C:\Program Files\Go\bin" -scope "Machine"
 AddToPathIfMissing -pathToAdd "$HOME\go\bin" -scope "User"
 
 # Download Go
-$GoVersion = "1.21.6"
+$GoVersion = "1.22.0"
 $CurrentGo = go version
 if ((!$?) -or ($CurrentGo -NotLike "*$GoVersion*")) {
     (New-Object Net.WebClient).DownloadFile("https://go.dev/dl/go$GoVersion.windows-amd64.zip", "$env:TEMP\golang.zip")
diff --git a/hack/jenkins/installers/check_install_golang.sh b/hack/jenkins/installers/check_install_golang.sh
index c3b730ee4224..444aae347c84 100755
--- a/hack/jenkins/installers/check_install_golang.sh
+++ b/hack/jenkins/installers/check_install_golang.sh
@@ -22,7 +22,7 @@ if (($# < 1)); then
   exit 1
 fi
 
-VERSION_TO_INSTALL=1.21.6
+VERSION_TO_INSTALL=1.22.0
 INSTALL_PATH=${1}
 
 function current_arch() {

From 93175fde8814bcb017715883d88e06f3973272e6 Mon Sep 17 00:00:00 2001
From: minikube-bot
Date: Mon, 4 Mar 2024 06:10:14 +0000
Subject: [PATCH 25/41] update image constants for kubeadm images

---
 pkg/minikube/constants/constants_kubeadm_images.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pkg/minikube/constants/constants_kubeadm_images.go b/pkg/minikube/constants/constants_kubeadm_images.go
index 51373bfb3cfb..89a623db4abe 100644
--- a/pkg/minikube/constants/constants_kubeadm_images.go
+++ b/pkg/minikube/constants/constants_kubeadm_images.go
@@ -18,6 +18,11 @@ package constants
 
 var (
 	KubeadmImages = map[string]map[string]string{
+		"v1.30.0-alpha.3": {
+			"coredns/coredns": "v1.11.1",
+			"etcd":            "3.5.12-0",
+			"pause":           "3.9",
+		},
 		"v1.29.2": {
 			"coredns/coredns": "v1.11.1",
 			"etcd": "3.5.10-0",

From 711f4e4c50f4024e018e3330e8bace1554ee4020 Mon Sep 17 00:00:00 2001
From: minikube-bot
Date: Mon, 4 Mar 2024 06:18:45 +0000
Subject: [PATCH 26/41] update Kubernetes versions list

---
 pkg/minikube/constants/constants_kubernetes_versions.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/minikube/constants/constants_kubernetes_versions.go b/pkg/minikube/constants/constants_kubernetes_versions.go
index cc0de084de57..6fca86f16f0a 100644
--- a/pkg/minikube/constants/constants_kubernetes_versions.go
+++ b/pkg/minikube/constants/constants_kubernetes_versions.go
@@ -21,6 +21,7 @@ package constants
 
 // ValidKubernetesVersions is a list of Kubernetes versions in order from newest to oldest
 // This is used when outputting Kubernetes versions and to select the latest patch version when unspecified
 var ValidKubernetesVersions = []string{
+	"v1.30.0-alpha.3",
 	"v1.30.0-alpha.2",
 	"v1.30.0-alpha.1",
 	"v1.29.2",

From c7e19059e2d4eac6b24d46411b87c98d7e03e6d2 Mon Sep 17 00:00:00 2001
From: minikube-bot
Date: Mon, 4 Mar 2024 10:27:47 +0000
Subject: [PATCH 27/41] Addon nvidia-device-plugin: Update nvidia/k8s-device-plugin image from v0.14.4 to v0.14.5

---
 pkg/minikube/assets/addons.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go
index 73ce9fd9172f..405c776cfe56 100644
--- a/pkg/minikube/assets/addons.go
+++ b/pkg/minikube/assets/addons.go
@@ -784,7 +784,7 @@ var Addons = map[string]*Addon{
 		MustBinAsset(addons.NvidiaDevicePlugin, "nvidia-device-plugin/nvidia-device-plugin.yaml.tmpl", vmpath.GuestAddonsDir, "nvidia-device-plugin.yaml", "0640"),
 	}, false, "nvidia-device-plugin", "3rd party (NVIDIA)", "", "", map[string]string{
-		"NvidiaDevicePlugin": "nvidia/k8s-device-plugin:v0.14.4@sha256:2388c1f792daf3e810a6b43cdf709047183b50f5ec3ed476fae6aa0a07e68acc",
+		"NvidiaDevicePlugin": "nvidia/k8s-device-plugin:v0.14.5@sha256:50aa9517d771e3b0ffa7fded8f1e988dba680a7ff5efce162ce31d1b5ec043e2",
 	}, map[string]string{
 		"NvidiaDevicePlugin": "nvcr.io",
 	}),

From 5ddb71fe0a42bb2133c9d6493465817bfdb3ae9e Mon Sep 17 00:00:00 2001
From: minikube-bot
Date: Mon, 4 Mar 2024 10:34:05 +0000
Subject: [PATCH 28/41] Addon ingress: Update ingress-nginx/controller image from v1.9.6 to v1.10.0

---
 pkg/minikube/assets/addons.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go
index 73ce9fd9172f..e3b4e4584eb1 100644
--- a/pkg/minikube/assets/addons.go
+++ b/pkg/minikube/assets/addons.go
@@ -277,11 +277,11 @@ var Addons = map[string]*Addon{
 		"0640"),
 	}, false, "ingress", "Kubernetes", "", "https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/", map[string]string{
 		// https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L445
-		"IngressController": "ingress-nginx/controller:v1.9.6@sha256:1405cc613bd95b2c6edd8b2a152510ae91c7e62aea4698500d23b2145960ab9c",
+		"IngressController": "ingress-nginx/controller:v1.10.0@sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c",
 		// https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L552
-		"KubeWebhookCertgenCreate": "ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06@sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084",
+		"KubeWebhookCertgenCreate": "ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334",
 		// https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L601
-		"KubeWebhookCertgenPatch": "ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06@sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084",
+		"KubeWebhookCertgenPatch": "ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334",
 	}, map[string]string{
 		"IngressController": "registry.k8s.io",
 		"KubeWebhookCertgenCreate": "registry.k8s.io",
@@ -615,7 +615,7 @@ var Addons = map[string]*Addon{
 		"gcp-auth-webhook.yaml",
 		"0640"),
 	}, false, "gcp-auth", "Google", "", "https://minikube.sigs.k8s.io/docs/handbook/addons/gcp-auth/", map[string]string{
-		"KubeWebhookCertgen": "ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06@sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084",
+		"KubeWebhookCertgen":
"ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334", "GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.1.1@sha256:01b0de782aa30e7fc91ac5a91b5cc35e95e9679dee7ef07af06457b471f88f32", }, map[string]string{ "GCPAuthWebhook": "gcr.io", From 569c7216d50bf2180926b59eb11afee82960b3d4 Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Mon, 4 Mar 2024 10:34:13 +0000 Subject: [PATCH 29/41] Update go-github from v59.0.0 to v60.0.0 --- cmd/minikube/cmd/config/kubernetes_version.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- hack/preload-images/kubernetes.go | 2 +- hack/update/github.go | 2 +- hack/update/ingress_version/update_ingress_version.go | 2 +- hack/update/kubeadm_constants/update_kubeadm_constants.go | 2 +- .../update_kubernetes_versions_list.go | 2 +- hack/update/site_node_version/update_site_node_version.go | 2 +- pkg/perf/monitor/github.go | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/minikube/cmd/config/kubernetes_version.go b/cmd/minikube/cmd/config/kubernetes_version.go index 7fcb3d45846f..d291af172691 100644 --- a/cmd/minikube/cmd/config/kubernetes_version.go +++ b/cmd/minikube/cmd/config/kubernetes_version.go @@ -20,7 +20,7 @@ import ( "context" "net/http" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "golang.org/x/mod/semver" "k8s.io/minikube/pkg/minikube/constants" ) diff --git a/go.mod b/go.mod index e542dbfe5406..87a52e5fbe23 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.19.0 - github.com/google/go-github/v59 v59.0.0 + github.com/google/go-github/v60 v60.0.0 github.com/google/slowjam v1.1.0 github.com/google/uuid v1.6.0 github.com/hashicorp/go-getter v1.7.3 diff --git a/go.sum b/go.sum index 8ac5f5aa58ca..385f3eb166ca 100644 --- a/go.sum +++ b/go.sum @@ -860,8 +860,8 @@ github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYV github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v59 v59.0.0 h1:7h6bgpF5as0YQLLkEiVqpgtJqjimMYhBkD4jT5aN3VA= -github.com/google/go-github/v59 v59.0.0/go.mod h1:rJU4R0rQHFVFDOkqGWxfLNo6vEk4dv40oDjhV/gH6wM= +github.com/google/go-github/v60 v60.0.0 h1:oLG98PsLauFvvu4D/YPxq374jhSxFYdzQGNCyONLfn8= +github.com/google/go-github/v60 v60.0.0/go.mod h1:ByhX2dP9XT9o/ll2yXAu2VD8l5eNVg8hD4Cr0S/LmQk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= diff --git a/hack/preload-images/kubernetes.go b/hack/preload-images/kubernetes.go index 510e66c9c0bf..52238f1bda90 100644 --- a/hack/preload-images/kubernetes.go +++ b/hack/preload-images/kubernetes.go @@ -20,7 +20,7 @@ import ( "context" "strings" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "k8s.io/klog/v2" ) diff --git a/hack/update/github.go b/hack/update/github.go index 047130697e4e..8238afc93140 100644 --- a/hack/update/github.go +++ b/hack/update/github.go @@ -23,7 
+23,7 @@ import ( "golang.org/x/mod/semver" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" ) const ( diff --git a/hack/update/ingress_version/update_ingress_version.go b/hack/update/ingress_version/update_ingress_version.go index cd9456f186a0..7aecebb43c6f 100644 --- a/hack/update/ingress_version/update_ingress_version.go +++ b/hack/update/ingress_version/update_ingress_version.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" diff --git a/hack/update/kubeadm_constants/update_kubeadm_constants.go b/hack/update/kubeadm_constants/update_kubeadm_constants.go index a8b2afa3f1b6..695290373801 100644 --- a/hack/update/kubeadm_constants/update_kubeadm_constants.go +++ b/hack/update/kubeadm_constants/update_kubeadm_constants.go @@ -29,7 +29,7 @@ import ( "text/template" "time" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go b/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go index 8bc94bdf8279..924a8b842136 100644 --- a/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go +++ b/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go @@ -23,7 +23,7 @@ import ( "sort" "time" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/hack/update/site_node_version/update_site_node_version.go b/hack/update/site_node_version/update_site_node_version.go index 739cda8698cd..a8dac550f04a 100644 --- a/hack/update/site_node_version/update_site_node_version.go +++ b/hack/update/site_node_version/update_site_node_version.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/pkg/perf/monitor/github.go b/pkg/perf/monitor/github.go index cf8ff3601a7a..608f1ac94985 100644 --- a/pkg/perf/monitor/github.go +++ b/pkg/perf/monitor/github.go @@ -22,7 +22,7 @@ import ( "os" "time" - "github.com/google/go-github/v59/github" + "github.com/google/go-github/v60/github" "github.com/pkg/errors" "golang.org/x/oauth2" ) From 5f6f4cadb78231b716b4cd0649e419ae7baf21ad Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Mon, 4 Mar 2024 10:34:30 +0000 Subject: [PATCH 30/41] Site: Update hugo from v0.123.3 to v0.123.7 --- netlify.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netlify.toml b/netlify.toml index a980a4800183..a6279688b1ce 100644 --- a/netlify.toml +++ b/netlify.toml @@ -5,7 +5,7 @@ command = "pwd && cd themes/docsy && npm install && git submodule update -f --in [build.environment] NODE_VERSION = "20.11.1" -HUGO_VERSION = "v0.123.3" +HUGO_VERSION = "v0.123.7" [context.production.environment] HUGO_ENV = "production" From 1cc0a78cd6dbe2d5e17509520d058a080572904b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:54:36 +0000 Subject: [PATCH 31/41] Build(deps): Bump golang.org/x/mod from 0.15.0 to 0.16.0 Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.15.0 to 0.16.0. 
- [Commits](https://github.com/golang/mod/compare/v0.15.0...v0.16.0) --- updated-dependencies: - dependency-name: golang.org/x/mod dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 87a52e5fbe23..72495f34073f 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( golang.org/x/build v0.0.0-20190927031335-2835ba2e683f golang.org/x/crypto v0.20.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 - golang.org/x/mod v0.15.0 + golang.org/x/mod v0.16.0 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.17.0 diff --git a/go.sum b/go.sum index 385f3eb166ca..848654414e15 100644 --- a/go.sum +++ b/go.sum @@ -1785,8 +1785,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From e3cf9d50ef027640463421132565a04904ae8dc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:54:39 +0000 Subject: [PATCH 32/41] Build(deps): Bump actions/download-artifact from 4.1.3 to 4.1.4 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.3 to 4.1.4. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/87c55149d96e628cc2ef7e6fc2aab372015aec85...c850b930e6ba138125429b7e5c93fc707a7f8427) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/functional_verified.yml | 4 ++-- .github/workflows/master.yml | 12 ++++++------ .github/workflows/pr.yml | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/functional_verified.yml b/.github/workflows/functional_verified.yml index 5e2988a92d7f..8ce5e7e4ec5a 100644 --- a/.github/workflows/functional_verified.yml +++ b/.github/workflows/functional_verified.yml @@ -118,7 +118,7 @@ jobs: hostname || true echo "--------------------------" - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -202,7 +202,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: download all extra reports - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 - name: upload all extra reports shell: bash {0} continue-on-error: true diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index fca2785b8fca..e465774f8c87 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -140,7 +140,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -238,7 +238,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -340,7 +340,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -440,7 +440,7 @@ jobs: sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -545,7 +545,7 @@ jobs: run: | sudo sysctl fs.protected_regular=0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -619,7 +619,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: download all reports - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 - name: upload all reports shell: bash {0} continue-on-error: true diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 79de759981c4..029f85380f77 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -138,7 +138,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: 
actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -237,7 +237,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -354,7 +354,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -457,7 +457,7 @@ jobs: run: | go install github.com/medyagh/gopogh/cmd/gopogh@v0.26.0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -558,7 +558,7 @@ jobs: sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -664,7 +664,7 @@ jobs: run: | sudo sysctl fs.protected_regular=0 - name: Download Binaries - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 with: name: minikube_binaries path: minikube_binaries @@ -740,7 +740,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: download all reports - uses: actions/download-artifact@87c55149d96e628cc2ef7e6fc2aab372015aec85 + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 - name: upload all reports shell: bash {0} continue-on-error: true From 85fc33520d6d8c304e037f058d0b49c7c63f21e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:54:43 +0000 Subject: [PATCH 33/41] Build(deps): Bump libvirt.org/go/libvirt from 1.10000.0 to 1.10001.0 Bumps [libvirt.org/go/libvirt](https://gitlab.com/libvirt/libvirt-go-module) from 1.10000.0 to 1.10001.0. - [Commits](https://gitlab.com/libvirt/libvirt-go-module/compare/v1.10000.0...v1.10001.0) --- updated-dependencies: - dependency-name: libvirt.org/go/libvirt dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 87a52e5fbe23..eb1e3b2e2486 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/kubectl v0.29.2 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - libvirt.org/go/libvirt v1.10000.0 + libvirt.org/go/libvirt v1.10001.0 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) diff --git a/go.sum b/go.sum index 385f3eb166ca..64f2d5364370 100644 --- a/go.sum +++ b/go.sum @@ -2563,8 +2563,8 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -libvirt.org/go/libvirt v1.10000.0 h1:fPVWdvZz8TSmMrTnsStih9ETsHlrzIgSEEiFzOLbhO8= -libvirt.org/go/libvirt v1.10000.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= +libvirt.org/go/libvirt v1.10001.0 h1:lEVDNE7xfzmZXiDEGIS8NvJSuaz11OjRXw+ufbQEtPY= +libvirt.org/go/libvirt v1.10001.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= From 26aa9b0f8996eed9873d0c3f727df7ffcb590ee9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:54:56 +0000 Subject: [PATCH 34/41] Build(deps): Bump cloud.google.com/go/storage from 1.38.0 to 1.39.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.38.0 to 1.39.0. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.38.0...spanner/v1.39.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 87a52e5fbe23..4af1d39b2a7b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module k8s.io/minikube go 1.22 require ( - cloud.google.com/go/storage v1.38.0 + cloud.google.com/go/storage v1.39.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.14 github.com/Delta456/box-cli-maker/v2 v2.3.0 github.com/GoogleCloudPlatform/cloudsql-proxy v1.34.0 @@ -89,10 +89,10 @@ require ( require ( cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.4 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect - cloud.google.com/go/monitoring v1.17.1 // indirect + cloud.google.com/go/monitoring v1.18.0 // indirect cloud.google.com/go/trace v1.10.5 // indirect git.sr.ht/~sbinet/gg v0.5.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -223,8 +223,8 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.1 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect google.golang.org/grpc v1.61.1 // indirect google.golang.org/protobuf v1.32.0 // indirect diff --git a/go.sum b/go.sum index 385f3eb166ca..6fe139d10aee 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= -cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= @@ -129,8 +129,8 @@ cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHi cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/monitoring v1.17.1 h1:xqcNr+JXmFMCPXnent/i1r0De6zrcqzgcMy5X1xa5vg= -cloud.google.com/go/monitoring v1.17.1/go.mod h1:SJzPMakCF0GHOuKEH/r4hxVKF04zl+cRPQyc3d/fqII= +cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4= +cloud.google.com/go/monitoring v1.18.0/go.mod 
h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= @@ -181,8 +181,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= -cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= +cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= +cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/trace v1.10.5 h1:0pr4lIKJ5XZFYD9GtxXEWr0KkVeigc3wlGpZco0X1oA= @@ -2360,10 +2360,10 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= From 5552167479e90960bfc27f03125f5360d5123d58 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:55:09 +0000 Subject: [PATCH 35/41] Build(deps): Bump golang.org/x/sys from 0.17.0 to 0.18.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.17.0 to 0.18.0. 
- [Commits](https://github.com/golang/sys/compare/v0.17.0...v0.18.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 87a52e5fbe23..eb750d8609d0 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( golang.org/x/mod v0.15.0 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.18.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 gonum.org/v1/plot v0.14.0 diff --git a/go.sum b/go.sum index 385f3eb166ca..cf1ee37ed170 100644 --- a/go.sum +++ b/go.sum @@ -2061,8 +2061,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= From 6c28f40cf04b0437aa335807bc358aa8a69d4af8 Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Mon, 4 Mar 2024 17:44:38 +0000 Subject: [PATCH 36/41] Update auto-generated docs and translations --- site/content/en/docs/contrib/tests.en.md | 9 --------- 1 file changed, 9 deletions(-) diff --git a/site/content/en/docs/contrib/tests.en.md b/site/content/en/docs/contrib/tests.en.md index daf84afdfaec..7b89e6f8f38d 100644 --- a/site/content/en/docs/contrib/tests.en.md +++ b/site/content/en/docs/contrib/tests.en.md @@ -487,15 +487,6 @@ is a test case building with --build-env #### validateImageBuildWithDockerIgnore is a test case building with .dockerignore -## TestIngressAddonLegacy -tests ingress and ingress-dns addons with legacy k8s version <1.19 - -#### validateIngressAddonActivation -tests ingress addon activation - -#### validateIngressDNSAddonActivation -tests ingress-dns addon activation - ## TestJSONOutput makes sure json output works properly for the start, pause, unpause, and stop commands From caf3af88c11943ffcf82a85b16aaf03e60586ee6 Mon Sep 17 00:00:00 2001 From: Steven Powell Date: Mon, 4 Mar 2024 09:50:55 -0800 Subject: [PATCH 37/41] CI: Fix cni-plugins path --- hack/update/get_version/get_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/update/get_version/get_version.go b/hack/update/get_version/get_version.go index eb72c405ad94..25dcbf127a2f 100644 --- a/hack/update/get_version/get_version.go +++ b/hack/update/get_version/get_version.go @@ -36,7 +36,7 @@ var dependencies = map[string]dependency{ "buildkit": {"deploy/iso/minikube-iso/arch/x86_64/package/buildkit-bin/buildkit-bin.mk", `BUILDKIT_BIN_VERSION = (.*)`}, "calico": {"pkg/minikube/bootstrapper/images/images.go", `calicoVersion = "(.*)"`}, "cloud-spanner": {addonsFile, 
`cloud-spanner-emulator/emulator:(.*)@`}, - "cni-plugins": {"deploy/iso/minikube-iso/arch/x86_64/package/cni-plugins/cni-plugins-latest.mk", `CNI_PLUGINS_VERSION = (.*)`}, + "cni-plugins": {"deploy/iso/minikube-iso/arch/x86_64/package/cni-plugins-latest/cni-plugins-latest.mk", `CNI_PLUGINS_VERSION = (.*)`}, "containerd": {"deploy/iso/minikube-iso/arch/x86_64/package/containerd-bin/containerd-bin.mk", `CONTAINERD_BIN_VERSION = (.*)`}, "cri-dockerd": {dockerfile, `CRI_DOCKERD_VERSION="(.*)"`}, "cri-o": {"deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk", `CRIO_BIN_VERSION = (.*)`}, From 9ba0c36c34299b9a62817154418e6804021a2cc1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 17:57:36 +0000 Subject: [PATCH 38/41] Build(deps): Bump github.com/shirou/gopsutil/v3 from 3.24.1 to 3.24.2 Bumps [github.com/shirou/gopsutil/v3](https://github.com/shirou/gopsutil) from 3.24.1 to 3.24.2. - [Release notes](https://github.com/shirou/gopsutil/releases) - [Commits](https://github.com/shirou/gopsutil/compare/v3.24.1...v3.24.2) --- updated-dependencies: - dependency-name: github.com/shirou/gopsutil/v3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 2d9f5ed4d09a..abbb362a3a6f 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/pkg/profile v1.7.0 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 - github.com/shirou/gopsutil/v3 v3.24.1 + github.com/shirou/gopsutil/v3 v3.24.2 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 @@ -211,7 +211,7 @@ require ( github.com/ulikunitz/xz v0.5.10 // indirect github.com/vbatts/tar-split v0.11.3 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 // indirect diff --git a/go.sum b/go.sum index 14c35036c592..c5a8d0aa47f5 100644 --- a/go.sum +++ b/go.sum @@ -1464,8 +1464,8 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= -github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= +github.com/shirou/gopsutil/v3 v3.24.2 h1:kcR0erMbLg5/3LcInpw0X/rrPSqq4CDPyI6A6ZRC18Y= +github.com/shirou/gopsutil/v3 v3.24.2/go.mod h1:tSg/594BcA+8UdQU2XcW803GWYgdtauFFPgJCJKZlVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -1605,8 +1605,8 @@ 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -2060,7 +2060,7 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= From 9933b8907983b3b1bee2997b71c8c80f9c76595a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 18:02:36 +0000 Subject: [PATCH 39/41] Build(deps): Bump golang.org/x/term from 0.17.0 to 0.18.0 Bumps [golang.org/x/term](https://github.com/golang/term) from 0.17.0 to 0.18.0. - [Commits](https://github.com/golang/term/compare/v0.17.0...v0.18.0) --- updated-dependencies: - dependency-name: golang.org/x/term dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 57b5173bf554..90d739dc0863 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.18.0 - golang.org/x/term v0.17.0 + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 gonum.org/v1/plot v0.14.0 google.golang.org/api v0.167.0 diff --git a/go.sum b/go.sum index 3fe55b6e84f8..f7b36a090e26 100644 --- a/go.sum +++ b/go.sum @@ -2071,8 +2071,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.0.0-20221017184919-83659145692c/go.mod h1:VTIZ7TEbF0BS9Sv9lPTvGbtW8i4z6GGbJBCM37uMCzY= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 8c4e1a0f8b9318dd26c2bbdca1104d823a34cf29 Mon Sep 17 00:00:00 2001 From: Predrag Rogic Date: Tue, 5 Mar 2024 01:16:08 +0000 Subject: [PATCH 40/41] work around cp bug 63245 and rename TestHA to TestMultiControlPlane Replace `cp --archive --parents` with `rsync --archive --relative` in the node backup/restore helpers; rsync recreates the same path-preserving layout under the destination without triggering the cp bug. --- pkg/minikube/machine/machine.go | 4 ++-- test/integration/ha_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/machine/machine.go b/pkg/minikube/machine/machine.go index 92329914e042..d006e4464464 100644 --- a/pkg/minikube/machine/machine.go +++ b/pkg/minikube/machine/machine.go @@ -168,7 +168,7 @@ func backup(h host.Host, files []string) error { errs := []error{} for _, src := range []string{"/etc/cni", "/etc/kubernetes"} { - if _, err := r.RunCmd(exec.Command("sudo", "cp", "--archive", "--parents", "--force", src, vmpath.GuestBackupDir)); err != nil { + if _, err := r.RunCmd(exec.Command("sudo", "rsync", "--archive", "--relative", src, vmpath.GuestBackupDir)); err != nil { errs = append(errs, errors.Errorf("failed to copy %q to %q (will continue): %v", src, vmpath.GuestBackupDir, err)) } } @@ -203,7 +203,7 @@ func restore(h host.Host) error { continue } src := path.Join(vmpath.GuestBackupDir, dst) - if _, err := r.RunCmd(exec.Command("sudo", "cp", "--archive", "--update", "--force", src, "/")); err != nil { + if _, err := r.RunCmd(exec.Command("sudo", "rsync", "--archive", "--update", src, "/")); err != nil { errs = append(errs, errors.Errorf("failed to copy %q to %q (will continue): %v", src, dst, err)) } } diff --git a/test/integration/ha_test.go b/test/integration/ha_test.go index 2b37b9858c6d..cdae834d97b4 100644 --- a/test/integration/ha_test.go +++ b/test/integration/ha_test.go @@ -35,8 +35,8 @@ import ( "k8s.io/minikube/pkg/util/retry" ) -// TestHA tests all ha (multi-control plane) cluster functionality -func TestHA(t *testing.T) { +// TestMultiControlPlane tests all ha (multi-control plane) cluster functionality +func TestMultiControlPlane(t *testing.T) { if NoneDriver() { t.Skip("none driver does not support multinode/ha(multi-control plane) cluster") }
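For readers unfamiliar with `rsync --relative`, here is a minimal sketch (not part of the patch) of why the swap in `backup()` preserves the on-disk layout; it assumes `vmpath.GuestBackupDir` resolves to `/var/lib/minikube/backup`:

```shell
# Both commands recreate the full source path under the backup directory,
# e.g. /etc/kubernetes -> /var/lib/minikube/backup/etc/kubernetes,
# while preserving ownership, permissions, symlinks and timestamps.
sudo cp --archive --parents --force /etc/kubernetes /var/lib/minikube/backup
sudo rsync --archive --relative /etc/kubernetes /var/lib/minikube/backup
```

The destination handling in `restore()` is therefore unchanged; only the copy tool differs, sidestepping the cp bug referenced in the subject line.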
From 4069a7bee5bc02e7024a423d450602760b38bc6d Mon Sep 17 00:00:00 2001 From: minikube-bot Date: Wed, 6 Mar 2024 20:03:38 +0000 Subject: [PATCH 41/41] Update auto-generated docs and translations --- site/content/en/docs/commands/node.md | 4 +- site/content/en/docs/commands/start.md | 3 +- site/content/en/docs/contrib/tests.en.md | 55 +++++++++++++++++++++- translations/de.json | 35 ++++++++++++++ translations/es.json | 57 ++++++++++++++--------- translations/fr.json | 35 ++++++++++++++ translations/ja.json | 35 ++++++++++++++ translations/ko.json | 52 ++++++++++++++------- translations/pl.json | 57 ++++++++++++++--------- translations/ru.json | 58 ++++++++++++++--------- translations/strings.txt | 59 ++++++++++++++---------- translations/zh-CN.json | 44 ++++++++++++++---- 12 files changed, 373 insertions(+), 121 deletions(-) diff --git a/site/content/en/docs/commands/node.md b/site/content/en/docs/commands/node.md index b8dab281681d..cd6c432ece0a 100644 --- a/site/content/en/docs/commands/node.md +++ b/site/content/en/docs/commands/node.md @@ -56,9 +56,9 @@ minikube node add [flags] ### Options ``` - --control-plane This flag is currently unsupported. + --control-plane If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters. --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. - --worker If true, the added node will be marked for work. Defaults to true. (default true) + --worker If set, added node will be available as worker. Defaults to true. (default true) ``` ### Options inherited from parent commands diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 7066002b7e6a..d33c63321a2b 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -58,6 +58,7 @@ minikube start [flags] --force Force minikube to perform possibly dangerous operations --force-systemd If set, force the container runtime to use systemd as cgroup manager. Defaults to false. -g, --gpus string Allow pods to use your NVIDIA GPUs. Options include: [all,nvidia] (Docker driver with Docker container-runtime only) + --ha Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work. --host-dns-resolver Enable host resolver for NAT DNS requests (virtualbox driver only) (default true) --host-only-cidr string The CIDR to be used for the minikube VM (virtualbox driver only) (default "192.168.59.1/24") --host-only-nic-type string NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") @@ -100,7 +101,7 @@ minikube start [flags] --nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares") --no-kubernetes If set, minikube VM/container will start without starting or configuring Kubernetes. (only works on new clusters) --no-vtx-check Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only) - -n, --nodes int The number of nodes to spin up. Defaults to 1. (default 1) + -n, --nodes int The total number of nodes to spin up. Defaults to 1. (default 1) -o, --output string Format to print stdout in. Options include: [text,json] (default "text") --ports strings List of ports that should be exposed (docker and podman driver only) --preload If set, download tarball of preloaded images if available to improve start time. Defaults to true. (default true)
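The new `--ha` and `--control-plane` flags documented above compose as in the following hedged example; the cluster size and command ordering are illustrative, based only on the help text in this patch:

```shell
# Start an HA cluster: three control-plane nodes, all also schedulable for work.
minikube start --ha --nodes 3

# Grow the existing HA cluster: one more control-plane node, then a worker.
minikube node add --control-plane
minikube node add --worker
```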
diff --git a/site/content/en/docs/contrib/tests.en.md b/site/content/en/docs/contrib/tests.en.md index 7b89e6f8f38d..a24c814c42a2 100644 --- a/site/content/en/docs/contrib/tests.en.md +++ b/site/content/en/docs/contrib/tests.en.md @@ -466,6 +466,59 @@ verifies files and packages installed inside minikube ISO/Base image ## TestGvisorAddon tests the functionality of the gVisor addon +## TestMultiControlPlane +tests all ha (multi-control plane) cluster functionality + +#### validateHAStartCluster +ensures ha (multi-control plane) cluster can start. + +#### validateHADeployApp +deploys an app to ha (multi-control plane) cluster and ensures all nodes can serve traffic. + +#### validateHAPingHostFromPods +uses the app previously deployed by validateDeployAppToHACluster to verify its pods, located on different nodes, can resolve "host.minikube.internal". + +#### validateHAAddWorkerNode +uses the minikube node add command to add a worker node to an existing ha (multi-control plane) cluster. + +#### validateHANodeLabels +check if all node labels were configured correctly. + +Steps: +- Get the node labels from the cluster with `kubectl get nodes` +- check if all node labels match the expected Minikube labels: `minikube.k8s.io/*` + +#### validateHAStatusHAppy +ensures minikube profile list outputs correctly with ha (multi-control plane) clusters. + +#### validateHACopyFile +ensures minikube cp works with ha (multi-control plane) clusters. + +#### validateHAStopSecondaryNode +tests ha (multi-control plane) cluster by stopping a secondary control-plane node using minikube node stop command. + +#### validateHAStatusDegraded +ensures minikube profile list outputs correctly with ha (multi-control plane) clusters. + +#### validateHARestartSecondaryNode +tests the minikube node start command on existing stopped secondary node. + +#### validateHARestartClusterKeepsNodes +restarts minikube cluster and checks if the reported node list is unchanged. + +#### validateHADeleteSecondaryNode +tests the minikube node delete command on secondary control-plane. note: currently, 'minikube status' subcommand relies on primary control-plane node and storage-provisioner only runs on a primary control-plane node. + +#### validateHAStopCluster +runs minikube stop on a ha (multi-control plane) cluster. + +#### validateHARestartCluster +verifies a soft restart on a ha (multi-control plane) cluster works. + +#### validateHAAddSecondaryNode +uses the minikube node add command to add a secondary control-plane node to an existing ha (multi-control plane) cluster. + ## TestImageBuild makes sure the 'minikube image build' command works fine @@ -544,7 +597,7 @@ uses the minikube node add command to add a node to an existing cluster make sure minikube profile list outputs correct with multinode clusters #### validateCopyFileWithMultiNode -validateProfileListWithMultiNode make sure minikube profile list outputs correct with multinode clusters +make sure minikube cp works with multinode clusters.
#### validateMultiNodeLabels check if all node labels were configured correctly diff --git a/translations/de.json b/translations/de.json index 0246b4c93152..c4fb3880700c 100644 --- a/translations/de.json +++ b/translations/de.json @@ -55,7 +55,9 @@ "Add, delete, or push a local image into minikube": "Lokales Image zu Minikube hinzufügen, löschen oder pushen", "Add, remove, or list additional nodes": "Hinzufügen, Löschen oder auflisten von zusätzlichen Nodes", "Adding a control-plane node is not yet supported, setting control-plane flag to false": "Das Hinzufügen eines Control-Plane Nodes wird derzeit noch nicht unterstützt, setze control-plane Parameter auf 'false'", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "Node {{.name}} zu Cluster {{.cluster}} hinzufügen", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "Weitere Hilfe-Themen", "Adds a node to the given cluster config, and starts it.": "Fügt einen Node zur angegebenen Cluster-Konfiguration hinzu und startet es.", "Adds a node to the given cluster.": "Fügt einen Node zum angegebenen Cluster hinzu.", @@ -101,6 +103,8 @@ "Cannot use both --output and --format options": "--output und --format können nicht gleichzeitig verwendet werden", "Cannot use the option --no-kubernetes on the {{.name}} driver": "Die Option --no-kubernetes kann nicht mit dem {{.name}} Treiber verwendet werden", "Certificate {{.certPath}} has expired. Generating a new one...": "Das Zertifikat {{.certPath}} ist ausgelaufen. Generiere ein neues...", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "Prüfen Sie, ob sie unnötige PODs laufen haben, indem Sie folgenden Befehl ausführen: 'kubectl get po -A", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "Prüfen Sie die Ausgabe von 'journalctl -xeu kubelet', versuchen Sie --extra-config=kubelet.cgroup-driver=systemd beim Starten von Minikube zu verwenden", "Check that libvirt is setup properly": "Prüfen Sie, ob libvirt korrekt eingerichtet wurde", @@ -134,6 +138,7 @@ "Could not process errors from failed deletion": "Konnte die Fehler der fehlgeschlagenen Löschung nicht verarbeiten", "Could not resolve IP address": "Konnte IP-Adresse nicht auflösen", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Ländercode des zu verwendenden Image Mirror. Lassen Sie dieses Feld leer, um den globalen zu verwenden. 
Nutzer vom chinesischen Festland stellen cn ein.", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "Bereitstellung {{.name}} wird erstellt...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...": "Erstelle {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Speicher={{.memory_size}}MB) ...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Erstelle {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Speicher={{.memory_size}}MB, Disk={{.disk_size}}MB ...", @@ -223,6 +228,7 @@ "Error generating unset output": "Fehler beim Generieren der unset-Ausgabe", "Error getting cluster bootstrapper": "Fehler beim Holen des Cluster Bootstrapper", "Error getting cluster config": "Fehler beim Holen der Cluster Konfiguration", + "Error getting control-plane node": "", "Error getting host": "Fehler beim Holen des Hosts", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "Fehler beim Binden des Ports für den Treiber {{.driver_name}}: {{.error}}", "Error getting primary control plane": "Fehler beim Holen der primären Kontroll Ebene (primary control plane)", @@ -349,6 +355,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "Go Template Format String für die Ausgabe der Konfigurations-Ansicht Ausgabe. Das Format von Go Templates ist hier beschrieben: https://pkg.go.dev/text/template\nFür eine Liste der im Template verfügbaren Variablen, kann man die struct Werte hier einsehen: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "Go Template Format String für die Status Ausgabe. Das Format von Go Templates ist hier beschrieben: https://pkg.go.dev/text/template\nFür eine Liste der im Template verfügbaren Variablen, kann man die struct Werte hier einsehen: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status", "Group ID: {{.groupID}}": "Gruppen ID: {{.groupID}}", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n": "Headlamp kann detailiertere Informationen anzeigen, wenn der Metrics-Server installiert ist. 
Um ihn zu installieren, führen Sie folgenden Befehl aus:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "Hypervisor-Signatur vor dem Gast in minikube verbergen (nur kvm2-Treiber)", @@ -358,6 +365,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "IP Adresse, die benutzt werden soll um Ports zu exponieren (nur docker und podman Treiber)", "IP address (ssh driver only)": "IP Adresse (nur für den SSH-Treiber)", "If present, writes to the provided file instead of stdout.": "Falls gesetzt, wird in die angegebene Datei geschrieben anstatt auf stdout.", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "Falls gesetzt, werden alle Treiber automatisch auf die aktuellste Version geupdated. Default: true", "If set, delete the current cluster if start fails and try again. Defaults to false.": "Falls gesetzt, lösche den Cluster wenn der Start fehlschlägt und versuche erneut zu starten. Default: false", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "Falls gesetzt, werden Metric Reports (CPU und Speicher Verwendung) deaktiviert, dies kann die Verwendung der CPU verbessern. Default: false.", @@ -459,6 +468,8 @@ "Networking and Connectivity Commands:": "Netzwerk- und Verbindungs-Befehle:", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "Es wurde keine IP-Addresse angegeben. Verwernden Sie --ssh-ip-address oder lesen Sie https://minikube.sigs.k8s.io/docs/drivers/ssh/", "No changes required for the \"{{.context}}\" context": "Keine Anpassungen erforderlich für den Kontext \"{{.context}}\"", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No minikube profile was found. ": "Kein Minikube Profil gefunden. ", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "Kein möglicher Treiber gefunden. Versuchen Sie mit --driver anzugeben oder schauen Sie unter https://minikube.sigs.k8s.io/docs/start/", "No such addon {{.name}}": "Addon {{.name}} existiert nicht", @@ -655,6 +666,7 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "Spezifiziere arbiträre Flags an, die an den Build übergeben werden sollen. (Format: key=value)", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "Das Spezifizieren von extra Disks ist derzeit nur von den folgenden Treibern unterstützt: {{.supported_drivers}}. 
Wenn du dieses Feature beisteuern kannst, erstelle bitte einen PR.", "StartHost failed, but will try again: {{.error}}": "StartHost fehlgeschlagen, aber es wird noch einmal versucht: {{.error}}", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "Starte Control Plane Node {{.name}} in Cluster {{.cluster}}", "Starting minikube without Kubernetes in cluster {{.cluster}}": "Starte Minikube ohne Kubernetes in Cluster {{.cluster}}", "Starting minikube without Kubernetes {{.name}} in cluster {{.cluster}}": "Starte Minikube ohne Kubernetes {{.name}} in Cluster {{.cluster}}", @@ -733,6 +745,14 @@ "The control plane node \"{{.name}}\" does not exist.": "Die Kontroll-Ebene für \"{{.name}}\" existiert nicht.", "The control plane node is not running (state={{.state}})": "Der Kontroll-Ebenen-Node läuft nicht (state={{.state}})", "The control plane node must be running for this command": "Der Kontroll-Ebenen-Node muss für diesen Befehl laufen", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used": "Der zu verwendende Cri-Socket-Pfad", "The cri socket path to be used.": "Der zu verwendende Cri-Socket-Pfad.", "The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "Der docker-env Befehl ist inkompatibel mit multi-node Clustern. Bitte verwende das 'registry' Addon: https://minikube.sigs.k8s.io/docs/handbook/registry/", @@ -781,6 +801,7 @@ "The services namespace": "Der Namespace des Service", "The socket_vmnet network is only supported on macOS": "Das socket_vmnet Netzwerk wird nur unter macOS unterstützt.", "The time interval for each check that wait performs in seconds": "Der Zeitintervall für jeden Check, den wait ausführt, in Sekunden", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "Der mit --format angegebene Wert ist ungültig", "The value passed to --format is invalid: {{.error}}": "Der mit --format angegebene Wert ist ungültig: {{.error}}", "The {{.driver_name}} driver should not be used with root privileges.": "Der Treiber {{.driver_name}} sollte nicht mit Root-Rechten verwendet werden.", @@ -832,6 +853,7 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "Kann das letzte Release Patch für die angegebene major.minor Version v{{.majorminor}} nicht erkennen.", "Unable to enable dashboard": "Kann Dashboard nicht aktivieren", "Unable to fetch latest version info": "Kann aktuellste Versions-Info nicht laden", + "Unable to find any control-plane nodes": "", "Unable to find control plane": "Kann Kontroll-Ebene nicht finden", "Unable to generate docs": "Kann Dokumente nicht generieren", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "Kann Dokumentation nicht genieren. Stellen Sie sicher, dass der angegebene Pfad ein Verzeichnis ist, existiert und es geschrieben werden kann (Schreibrechte)", @@ -839,6 +861,14 @@ "Unable to get bootstrapper: {{.error}}": "Bootstrapper kann nicht abgerufen werden: {{.error}}", "Unable to get command runner": "Kann Command Runner nicht holen", "Unable to get control plane status: {{.error}}": "Kann Kontroll-Ebene Status nicht holen: {{.error}}", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "Kann aktuellen Benutzer nicht holen", "Unable to get forwarded endpoint": "Kann weitergeleiteten Endpoint nicht laden", "Unable to get machine status": "Kann Maschinen Status nicht holen", @@ -848,6 +878,8 @@ "Unable to load cached images from config file.": "Zwischengespeicherte Bilder können nicht aus der Konfigurationsdatei geladen werden.", "Unable to load cached images: {{.error}}": "Kann gecachete Images nicht laden: {{.error}}", "Unable to load config: {{.error}}": "Konfig kann nicht geladen werden: {{.error}}", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load host": "Kann Host nicht laden", "Unable to load profile: {{.error}}": "Kann Profil nicht laden: {{.error}}", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "\"{{.kubernetes_version}}\" kann nicht geparst werden: {{.error}}", @@ -858,6 +890,7 @@ "Unable to push cached images: {{.error}}": "Kann gecachete Image nicht veröffentlichen (push): {{.error}}", "Unable to remove machine directory": "Kann Maschinen Verzeichnis nicht entfernen", "Unable to restart cluster, will reset it: {{.error}}": "Kann den Cluster nicht neustarten, werde ihn zurücksetzen (reset): {{.error}}", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "Kann existierenden Kubernetes v{{.old}} Cluster nicht auf Version v{{.new}} downgraden", "Unable to stop VM": "Kann VM nicht stoppen", "Unable to update {{.driver}} driver: {{.error}}": "Kann Treiber {{.driver}} nicht aktualisieren: {{.error}}", @@ -944,6 +977,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "Die Anzahl der CPUs eines existierenden Minikube Clusters kann nicht geändert werden. Bitte löschen Sie den Cluster zuerst.", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "Die Plattengröße eines existierenden Minikube Clusters kann nicht geändert werden. Bitte löschen Sie den Cluster zuerst.", "You cannot change the memory size for an existing minikube cluster. 
Please first delete the cluster.": "Die Speichergröße eines existierenden Minikube Clusters kann nicht geändert werden. Bitte löschen Sie den Cluster zuerst.", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "Es ist nicht möglich die statische IP eines existierenden Clusters zu ändern. Bitte löschen Sie den Cluster zuerst.", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "Sie können keine Addons in einem Cluster ohne Kubernetes aktivieren. Um Kubernetes in ihrem Cluster zu verwende, starten sie: minikube start --kubernetes-version=stable", "You have authenticated with a service account that does not have an associated JSON file. The GCP Auth addon requires credentials with a JSON file in order to continue.": "Sie haben sich mit einem Service-Account authentifiziert, welcher keine JSON-Datei zugeordnet ist. Das GCP Auth Addon benötigt Zugangsdaten in einer JSON Datei um weitermachen zu können.", @@ -991,6 +1025,7 @@ "error creating clientset": "Fehler beim Anlegen des Clientsets", "error creating urls": "Fehler beim Erstellen der URLs", "error fetching Kubernetes version list from GitHub": "Fehler beim Laden der Kubernetes Versionliste von GitHub", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "Fehler beim Ermitteln der Default-Einstellungen: {{.error}}", "error getting primary control plane": "Fehler beim Ermitteln der primären Kontroll-Ebene", "error getting ssh port": "Fehler beim Ermitteln des ssh Ports", diff --git a/translations/es.json b/translations/es.json index fa4aca02730b..2b47ced89517 100644 --- a/translations/es.json +++ b/translations/es.json @@ -55,8 +55,9 @@ "Add machine IP to NO_PROXY environment variable": "Agregar una IP de máquina a la variable de entorno NO_PROXY", "Add, delete, or push a local image into minikube": "Agrega, elimina, o empuja una imagen local dentro de minikube, haciendo (add, delete, push) respectivamente.", "Add, remove, or list additional nodes": "Usa (add, remove, list) para agregar, eliminar o listar nodos adicionales.", - "Adding a control-plane node is not yet supported, setting control-plane flag to false": "", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "Agregando el nodo {{.name}} al cluster {{.cluster}}.", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "Temas de ayuda adicionales", "Additional mount options, such as cache=fscache": "Opciones de montaje adicionales, por ejemplo cache=fscache", "Adds a node to the given cluster config, and starts it.": "Agrega un nodo a la configuración de cluster dada e iniciarlo.", @@ -102,6 +103,8 @@ "Cannot use both --output and --format options": "No se pueden usar ambas opciones (--output y --path)", "Cannot use the option --no-kubernetes on the {{.name}} driver": "", "Certificate {{.certPath}} has expired. Generating a new one...": "", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. 
Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "Comprueba si tienes pods innecesarios corriendo, con el comando 'kubectl get pods -A'", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "Comprueba la salida de 'journalctl -xeu kubelet', intenta pasar --extra-config=kubelet.cgroup-driver=systemd a minikube start", "Check that libvirt is setup properly": "Comprueba que libvirt esté configurado correctamente", @@ -135,6 +138,7 @@ "Could not process errors from failed deletion": "No se pudieron procesar los errores de la eliminación fallida", "Could not resolve IP address": "No se puede resolver la dirección IP", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Código de país de la réplica de imagen que quieras utilizar. Déjalo en blanco para usar el valor global. Los usuarios de China continental deben definirlo como cn.", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "Montando {{.name}}...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...": "Creando {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Creando {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...", @@ -230,6 +234,7 @@ "Error generating unset output": "No se a podido unsetear la salida", "Error getting cluster bootstrapper": "No se ha podido obtener el bootstrapper del clúster", "Error getting cluster config": "No se a podido obtener la configuración del clúster", + "Error getting control-plane node": "", "Error getting host": "No se ha podido obtener el host", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "No se ha podido obtener el puerto de enlace para el controlador '{{.driver_name}}': {{.error}} ", "Error getting primary control plane": "No se ha podido obtener el control plane primario", @@ -351,6 +356,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "", "Group ID: {{.groupID}}": "", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. 
To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "Permite ocultar la firma del hipervisor al invitado en minikube (solo con el controlador de kvm2)", "Hyper-V requires that memory MB be an even number, {{.memory}}MB was specified, try passing `--memory {{.suggestMemory}}`": "", @@ -359,6 +365,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "", "IP address (ssh driver only)": "", "If present, writes to the provided file instead of stdout.": "", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", "If set, delete the current cluster if start fails and try again. Defaults to false.": "", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "", @@ -377,7 +385,6 @@ "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", - "If true, the added node will be marked for work. Defaults to true.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:": "", @@ -459,7 +466,8 @@ "Networking and Connectivity Commands:": "", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "", "No changes required for the \"{{.context}}\" context": "", - "No minikube profile was found. ": "", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "No such addon {{.name}}": "", "No valid URL found for tunnel.": "", @@ -648,10 +656,9 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", - "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting minikube without Kubernetes in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "", "Starts a local kubernetes cluster": "Inicia un clúster de Kubernetes local", "Starts a node.": "", @@ -717,12 +724,15 @@ "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "", "The cluster dns domain name used in the Kubernetes cluster": "", "The cluster dns domain name used in the kubernetes cluster": "El nombre de dominio de DNS del clúster de Kubernetes", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. 
Use \"minikube node add\" to add nodes to an existing cluster.": "", "The container runtime to be used (docker, crio, containerd)": "El entorno de ejecución del contenedor (Docker, cri-o, containerd)", - "The control plane for \"{{.name}}\" is paused!": "", - "The control plane node \"{{.name}}\" does not exist.": "", - "The control plane node is not running (state={{.state}})": "", - "The control plane node must be running for this command": "", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used": "La ruta del socket de cri", "The cri socket path to be used.": "", "The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", @@ -756,7 +766,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", "The path on the file system where the error code docs in markdown need to be saved": "", @@ -770,6 +779,7 @@ "The services namespace": "", "The socket_vmnet network is only supported on macOS": "", "The time interval for each check that wait performs in seconds": "", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "The {{.driver_name}} driver should not be used with root privileges.": "El controlador {{.driver_name}} no se debe utilizar con privilegios de raíz.", @@ -781,11 +791,8 @@ "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "El proceso se puede automatizar si se define la variable de entorno CHANGE_MINIKUBE_NONE_USER=true", "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "", - "This control plane is not running! (state={{.state}})": "", "This driver does not yet work on your architecture. 
Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", - "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "Se conservará el contexto de kubectl actual y se creará uno de minikube.", "This will start the mount daemon and automatically mount files into minikube": "Se iniciará el daemon de activación y se activarán automáticamente los archivos en minikube", "This will start the mount daemon and automatically mount files into minikube.": "", @@ -820,23 +827,28 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", - "Unable to find control plane": "", + "Unable to find any control-plane nodes": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get CPU info: {{.err}}": "", "Unable to get bootstrapper: {{.error}}": "No se ha podido obtener el programa previo: {{.error}}", - "Unable to get command runner": "", - "Unable to get control plane status: {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "", - "Unable to get forwarded endpoint": "", - "Unable to get machine status": "", "Unable to get runtime": "", "Unable to kill mount process: {{.error}}": "", "Unable to list profiles: {{.error}}": "", "Unable to load cached images from config file.": "No se han podido cargar las imágenes almacenadas en caché del archivo de configuración.", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "No se ha podido cargar la configuración: {{.error}}", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "No se ha podido analizar la versión \"{{.kubernetes_version}}\": {{.error}}", "Unable to parse memory '{{.memory}}': {{.error}}": "", @@ -845,7 +857,7 @@ "Unable to pull images, which may be OK: {{.error}}": "No se ha podido recuperar imágenes, que podrían estar en buen estado: {{.error}}", "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", - "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: 
{{.error}}": "", @@ -931,6 +943,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -975,8 +988,8 @@ "error creating clientset": "", "error creating urls": "", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "", - "error getting primary control plane": "", "error getting ssh port": "", "error initializing tracing: {{.Error}}": "", "error parsing the input ip address for mount": "", diff --git a/translations/fr.json b/translations/fr.json index c31b9fd1047a..87a87b5bb0a7 100644 --- a/translations/fr.json +++ b/translations/fr.json @@ -57,7 +57,9 @@ "Add, delete, or push a local image into minikube": "Ajouter, supprimer ou pousser une image locale dans minikube", "Add, remove, or list additional nodes": "Ajouter, supprimer ou lister des nœuds supplémentaires", "Adding a control-plane node is not yet supported, setting control-plane flag to false": "L'ajout d'un nœud de plan de contrôle n'est pas encore pris en charge, définition de l'indicateur control-plane à false", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "Ajout du nœud {{.name}} au cluster {{.cluster}}", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "Rubriques d'aide supplémentaires", "Additional mount options, such as cache=fscache": "Options de montage supplémentaires, telles que cache=fscache", "Adds a node to the given cluster config, and starts it.": "Ajoute un nœud à la configuration du cluster et démarre le cluster.", @@ -103,6 +105,8 @@ "Cannot use both --output and --format options": "Impossible d'utiliser à la fois les options --output et --format", "Cannot use the option --no-kubernetes on the {{.name}} driver": "Impossible d'utiliser l'option --no-kubernetes sur le pilote {{.name}}", "Certificate {{.certPath}} has expired. Generating a new one...": "Le certificat {{.certPath}} a expiré. Génération d'un nouveau...", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. 
Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "Vérifiez si vous avez des pods inutiles en cours d'exécution en exécutant 'kubectl get po -A'", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "Vérifiez la sortie de 'journalctl -xeu kubelet', essayez de passer --extra-config=kubelet.cgroup-driver=systemd au démarrage de minikube", "Check that libvirt is setup properly": "Vérifiez que libvirt est correctement configuré", @@ -138,6 +142,7 @@ "Could not process errors from failed deletion": "Impossible de traiter les erreurs dues à l'échec de la suppression", "Could not resolve IP address": "Impossible de résoudre l'adresse IP", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "Code pays du miroir d'images à utiliser. Laissez ce paramètre vide pour utiliser le miroir international. Pour les utilisateurs situés en Chine continentale, définissez sa valeur sur \"cn\".", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "Création de l'installation {{.name}}…", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...": "Création de {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}Mo) ...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "Création de {{.machine_type}} {{.driver_name}} (CPUs={{.number_of_cpus}}, Mémoire={{.memory_size}}MB, Disque={{.disk_size}}MB)...", @@ -224,6 +229,7 @@ "Error generating unset output": "Erreur lors de la génération unset output", "Error getting cluster bootstrapper": "Erreur lors de l'obtention du programme d'amorçage du cluster", "Error getting cluster config": "Erreur lors de l'obtention de la configuration du cluster", + "Error getting control-plane node": "", "Error getting host": "Erreur lors de l'obtention de l'hôte", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "Erreur lors de l'obtention de la liaison de port pour le pilote '{{.driver_name}} : {{.error}}", "Error getting primary control plane": "Erreur lors de l'obtention du plan de contrôle principal", @@ -343,6 +349,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "Go chaîne de format de modèle pour la sortie de la vue de configuration. Le format des modèles Go peut être trouvé ici : https://pkg.go.dev/text/template\nPour la liste des variables accessibles pour le modèle, voir les valeurs de structure ici : https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "Go chaîne de format de modèle pour la sortie d'état. 
Le format des modèles Go peut être trouvé ici : https://pkg.go.dev/text/template\nPour la liste des variables accessibles pour le modèle, consultez les valeurs de structure ici : https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status", "Group ID: {{.groupID}}": "Identifiant du groupe: {{.groupID}}", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "Headlamp peut afficher des informations plus détaillées lorsque metrics-server est installé. Pour l'installer, exécutez :\n\n\tminikube{{.profileArg}} addons enable metrics-server\n", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n": "Headlamp peut afficher des informations plus détaillées lorsque metrics-server est installé. Pour l'installer, exécutez :\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "Masque la signature de l'hyperviseur de l'invité dans minikube (pilote kvm2 uniquement).", @@ -353,6 +360,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "Adresse IP à utiliser pour exposer les ports (pilote docker et podman uniquement)", "IP address (ssh driver only)": "Adresse IP (pilote ssh uniquement)", "If present, writes to the provided file instead of stdout.": "S'il est présent, écrit dans le fichier fourni au lieu de la sortie standard.", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "Si défini, met automatiquement à jour les pilotes vers la dernière version. La valeur par défaut est true.", "If set, delete the current cluster if start fails and try again. Defaults to false.": "Si défini, supprime le cluster actuel si le démarrage échoue et réessaye. La valeur par défaut est false.", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "S'il est défini, désactive les rapports de métriques (utilisation du processeur et de la mémoire), cela peut améliorer l'utilisation du processeur. La valeur par défaut est false.", @@ -452,6 +461,8 @@ "Networking and Connectivity Commands:": "Commandes de mise en réseau et de connectivité :", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "Aucune adresse IP fournie. Essayez de spécifier --ssh-ip-address, ou consultez https://minikube.sigs.k8s.io/docs/drivers/ssh/", "No changes required for the \"{{.context}}\" context": "Aucune modification requise pour le contexte \"{{.context}}\"", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No minikube profile was found. ": "Aucun profil minikube n'a été trouvé.", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "Aucun pilote possible n'a été détecté. 
Essayez de spécifier --driver, ou consultez https://minikube.sigs.k8s.io/docs/start/", "No such addon {{.name}}": "Aucun module de ce type {{.name}}", @@ -651,6 +662,7 @@ "Specify the port that the mount should be setup on, where 0 means any free port.": "Spécifiez le port sur lequel le montage doit être configuré, où 0 signifie tout port libre.", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "La spécification de disques supplémentaires n'est actuellement prise en charge que pour les pilotes suivants : {{.supported_drivers}}. Si vous pouvez contribuer à ajouter cette fonctionnalité, veuillez créer un PR.", "StartHost failed, but will try again: {{.error}}": "StartHost a échoué, mais va réessayer : {{.error}}", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "Démarrage du noeud de plan de contrôle {{.name}} dans le cluster {{.cluster}}", "Starting minikube without Kubernetes in cluster {{.cluster}}": "Démarrage de minikube sans Kubernetes dans le cluster {{.cluster}}", "Starting minikube without Kubernetes {{.name}} in cluster {{.cluster}}": "Démarrage de minikube sans Kubernetes {{.name}} dans le cluster {{.cluster}}", @@ -720,6 +732,14 @@ "The control plane node \"{{.name}}\" does not exist.": "Le nœud du plan de contrôle \"{{.name}}\" n'existe pas.", "The control plane node is not running (state={{.state}})": "Le nœud du plan de contrôle n'est pas en cours d'exécution (state={{.state}})", "The control plane node must be running for this command": "Le nœud du plan de contrôle doit être en cours d'exécution pour cette commande", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "Le chemin de socket cri à utiliser.", "The default network for QEMU will change from 'user' to 'socket_vmnet' in a future release": "Le réseau par défaut pour QEMU passera de 'user' à 'socket_vmnet' dans une version future", "The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "La commande docker-env est incompatible avec les clusters multi-nœuds. Utilisez le module 'registry' : https://minikube.sigs.k8s.io/docs/handbook/registry/", @@ -767,6 +787,7 @@ "The services namespace": "L'espace de noms des services", "The socket_vmnet network is only supported on macOS": "Le réseau socket_vmnet n'est pris en charge que sur macOS", "The time interval for each check that wait performs in seconds": "L'intervalle de temps pour chaque contrôle que wait effectue en secondes", + "The total number of nodes to spin up. 
Defaults to 1.": "", "The value passed to --format is invalid": "La valeur passée à --format n'est pas valide", "The value passed to --format is invalid: {{.error}}": "La valeur passée à --format n'est pas valide : {{.error}}", "There are a couple ways to enable the required file sharing:\n1. Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "Il existe plusieurs manières d'activer le partage de fichiers requis :\n1. Activez \"Utiliser le moteur basé sur WSL 2\" dans Docker Desktop\nou\n2. Activer le partage de fichiers dans Docker Desktop pour le répertoire %s%s", @@ -815,12 +836,21 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "Impossible de détecter la dernière version du correctif pour la version major.minor spécifiée v{{.majorminor}}", "Unable to enable dashboard": "Impossible d'activer le tableau de bord", "Unable to fetch latest version info": "Impossible de récupérer les informations sur la dernière version", + "Unable to find any control-plane nodes": "", "Unable to find control plane": "Impossible de trouver le plan de contrôle", "Unable to generate docs": "Impossible de générer des documents", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "Impossible de générer la documentation. Veuillez vous assurer que le chemin spécifié est un répertoire, existe \u0026 vous avez la permission d'y écrire.", "Unable to get CPU info: {{.err}}": "Impossible d'obtenir les informations sur le processeur : {{.err}}", "Unable to get command runner": "Impossible d'obtenir le lanceur de commandes", "Unable to get control plane status: {{.error}}": "Impossible d'obtenir l'état du plan de contrôle : {{.error}}", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "Impossible d'obtenir l'utilisateur actuel", "Unable to get forwarded endpoint": "Impossible d'obtenir le point de terminaison transféré", "Unable to get machine status": "Impossible d'obtenir l'état de la machine", @@ -829,6 +859,8 @@ "Unable to list profiles: {{.error}}": "Impossible de répertorier les profils : {{.error}}", "Unable to load cached images: {{.error}}": "Impossible de charger les images mises en cache : {{.error}}", "Unable to load config: {{.error}}": "Impossible de charger la configuration : {{.error}}", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load host": "Impossible de charger l'hôte", "Unable to load profile: {{.error}}": "Impossible de charger le profil : {{.error}}", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "Impossible d'analyser la version \"{{.kubernetes_version}}\" : {{.error}}", @@ -840,6 +872,7 @@ 
"Unable to push cached images: {{.error}}": "Impossible de pousser les images mises en cache : {{.error}}", "Unable to remove machine directory": "Impossible de supprimer le répertoire de la machine", "Unable to restart cluster, will reset it: {{.error}}": "Impossible de redémarrer le cluster, va être réinitialisé : {{.error}}", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "Impossible de rétrograder en toute sécurité le cluster Kubernetes v{{.old}} existant vers v{{.new}}", "Unable to stop VM": "Impossible d'arrêter la VM", "Unable to update {{.driver}} driver: {{.error}}": "Impossible de mettre à jour le pilote {{.driver}} : {{.error}}", @@ -926,6 +959,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "Vous ne pouvez pas modifier les processeurs d'un cluster minikube existant. Veuillez d'abord supprimer le cluster.", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "Vous ne pouvez pas modifier la taille du disque pour un cluster minikube existant. Veuillez d'abord supprimer le cluster.", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "Vous ne pouvez pas modifier la taille de la mémoire d'un cluster minikube existant. Veuillez d'abord supprimer le cluster.", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "Vous ne pouvez pas modifier l'adresse IP statique d'un cluster minikube existant. Veuillez d'abord supprimer le cluster.", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "Vous ne pouvez pas activer les addons sur un cluster sans Kubernetes, pour activer Kubernetes sur votre cluster, exécutez : minikube start --kubernetes-version=stable", "You have authenticated with a service account that does not have an associated JSON file. The GCP Auth addon requires credentials with a JSON file in order to continue.": "Vous vous êtes authentifié avec un compte de service qui n'a pas de fichier JSON associé. 
Le module complémentaire GCP Auth nécessite des informations d'identification avec un fichier JSON pour continuer.", @@ -976,6 +1010,7 @@ "error creating clientset": "erreur lors de la création de l'ensemble de clients", "error creating urls": "erreur lors de la création d'urls", "error fetching Kubernetes version list from GitHub": "erreur lors de la récupération de la liste des versions de Kubernetes à partir de GitHub", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "erreur lors de l'obtention des valeurs par défaut : {{.error}}", "error getting primary control plane": "erreur lors de l'obtention du plan de contrôle principal", "error getting ssh port": "erreur lors de l'obtention du port ssh", diff --git a/translations/ja.json b/translations/ja.json index e1ad7f9b28df..691d2967fe66 100644 --- a/translations/ja.json +++ b/translations/ja.json @@ -52,7 +52,9 @@ "Add machine IP to NO_PROXY environment variable": "マシンの IP アドレスを NO_PROXY 環境変数に追加します", "Add, remove, or list additional nodes": "追加のノードを追加、削除またはリストアップします", "Adding a control-plane node is not yet supported, setting control-plane flag to false": "コントロールプレーンノードの追加はサポートされていません。control-plane フラグを false に設定します", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "{{.name}} ノードを {{.cluster}} クラスターに追加します", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "追加のトピック", "Adds a node to the given cluster config, and starts it.": "ノードをクラスターの設定に追加して、起動します。", "Adds a node to the given cluster.": "ノードをクラスターに追加します。", @@ -97,6 +99,8 @@ "Cannot use both --output and --format options": "--output と --format オプションの両方を使用することはできません", "Cannot use the option --no-kubernetes on the {{.name}} driver": "{{.name}} ドライバーでは、オプション --no-kubernetes は使用できません", "Certificate {{.certPath}} has expired. Generating a new one...": "証明書 {{.certPath}} の有効期限が切れています。新しい証明書を生成しています...", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "不要な Pod が実行されていないかどうか、'kubectl get po -A' を実行して確認してください", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "'journalctl -xeu kubelet' の出力を確認し、minikube start に --extra-config=kubelet.cgroup-driver=systemd を指定してみてください", "Check that libvirt is setup properly": "libvirt が正しくセットアップされていることを確認してください", @@ -130,6 +134,7 @@ "Could not process errors from failed deletion": "削除の失敗によるエラーを処理できませんでした", "Could not resolve IP address": "IP アドレスの解決ができませんでした", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "使用するイメージミラーの国コード。グローバルのものを使用する場合は空のままにします。中国本土のユーザーの場合は、cn に設定します。", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "マウント {{.name}} を作成しています...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) ...": "{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB) を作成しています...", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) を作成しています...", @@ -212,6 +217,7 @@ "Error generating unset output": "unset の出力を生成中にエラーが発生しました", "Error getting cluster bootstrapper": "クラスターのブートストラッパーを取得中にエラーが発生しました", "Error getting cluster config": "クラスターの設定を取得中にエラーが発生しました", + "Error getting control-plane node": "", "Error getting host": "ホストを取得中にエラーが発生しました", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "'{{.driver_name}}' ドライバー用のポートをバインディング中にエラーが発生しました: {{.error}}", "Error getting primary control plane": "最初のコントロールプレーンを取得中にエラーが発生しました", @@ -329,6 +335,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "設定ビュー出力用の Go テンプレートフォーマット文字列。Go テンプレートのフォーマットはこちら: https://pkg.go.dev/text/template\nテンプレートでアクセス可能な変数の一覧は、こちらの構造化変数を参照してください: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "状態出力用の Go テンプレートフォーマット文字列。Go テンプレートのフォーマットはこちら: https://pkg.go.dev/text/template\nテンプレートでアクセス可能な変数の一覧は、こちらの構造化変数を参照してください: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status", "Group ID: {{.groupID}}": "グループ ID: {{.groupID}}", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n": "metrics-server がインストールされていると、Headlamp はより詳細な情報を表示できます。インストールするには、次のコマンドを実行します:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "minikube 中のゲストに対してハイパーバイザー署名を非表示にします (kvm2 ドライバーのみ)", @@ -338,6 +345,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "ポートの expose に使用する IP アドレス (docker, podman ドライバーのみ)", "IP address (ssh driver only)": "IP アドレス (SSH ドライバーのみ)", "If present, writes to the provided file instead of stdout.": "指定すると、標準出力の代わりに指定されたファイルに出力します。", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. 
Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "設定すると、自動的にドライバーを最新バージョンに更新します。デフォルトは true です。", "If set, delete the current cluster if start fails and try again. Defaults to false.": "設定すると、現在のクラスターの起動に失敗した場合はクラスターを削除して再度試行します。デフォルトは false です。", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "設定すると、メトリクス報告 (CPU とメモリー使用量) を無効化します。これは CPU 使用量を改善できます。デフォルト値は false です。", @@ -434,6 +443,8 @@ "Networking and Connectivity Commands:": "ネットワーキングおよび接続性コマンド:", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "IP アドレスが提供されていません。--ssh-ip-address 指定を試すか、https://minikube.sigs.k8s.io/docs/drivers/ssh/ を参照してください", "No changes required for the \"{{.context}}\" context": "「{{.context}}」コンテキストに必要な変更がありません", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No minikube profile was found. ": "minikube プロファイルが見つかりませんでした。", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "利用可能なドライバーが検出されませんでした。--driver 指定を試すか、https://minikube.sigs.k8s.io/docs/start/ を参照してください", "No such addon {{.name}}": "{{.name}} というアドオンはありません", @@ -623,6 +634,7 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "ビルドに渡す任意のフラグを指定します (形式: key=value)。", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "追加ディスク指定は現在 {{.supported_drivers}} ドライバーのみ対応しています。本機能の追加に貢献可能な場合、PR を作成してください。", "StartHost failed, but will try again: {{.error}}": "StartHost に失敗しましたが、再度試してみます: {{.error}}", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "{{.cluster}} クラスター中のコントロールプレーンの {{.name}} ノードを起動しています", "Starting minikube without Kubernetes in cluster {{.cluster}}": "{{.cluster}} クラスター中の Kubernetes なしで minikube を起動しています", "Starting minikube without Kubernetes {{.name}} in cluster {{.cluster}}": "{{.cluster}} クラスター中の Kubernetes なしで minikube {{.name}} を起動しています", @@ -689,6 +701,14 @@ "The control plane node \"{{.name}}\" does not exist.": "「{{.name}}」コントロールプレーンノードが存在しません。", "The control plane node is not running (state={{.state}})": "コントロールプレーンノードは実行中ではありません (state={{.state}})", "The control plane node must be running for this command": "このコマンドではコントロールプレーンノードが実行中でなければなりません", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "使用される CRI ソケットパス。", "The docker-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "docker-env コマンドはマルチノードクラスターと互換性がありません。'registry' アドオンを使用してください: https://minikube.sigs.k8s.io/docs/handbook/registry/", "The docker-env command is only compatible with the \"docker\" runtime, but this cluster was configured to use the \"{{.runtime}}\" runtime.": "docker-env コマンドは「docker」ランタイムとだけ互換性がありますが、このクラスターは「{{.runtime}}」ランタイムを使用するよう設定されています。", @@ -734,6 +754,7 @@ "The services namespace": "サービスネームスペース", "The socket_vmnet network is only supported on macOS": "socket_vmnet ネットワークは macOS でのみサポートされます", "The time interval for each check that wait performs in seconds": "実行待機チェックの時間間隔 (秒)", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "--format の値が無効です", "The value passed to --format is invalid: {{.error}}": "--format の値が無効です: {{.error}}", "There are a couple ways to enable the required file sharing:\n1. Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "必要なファイル共有を有効にする方法が 2 つあります:\n1. Docker Desktop 中の「Use the WSL 2 based engine」を有効にする\nまたは\n2. %s%s ディレクトリー用の Docker Desktop でファイル共有を有効にする", @@ -779,12 +800,21 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "ダッシュボードが有効になりません", "Unable to fetch latest version info": "最新バージョン情報を取得できません", + "Unable to find any control-plane nodes": "", "Unable to find control plane": "コントロールプレーンが見つかりません", "Unable to generate docs": "ドキュメントを生成できません", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "ドキュメントを生成できません。指定されたパスが、書き込み権限が付与された既存のディレクトリーかどうか確認してください。", "Unable to get CPU info: {{.err}}": "CPU 情報が取得できません: {{.err}}", "Unable to get command runner": "コマンドランナーを取得できません", "Unable to get control plane status: {{.error}}": "コントロールプレーンの状態を取得できません: {{.error}}", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "現在のユーザーを取得できません", "Unable to get forwarded endpoint": "フォワードされたエンドポイントを取得できません", "Unable to get machine status": "マシンの状態を取得できません", @@ -793,6 +823,8 @@ "Unable to list profiles: {{.error}}": "プロファイルのリストを作成できません: {{.error}}", "Unable to load cached images: {{.error}}": "キャッシュされたイメージを読み込めません: {{.error}}", "Unable to load config: {{.error}}": "設定を読み込めません: {{.error}}", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load host": "ホストを読み込めません", "Unable to load profile: {{.error}}": "プロファイルを読み込めません: {{.error}}", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "「{{.kubernetes_version}}」を解析できません: {{.error}}", @@ -802,6 +834,7 @@ "Unable to push cached images: {{.error}}": "キャッシュされたイメージを登録できません: 
{{.error}}", "Unable to remove machine directory": "マシンディレクトリーを削除できません", "Unable to restart cluster, will reset it: {{.error}}": "クラスターを再起動できません (リセットします): {{.error}}", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "既存の Kubernetes v{{.old}} クラスターを v{{.new}} に安全にバージョンダウンできません", "Unable to stop VM": "VM を停止できません", "Unable to update {{.driver}} driver: {{.error}}": "{{.driver}} ドライバーを更新できません: {{.error}}", @@ -883,6 +916,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "既存の minikube クラスターに対して、CPU を変更できません。最初にクラスターを削除してください。", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "既存の minikube クラスターに対して、ディスクサイズを変更できません。最初にクラスターを削除してください。", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "既存の minikube クラスターに対して、メモリサイズを変更できません。最初にクラスターを削除してください。", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "既存の minikube クラスターに対して、静的 IP を変更できません。最初にクラスターを削除してください。", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "クラスター上で Kubernetes なしでアドオンを有効にすることはできません、クラスター上で Kubernetes を有効にするには、 minikube start --kubernetes-version=stable を実行してください", "You have authenticated with a service account that does not have an associated JSON file. The GCP Auth addon requires credentials with a JSON file in order to continue.": "関連する JSON ファイルがないサービスアカウントで認証しています。GCP Auth アドオンは、作業を続行するために JSON ファイル付きクレデンシャルを要求します。", @@ -929,6 +963,7 @@ "error creating clientset": "clientset 作成中にエラー", "error creating urls": "URL 作成でエラー", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "デフォルト取得中にエラー: {{.error}}", "error getting primary control plane": "最初のコントロールプレーン取得中にエラー", "error getting ssh port": "SSH ポートを取得中にエラー", diff --git a/translations/ko.json b/translations/ko.json index 1dc938b6c99e..36fe92561e63 100644 --- a/translations/ko.json +++ b/translations/ko.json @@ -57,7 +57,9 @@ "Add, delete, or push a local image into minikube": "minikube에 로컬 이미지를 추가하거나 삭제, 푸시합니다", "Add, remove, or list additional nodes": "노드를 추가하거나 삭제, 나열합니다", "Adding a control-plane node is not yet supported, setting control-plane flag to false": "control-plane 노드를 추가하는 것은 아직 지원되지 않습니다. control-plane 플래그를 false로 설정합니다", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. 
Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "노드 {{.name}} 를 클러스터 {{.cluster}} 에 추가합니다", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "추가적인 도움말 주제", "Additional mount options, such as cache=fscache": "cache=fscache 와 같은 추가적인 마운트 옵션", "Adds a node to the given cluster config, and starts it.": "주어진 클러스터 구성에 노드 하나를 추가하고 시작합니다", @@ -105,6 +107,8 @@ "Cannot use both --output and --format options": "--output 과 --format 옵션을 함께 사용할 수 없습니다", "Cannot use the option --no-kubernetes on the {{.name}} driver": "{{.name}} 드라이버에서 --no-kubernetes 옵션을 사용할 수 없습니다", "Certificate {{.certPath}} has expired. Generating a new one...": "{{.certPath}} 인증서가 만료되었습니다. 새로운 것을 생성하는 중...", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "'kubectl get po -A' 를 실행하여 불필요한 pod 가 실행 중인지 확인하세요", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "'journalctl -xeu kubelet' 의 출력을 확인하고, minikube start 에 --extra-config=kubelet.cgroup-driver=systemd 를 전달해보세요", "Check that libvirt is setup properly": "libvirt 가 올바르게 설정되었는지 확인하세요", @@ -139,6 +143,7 @@ "Could not process errors from failed deletion": "삭제 실패로 인한 오류를 처리할 수 없습니다", "Could not resolve IP address": "IP 주소를 확인할 수 없습니다", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating Kubernetes in {{.driver_name}} {{.machine_type}} with (CPUs={{.number_of_cpus}}) ({{.number_of_host_cpus}} available), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "{{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}} ({{.number_of_host_cpus}}MB 유효한), Memory={{.memory_size}}MB ({{.host_memory_size}}MB 유효한) ...", "Creating mount {{.name}} ...": "마운트 {{.name}} 를 생성하는 중 ...", "Creating {{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "{{.driver_name}} VM (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) 를 생성하는 중 ...", @@ -235,12 +240,12 @@ "Error getting cluster bootstrapper": "클러스터 부트스트래퍼 조회 오류", "Error getting cluster config": "클러스터 컨피그 조회 오류", "Error getting config": "컨피그 조회 오류", + "Error getting control-plane node": "", "Error getting host": "호스트 조회 오류", "Error getting host IP": "호스트 IP 조회 오류", "Error getting host status": "호스트 상태 조회 오류", "Error getting machine logs": "머신 로그 조회 오류", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", - "Error getting primary control plane": "", "Error getting service status": "서비스 상태 조회 오류", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "ssh 클라이언트 조회 오류", @@ -367,6 +372,7 @@ "Go template format string for the config view output. 
The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "", "Group ID: {{.groupID}}": "", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Have you set up libvirt correctly?": "libvirt 설정을 알맞게 하셨습니까?", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "", @@ -376,6 +382,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "", "IP address (ssh driver only)": "", "If present, writes to the provided file instead of stdout.": "", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", "If set, delete the current cluster if start fails and try again. Defaults to false.": "", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "", @@ -393,7 +401,6 @@ "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", - "If true, the added node will be marked for work. Defaults to true.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:": "", @@ -475,7 +482,8 @@ "Networking and Connectivity Commands:": "", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "", "No changes required for the \"{{.context}}\" context": "", - "No minikube profile was found. ": "", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "No such addon {{.name}}": "", "No valid URL found for tunnel.": "", @@ -661,12 +669,12 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. 
If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "{{.cluster}} 클러스터의 {{.name}} 컨트롤 플레인 노드를 시작하는 중", "Starting minikube without Kubernetes in cluster {{.cluster}}": "", "Starting node": "노드를 시작하는 중", "Starting node {{.name}} in cluster {{.cluster}}": "{{.cluster}} 클러스터의 {{.name}} 노드를 시작하는 중", "Starting tunnel for service {{.service}}.": "{{.service}} 서비스의 터널을 시작하는 중", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다", "Starts a local kubernetes cluster": "로컬 쿠버네티스 클러스터를 시작합니다", "Starts a node.": "노드를 시작합니다", @@ -726,11 +734,18 @@ "The base image to use for docker/podman drivers. Intended for local development.": "", "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "", "The cluster dns domain name used in the Kubernetes cluster": "", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use \"minikube node add\" to add nodes to an existing cluster.": "", "The control plane for \"{{.name}}\" is paused!": "\"{{.name}}\"의 컨트롤 플레인이 중지되었습니다!", "The control plane node \"{{.name}}\" does not exist.": "\"{{.name}}\" 컨트롤 플레인 노드가 존재하지 않습니다.", "The control plane node is not running (state={{.state}})": "컨트롤 플레인 노드가 실행 상태가 아닙니다 (상태={{.state}})", "The control plane node must be running for this command": "컨트롤 플레인 노드는 실행 상태여야 합니다", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "", "The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "", @@ -761,7 +776,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. 
One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", "The path on the file system where the error code docs in markdown need to be saved": "", @@ -775,6 +789,7 @@ "The services namespace": "", "The socket_vmnet network is only supported on macOS": "", "The time interval for each check that wait performs in seconds": "", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "There are a couple ways to enable the required file sharing:\n1. Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "", @@ -784,11 +799,8 @@ "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "", - "This control plane is not running! (state={{.state}})": "", "This driver does not yet work on your architecture. Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", - "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", "This {{.type}} is having trouble accessing https://{{.repository}}": "", @@ -819,16 +831,20 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "대시보드를 활성화할 수 없습니다", "Unable to fetch latest version info": "최신 버전 정보를 가져올 수 없습니다", - "Unable to find control plane": "", + "Unable to find any control-plane nodes": "", "Unable to generate docs": "문서를 생성할 수 없습니다", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get CPU info: {{.err}}": "", "Unable to get VM IP address": "가상 머신 IP 주소를 조회할 수 없습니다", - "Unable to get command runner": "", - "Unable to get control plane status: {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "현재 사용자를 조회할 수 없습니다", - "Unable to get forwarded endpoint": "", - "Unable to get machine status": "", "Unable to get runtime": "런타임을 조회할 수 없습니다", "Unable to get the status of the {{.name}} cluster.": "{{.name}} 클러스터의 상태를 조회할 수 없습니다", "Unable to kill mount process: {{.error}}": "마운트 프로세스를 중지할 수 없습니다: {{.error}}", @@ -836,7 +852,8 @@ "Unable to load cached images from config file.": "컨피그 파일로부터 캐시된 이미지를 로드할 수 없습니다", "Unable to load cached images: {{.error}}": "캐시된 이미지를 로드할 수 없습니다: {{.error}}", "Unable to load config: {{.error}}": "컨피그를 로드할 수 없습니다: {{.error}}", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": " \"{{.kubernetes_version}}\" 를 파싱할 수 없습니다: {{.error}}", "Unable to parse memory '{{.memory}}': {{.error}}": "", @@ -845,7 +862,7 @@ "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", "Unable to remove machine directory: %v": "머신 디렉토리를 제거할 수 없습니다: %v", - "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "", "Unable to start VM. Please investigate and run 'minikube delete' if possible": "가상 머신을 시작할 수 없습니다. 확인 후 가능하면 'minikube delete' 를 실행하세요", "Unable to stop VM": "가상 머신을 중지할 수 없습니다", @@ -932,6 +949,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. 
The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -980,8 +998,8 @@ "error creating machine client": "머신 client 생성 오류", "error creating urls": "", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "", - "error getting primary control plane": "", "error getting ssh port": "ssh 포트 조회 오류", "error initializing tracing: {{.Error}}": "", "error parsing the input ip address for mount": "", diff --git a/translations/pl.json b/translations/pl.json index 03de5dad4599..e1b567a5e23f 100644 --- a/translations/pl.json +++ b/translations/pl.json @@ -54,8 +54,9 @@ "Add machine IP to NO_PROXY environment variable": "Dodaj IP serwera do zmiennej środowiskowej NO_PROXY", "Add, delete, or push a local image into minikube": "Dodaj, usuń lub wypchnij lokalny obraz do minikube", "Add, remove, or list additional nodes": "Dodaj, usuń lub wylistuj pozostałe węzły", - "Adding a control-plane node is not yet supported, setting control-plane flag to false": "", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "Dodawanie węzła {{.name}} do klastra {{.cluster}}", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "Dodatkowe tematy pomocy", "Additional mount options, such as cache=fscache": "Dodatkowe opcje montowania, jak na przykład cache=fscache", "Adds a node to the given cluster config, and starts it.": "Dodaje węzeł do konfiguracji danego klastra i wystartowuje go", @@ -101,6 +102,8 @@ "Cannot use both --output and --format options": "Nie można użyć obydwu opcji --output i --format jednocześnie", "Cannot use the option --no-kubernetes on the {{.name}} driver": "", "Certificate {{.certPath}} has expired. Generating a new one...": "", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "Sprawdź czy są uruchomione jakieś niepotrzebne pody za pomocą komendy: 'kubectl get pod -A' ", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that libvirt is setup properly": "Sprawdź czy bibliteka libvirt jest poprawnie zainstalowana", @@ -135,6 +138,7 @@ "Could not process errors from failed deletion": "", "Could not resolve IP address": "", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Created a new profile : {{.profile_name}}": "Stworzono nowy profil : {{.profile_name}}", "Creating a new profile failed": "Tworzenie nowego profilu nie powiodło się", "Creating mount {{.name}} ...": "", @@ -228,9 +232,9 @@ "Error generating unset output": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", + "Error getting control-plane node": "", "Error getting host": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", - "Error getting primary control plane": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", @@ -350,6 +354,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "", "Group ID: {{.groupID}}": "", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Have you set up libvirt correctly?": "Czy napewno skonfigurowano libvirt w sposób prawidłowy?", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "", @@ -359,6 +364,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "", "IP address (ssh driver only)": "", "If present, writes to the provided file instead of stdout.": "", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", "If set, delete the current cluster if start fails and try again. Defaults to false.": "", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "", @@ -376,7 +383,6 @@ "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", - "If true, the added node will be marked for work. Defaults to true.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If using the none driver, ensure that systemctl is installed": "Jeśli użyto sterownika 'none', upewnij się że systemctl jest zainstalowany", "If you are running minikube within a VM, consider using --driver=none:": "", @@ -461,6 +467,8 @@ "Networking and Connectivity Commands:": "", "No IP address provided. 
Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "Nie znaleziono adresu IP. Spróbuj przekazać adres IP za pomocą flagi --ssh-ip-address lub odwiedź https://minikube.sigs.k8s.io/docs/drivers/ssh/", "No changes required for the \"{{.context}}\" context": "Żadne zmiany nie są wymagane dla kontekstu \"{{.context}}\"", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No minikube profile was found. ": "Nie znaleziono żadnego profilu minikube", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "Nie znaleziono żadnego możliwego sterownika. Spróbuj przekazać sterownik za pomocą flagi --driver lub odwiedź https://minikube.sigs.k8s.io/docs/start/", "No such addon {{.name}}": "Nie istnieje addon {{.name}}", @@ -660,10 +668,9 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", - "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting minikube without Kubernetes in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "", "Starts a local kubernetes cluster": "Uruchamianie lokalnego klastra kubernetesa", "Starts a node.": "", @@ -728,12 +735,15 @@ "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "", "The cluster dns domain name used in the Kubernetes cluster": "", "The cluster dns domain name used in the kubernetes cluster": "Domena dns klastra użyta przez kubernetesa", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use \"minikube node add\" to add nodes to an existing cluster.": "", "The container runtime to be used (docker, crio, containerd)": "Runtime konteneryzacji (docker, crio, containerd).", - "The control plane for \"{{.name}}\" is paused!": "", - "The control plane node \"{{.name}}\" does not exist.": "", - "The control plane node is not running (state={{.state}})": "", - "The control plane node must be running for this command": "", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "", "The docker service is currently not active": "Serwis docker jest nieaktywny", "The docker-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", @@ -768,7 +778,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", "The path on the file system where the error code docs in markdown need to be saved": "", @@ -782,6 +791,7 @@ "The services namespace": "", "The socket_vmnet network is only supported on macOS": "", "The time interval for each check that wait performs in seconds": "", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "Wartość przekazana do --format jest nieprawidłowa", "The value passed to --format is invalid: {{.error}}": "Wartość przekazana do --format jest nieprawidłowa: {{.error}}", "The {{.driver_name}} driver should not be used with root privileges.": "{{.driver_name}} nie powinien być używany z przywilejami root'a.", @@ -792,11 +802,8 @@ "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "", - "This control plane is not running! (state={{.state}})": "", "This driver does not yet work on your architecture. Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", - "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", "This {{.type}} is having trouble accessing https://{{.repository}}": "", @@ -830,21 +837,26 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", - "Unable to find control plane": "", + "Unable to find any control-plane nodes": "", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get CPU info: {{.err}}": "", - "Unable to get command runner": "", - "Unable to get control plane status: {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "", - "Unable to get forwarded endpoint": "", - "Unable to get machine status": "", "Unable to get runtime": "", "Unable to kill mount process: {{.error}}": "", "Unable to list profiles: {{.error}}": "", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", "Unable to parse memory '{{.memory}}': {{.error}}": "", @@ -852,7 +864,7 @@ "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", - "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "", "Unable to start VM": "Nie można uruchomić maszyny wirtualnej", "Unable to stop VM": "Nie można zatrzymać maszyny wirtualnej", @@ -940,6 +952,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. 
The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -985,8 +998,8 @@ "error creating clientset": "", "error creating urls": "", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "", - "error getting primary control plane": "", "error getting ssh port": "", "error initializing tracing: {{.Error}}": "", "error parsing the input ip address for mount": "", diff --git a/translations/ru.json b/translations/ru.json index be326db47f36..5d1ce688e6e8 100644 --- a/translations/ru.json +++ b/translations/ru.json @@ -48,8 +48,8 @@ "Add image to cache for all running minikube clusters": "", "Add machine IP to NO_PROXY environment variable": "", "Add, remove, or list additional nodes": "", - "Adding a control-plane node is not yet supported, setting control-plane flag to false": "", - "Adding node {{.name}} to cluster {{.cluster}}": "", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "", "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", @@ -93,6 +93,8 @@ "Cannot use both --output and --format options": "", "Cannot use the option --no-kubernetes on the {{.name}} driver": "", "Certificate {{.certPath}} has expired. Generating a new one...": "", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that libvirt is setup properly": "", @@ -124,6 +126,7 @@ "Could not process errors from failed deletion": "", "Could not resolve IP address": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{if not .number_of_cpus}}no-limit{{else}}{{.number_of_cpus}}{{end}}, Memory={{if not .memory_size}}no-limit{{else}}{{.memory_size}}MB{{end}}) ...": "", @@ -205,9 +208,9 @@ "Error generating unset output": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", + "Error getting control-plane node": "", "Error getting host": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", - "Error getting primary control plane": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", @@ -317,6 +320,7 @@ "Go template format string for the config view output. 
The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "", "Group ID: {{.groupID}}": "", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "", "Hyper-V requires that memory MB be an even number, {{.memory}}MB was specified, try passing `--memory {{.suggestMemory}}`": "", @@ -325,6 +329,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "", "IP address (ssh driver only)": "", "If present, writes to the provided file instead of stdout.": "", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", "If set, delete the current cluster if start fails and try again. Defaults to false.": "", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. Defaults to false.": "", @@ -342,7 +348,6 @@ "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", - "If true, the added node will be marked for work. Defaults to true.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:": "", @@ -421,7 +426,8 @@ "Networking and Connectivity Commands:": "", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "", "No changes required for the \"{{.context}}\" context": "", - "No minikube profile was found. ": "", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "No such addon {{.name}}": "", "No valid URL found for tunnel.": "", @@ -604,10 +610,10 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. 
If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "Запускается control plane узел {{.name}} в кластере {{.cluster}}", "Starting minikube without Kubernetes in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "", "Starts a node.": "", "Starts an existing stopped node in a cluster.": "", @@ -664,11 +670,14 @@ "The base image to use for docker/podman drivers. Intended for local development.": "", "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "", "The cluster dns domain name used in the Kubernetes cluster": "", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use \"minikube node add\" to add nodes to an existing cluster.": "", - "The control plane for \"{{.name}}\" is paused!": "", - "The control plane node \"{{.name}}\" does not exist.": "", - "The control plane node is not running (state={{.state}})": "", - "The control plane node must be running for this command": "", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "", "The docker-env command is incompatible with multi-node clusters. Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "", @@ -699,7 +708,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", "The path on the file system where the error code docs in markdown need to be saved": "", @@ -713,6 +721,7 @@ "The services namespace": "", "The socket_vmnet network is only supported on macOS": "", "The time interval for each check that wait performs in seconds": "", + "The total number of nodes to spin up. 
Defaults to 1.": "", "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "There are a couple ways to enable the required file sharing:\n1. Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "", @@ -722,11 +731,8 @@ "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "", - "This control plane is not running! (state={{.state}})": "", "This driver does not yet work on your architecture. Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", - "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", "This {{.type}} is having trouble accessing https://{{.repository}}": "", @@ -757,21 +763,26 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", - "Unable to find control plane": "", + "Unable to find any control-plane nodes": "", "Unable to generate docs": "", "Unable to generate the documentation. Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get CPU info: {{.err}}": "", - "Unable to get command runner": "", - "Unable to get control plane status: {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "", - "Unable to get forwarded endpoint": "", - "Unable to get machine status": "", "Unable to get runtime": "", "Unable to kill mount process: {{.error}}": "", "Unable to list profiles: {{.error}}": "", "Unable to load cached images: {{.error}}": "Невозможно загрузить образы из кэша: {{.error}}", "Unable to load config: {{.error}}": "", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", "Unable to parse memory '{{.memory}}': {{.error}}": "", @@ -779,7 +790,7 @@ "Unable to pick a default driver. 
Here is what was considered, in preference order:": "", "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", - "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", @@ -861,6 +872,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -905,8 +917,8 @@ "error creating clientset": "", "error creating urls": "", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "", - "error getting primary control plane": "", "error getting ssh port": "", "error initializing tracing: {{.Error}}": "", "error parsing the input ip address for mount": "", diff --git a/translations/strings.txt b/translations/strings.txt index e58232a6e9d0..fac28c491b9e 100644 --- a/translations/strings.txt +++ b/translations/strings.txt @@ -48,8 +48,8 @@ "Add image to cache for all running minikube clusters": "", "Add machine IP to NO_PROXY environment variable": "", "Add, remove, or list additional nodes": "", - "Adding a control-plane node is not yet supported, setting control-plane flag to false": "", - "Adding node {{.name}} to cluster {{.cluster}}": "", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "", "Adds a node to the given cluster config, and starts it.": "", "Adds a node to the given cluster.": "", @@ -93,6 +93,8 @@ "Cannot use both --output and --format options": "", "Cannot use the option --no-kubernetes on the {{.name}} driver": "", "Certificate {{.certPath}} has expired. Generating a new one...": "", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. 
Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "", "Check that libvirt is setup properly": "", @@ -124,6 +126,7 @@ "Could not process errors from failed deletion": "", "Could not resolve IP address": "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.": "", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Creating mount {{.name}} ...": "", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{.number_of_cpus}}, Memory={{.memory_size}}MB, Disk={{.disk_size}}MB) ...": "", "Creating {{.driver_name}} {{.machine_type}} (CPUs={{if not .number_of_cpus}}no-limit{{else}}{{.number_of_cpus}}{{end}}, Memory={{if not .memory_size}}no-limit{{else}}{{.memory_size}}MB{{end}}) ...": "", @@ -205,9 +208,9 @@ "Error generating unset output": "", "Error getting cluster bootstrapper": "", "Error getting cluster config": "", + "Error getting control-plane node": "", "Error getting host": "", "Error getting port binding for '{{.driver_name}} driver: {{.error}}": "", - "Error getting primary control plane": "", "Error getting service with namespace: {{.namespace}} and labels {{.labelName}}:{{.addonName}}: {{.error}}": "", "Error getting ssh client": "", "Error getting the host IP address to use from within the VM": "", @@ -317,6 +320,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "", "Group ID: {{.groupID}}": "", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "", "Hyper-V requires that memory MB be an even number, {{.memory}}MB was specified, try passing `--memory {{.suggestMemory}}`": "", @@ -325,6 +329,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "", "IP address (ssh driver only)": "", "If present, writes to the provided file instead of stdout.": "", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "", "If set, delete the current cluster if start fails and try again. Defaults to false.": "", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. 
Defaults to false.": "", @@ -342,7 +348,6 @@ "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", - "If true, the added node will be marked for work. Defaults to true.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:": "", @@ -421,7 +426,8 @@ "Networking and Connectivity Commands:": "", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "", "No changes required for the \"{{.context}}\" context": "", - "No minikube profile was found. ": "", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "", "No such addon {{.name}}": "", "No valid URL found for tunnel.": "", @@ -603,10 +609,9 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", - "Starting control plane node {{.name}} in cluster {{.cluster}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting minikube without Kubernetes in cluster {{.cluster}}": "", "Starting tunnel for service {{.service}}.": "", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "", "Starts a node.": "", "Starts an existing stopped node in a cluster.": "", @@ -663,11 +668,14 @@ "The base image to use for docker/podman drivers. Intended for local development.": "", "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "", "The cluster dns domain name used in the Kubernetes cluster": "", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use \"minikube node add\" to add nodes to an existing cluster.": "", - "The control plane for \"{{.name}}\" is paused!": "", - "The control plane node \"{{.name}}\" does not exist.": "", - "The control plane node is not running (state={{.state}})": "", - "The control plane node must be running for this command": "", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used.": "", "The docker-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}": "", @@ -698,7 +706,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "", "The path on the file system where the docs in markdown need to be saved": "", "The path on the file system where the error code docs in markdown need to be saved": "", @@ -712,6 +719,7 @@ "The services namespace": "", "The socket_vmnet network is only supported on macOS": "", "The time interval for each check that wait performs in seconds": "", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "", "The value passed to --format is invalid: {{.error}}": "", "There are a couple ways to enable the required file sharing:\n1. Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "", @@ -721,11 +729,8 @@ "This addon does not have an endpoint defined for the 'addons open' command.\nYou can add one by annotating a service with the label {{.labelName}}:{{.addonName}}": "", "This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true": "", "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "", - "This control plane is not running! (state={{.state}})": "", "This driver does not yet work on your architecture. Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", - "This is unusual - you may want to investigate using \"{{.command}}\"": "", "This will keep the existing kubectl context and will create a minikube context.": "", "This will start the mount daemon and automatically mount files into minikube.": "", "This {{.type}} is having trouble accessing https://{{.repository}}": "", @@ -756,21 +761,26 @@ "Unable to detect the latest patch release for specified major.minor version v{{.majorminor}}": "", "Unable to enable dashboard": "", "Unable to fetch latest version info": "", - "Unable to find control plane": "", + "Unable to find any control-plane nodes": "", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", "Unable to get CPU info: {{.err}}": "", - "Unable to get command runner": "", - "Unable to get control plane status: {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "", - "Unable to get forwarded endpoint": "", - "Unable to get machine status": "", "Unable to get runtime": "", "Unable to kill mount process: {{.error}}": "", "Unable to list profiles: {{.error}}": "", "Unable to load cached images: {{.error}}": "", "Unable to load config: {{.error}}": "", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "", "Unable to parse memory '{{.memory}}': {{.error}}": "", @@ -778,7 +788,7 @@ "Unable to pick a default driver. Here is what was considered, in preference order:": "", "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", - "Unable to restart cluster, will reset it: {{.error}}": "", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "", "Unable to stop VM": "", "Unable to update {{.driver}} driver: {{.error}}": "", @@ -860,6 +870,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. 
The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -904,8 +915,8 @@ "error creating clientset": "", "error creating urls": "", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "", - "error getting primary control plane": "", "error getting ssh port": "", "error initializing tracing: {{.Error}}": "", "error parsing the input ip address for mount": "", diff --git a/translations/zh-CN.json b/translations/zh-CN.json index fbc1ccaa3182..79b712fcfeca 100644 --- a/translations/zh-CN.json +++ b/translations/zh-CN.json @@ -64,7 +64,9 @@ "Add or delete an image from the local cache.": "在本地缓存中添加或删除 image。", "Add, remove, or list additional nodes": "添加,删除或者列出其他的节点", "Adding a control-plane node is not yet supported, setting control-plane flag to false": "不支持添加控制平面节点,将控制平面标志设置为false", + "Adding a control-plane node to a non-HA (non-multi-control plane) cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Adding node {{.name}} to cluster {{.cluster}}": "添加节点 {{.name}} 至集群 {{.cluster}}", + "Adding node {{.name}} to cluster {{.cluster}} as {{.roles}}": "", "Additional help topics": "其他帮助", "Additional mount options, such as cache=fscache": "其他挂载选项,例如:cache=fscache", "Adds a node to the given cluster config, and starts it.": "将节点添加到给定的集群配置中,然后启动它", @@ -117,6 +119,8 @@ "Cannot use both --output and --format options": "不能同时使用 --output 和 --format 选项", "Cannot use the option --no-kubernetes on the {{.name}} driver": "无法使用 {{.name}} 驱动程序上的 -no-kubernetes 选项", "Certificate {{.certPath}} has expired. Generating a new one...": "证书 {{.certPath}} 已过期,生成一个新证书...", + "Changing the API server port of an existing minikube HA (multi-control plane) cluster is not currently supported. Please first delete the cluster.": "", + "Changing the HA (multi-control plane) mode of an existing minikube cluster is not currently supported. Please first delete the cluster and use 'minikube start --ha' to create new one.": "", "Check if you have unnecessary pods running by running 'kubectl get po -A": "通过运行 'kubectl get po -A' 检查是否有不必要的pod正在运行", "Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start": "检查 'journalctl -xeu kubelet' 的输出,尝试启动 minikube 时添加参数 --extra-config=kubelet.cgroup-driver=systemd", "Check that SELinux is disabled, and that the provided apiserver flags are valid": "检查 SELinux 是否禁用,且提供的 apiserver 标志是否有效", @@ -158,6 +162,7 @@ "Could not process errors from failed deletion": "无法处理删除失败的错误", "Could not resolve IP address": "无法解析 IP 地址", "Country code of the image mirror to be used. Leave empty to use the global one. 
For Chinese mainland users, set it to cn.": "需要使用的镜像镜像的国家/地区代码。留空以使用全球代码。对于中国大陆用户,请将其设置为 cn。", + "Create Highly Available Multi-Control Plane Cluster with a minimum of three control-plane nodes that will also be marked for work.": "", "Created a new profile : {{.profile_name}}": "创建了新的配置文件:{{.profile_name}}", "Creating Kubernetes in {{.driver_name}} container with (CPUs={{.number_of_cpus}}), Memory={{.memory_size}}MB ({{.host_memory_size}}MB available) ...": "正在 {{.driver_name}} 容器中 创建 Kubernetes,(CPUs={{.number_of_cpus}}), 内存={{.memory_size}}MB ({{.host_memory_size}}MB 可用", "Creating a new profile failed": "创建新的配置文件失败", @@ -276,6 +281,7 @@ "Error getting cluster bootstrapper": "获取 cluster bootstrapper 时出错", "Error getting cluster config": "获取 cluster config 时出错", "Error getting config": "获取 config 时出错", + "Error getting control-plane node": "", "Error getting host": "获取 host 时出错", "Error getting host status": "获取 host status 时出错", "Error getting machine logs": "获取 machine logs 时出错", @@ -431,6 +437,7 @@ "Go template format string for the config view output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list of accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate": "Go模板格式字符串,用于配置视图输出。Go模板的格式可以在此链接找到:https://pkg.go.dev/text/template\n要查看模板中可访问的变量列表,请参见此链接中的结构值:https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd/config#ConfigViewTemplate", "Go template format string for the status output. The format for Go templates can be found here: https://pkg.go.dev/text/template\nFor the list accessible variables for the template, see the struct values here: https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status": "状态输出的 Go 模板格式字符串。Go 模板的格式可以在此处找到:https://pkg.go.dev/text/template\n关于模板中可访问的变量列表,请参阅此处的定义:https://pkg.go.dev/k8s.io/minikube/cmd/minikube/cmd#Status", "Group ID: {{.groupID}}": "组 ID:{{.groupID}}", + "HA (multi-control plane) clusters require 3 or more control-plane nodes": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\n\tminikube{{.profileArg}} addons enable metrics-server\n": "", "Headlamp can display more detailed information when metrics-server is installed. To install it, run:\n\nminikube{{.profileArg}} addons enable metrics-server\t\n\n": "安装metrics-server后,Headlamp可以显示更详细的信息。 要安装它,请运行\n\nminikube{{.profileArg}} 插件启用指标服务器\t\n\n", "Hide the hypervisor signature from the guest in minikube (kvm2 driver only)": "向 minikube 中的访客隐藏管理程序签名(仅限 kvm2 驱动程序)", @@ -442,6 +449,8 @@ "IP Address to use to expose ports (docker and podman driver only)": "用于暴露端口的IP地址(仅适用于docker和podman驱动程序)", "IP address (ssh driver only)": "ssh 主机IP地址(仅适用于SSH驱动程序)", "If present, writes to the provided file instead of stdout.": "如果存在,则写入所提供的文件,而不是标准输出。", + "If set, added node will be available as worker. Defaults to true.": "", + "If set, added node will become a control-plane. Defaults to false. Currently only supported for existing HA (multi-control plane) clusters.": "", "If set, automatically updates drivers to the latest version. Defaults to true.": "如果设置为 true,将自动更新驱动到最新版本。默认为 true。", "If set, delete the current cluster if start fails and try again. Defaults to false.": "如果设置为 true,则在启动失败时删除当前群集,然后重试。默认为 false。", "If set, disables metrics reporting (CPU and memory usage), this can improve CPU usage. 
Defaults to false.": "如果设置为 true,则禁用指标报告(CPU和内存使用率),这可以提高 CPU 利用率。默认为 false。", @@ -547,7 +556,8 @@ "Networking and Connectivity Commands:": "网络和连接命令:", "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/": "未提供 IP 地址。尝试指定 --ssh-ip-address,或参见 https://minikube.sigs.k8s.io/docs/drivers/ssh/", "No changes required for the \"{{.context}}\" context": "", - "No minikube profile was found. ": "", + "No control-plane nodes found.": "", + "No minikube profile was found.": "", "No possible driver was detected. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/": "未检测到可用的驱动程序。尝试指定 --driver,或查看 https://minikube.sigs.k8s.io/docs/start/", "No such addon {{.name}}": "", "No valid URL found for tunnel.": "未找到有效的隧道URL。", @@ -755,10 +765,10 @@ "Specify arbitrary flags to pass to the build. (format: key=value)": "指定传递给构建过程的任意标志。(format: key=value)", "Specifying extra disks is currently only supported for the following drivers: {{.supported_drivers}}. If you can contribute to add this feature, please create a PR.": "", "StartHost failed, but will try again: {{.error}}": "", + "Starting \"{{.node}}\" {{.role}} node in \"{{.cluster}}\" cluster": "", "Starting control plane node {{.name}} in cluster {{.cluster}}": "正在集群 {{.cluster}} 中启动控制平面节点 {{.name}}", "Starting minikube without Kubernetes in cluster {{.cluster}}": "在集群 {{.cluster}} 中启动 minikube 但不使用 Kubernetes", "Starting tunnel for service {{.service}}.": "为服务 {{.service}} 启动隧道。", - "Starting worker node {{.name}} in cluster {{.cluster}}": "", "Starts a local Kubernetes cluster": "启动本地 Kubernetes 集群", "Starts a local kubernetes cluster": "启动本地 kubernetes 集群", "Starts a node.": "启动一个节点。", @@ -830,12 +840,16 @@ "The certificate hostname provided appears to be invalid (may be a minikube bug, try 'minikube delete')": "提供的证书主机名似乎无效(可能是 minikube 的 bug,请尝试 'minikube delete')", "The cluster dns domain name used in the Kubernetes cluster": "Kubernetes 集群中使用的集群 dns 域名", "The cluster dns domain name used in the kubernetes cluster": "kubernetes 集群中使用的集群 dns 域名", - "The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. Use \"minikube node add\" to add nodes to an existing cluster.": "", "The container runtime to be used (docker, crio, containerd)": "需要使用的容器运行时(docker、crio、containerd)", - "The control plane for \"{{.name}}\" is paused!": "", - "The control plane node \"{{.name}}\" does not exist.": "", - "The control plane node is not running (state={{.state}})": "", "The control plane node must be running for this command": "执行此命令需要运行控制平面节点", + "The control-plane node {{.name}} apiserver is not running (will try others): (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is not running: (state={{.state}})": "", + "The control-plane node {{.name}} apiserver is paused": "", + "The control-plane node {{.name}} apiserver is paused (will try others)": "", + "The control-plane node {{.name}} host does not exist": "", + "The control-plane node {{.name}} host does not exist (will try others)": "", + "The control-plane node {{.name}} host is not running (will try others): state={{.state}}": "", + "The control-plane node {{.name}} host is not running: state={{.state}}": "", "The cri socket path to be used": "需要使用的 cri 套接字路径", "The cri socket path to be used.": "需要使用的 cri 套接字路径。", "The docker-env command is incompatible with multi-node clusters. 
Use the 'registry' add-on: https://minikube.sigs.k8s.io/docs/handbook/registry/": "", @@ -870,7 +884,6 @@ "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "Kubernetes v1.24+ 和 docker 容器运行时的 none 驱动需要 cri-dockerd。\n\n请使用以下说明安装 cri-dockerd:\n\n\thttps://github.com/Mirantis/cri-dockerd", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "Kubernetes v1.24+ 和 docker 容器运行时的 none 驱动需要 dockerd。\n\n请使用以下说明安装 dockerd:\n\n\thttps://docs.docker.com/engine/install/", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "", - "The number of nodes to spin up. Defaults to 1.": "", "The output format. One of 'json', 'table'": "输出的格式。'json' 或者 'table'", "The path on the file system where the docs in markdown need to be saved": "Markdown 文档需要保存的文件系统路径。", "The path on the file system where the error code docs in markdown need to be saved": "错误代码文档(markdown 格式)需要保存在文件系统上的路径", @@ -884,6 +897,7 @@ "The services namespace": "服务命名空间", "The socket_vmnet network is only supported on macOS": "The socket_vmnet network is only supported on macOS", "The time interval for each check that wait performs in seconds": "wait 执行每次检查的时间间隔,以秒为单位。", + "The total number of nodes to spin up. Defaults to 1.": "", "The value passed to --format is invalid": "传递给 --format 的值无效。", "The value passed to --format is invalid: {{.error}}": "传递给 --format 的值无效:{{.error}}。", "The {{.driver_name}} driver should not be used with root privileges.": "不应以根权限使用 {{.driver_name}} 驱动程序。", @@ -897,7 +911,6 @@ "This cluster was created before minikube v1.26.0 and doesn't have cri-docker installed. Please run 'minikube delete' and then start minikube again": "此集群是在 minikube v1.26.0 之前创建的,并且未安装 cri-docker。请运行 'minikube delete' 然后重新启动 minikube", "This control plane is not running! (state={{.state}})": "此控制平面未运行!(状态={{.state}})", "This driver does not yet work on your architecture. Maybe try --driver=none": "", - "This flag is currently unsupported.": "", "This is a known issue with BTRFS storage driver, there is a workaround, please checkout the issue on GitHub": "", "This is unusual - you may want to investigate using \"{{.command}}\"": "这很不寻常 - 您可能想要使用 \"{{.command}}\" 进行调查", "This will keep the existing kubectl context and will create a minikube context.": "这将保留现有 kubectl 上下文并创建 minikube 上下文。", @@ -935,6 +948,7 @@ "Unable to determine a default driver to use. Try specifying --vm-driver, or see https://minikube.sigs.k8s.io/docs/start/": "无法确定要使用的默认驱动。尝试通过 --vm-dirver 指定,或者查阅 https://minikube.sigs.k8s.io/docs/start/", "Unable to enable dashboard": "无法启用仪表盘", "Unable to fetch latest version info": "无法获取最新版本信息", + "Unable to find any control-plane nodes": "", "Unable to find control plane": "无法找到控制平面", "Unable to generate docs": "", "Unable to generate the documentation. 
Please ensure that the path specified is a directory, exists \u0026 you have permission to write to it.": "", @@ -942,6 +956,14 @@ "Unable to get bootstrapper: {{.error}}": "无法获取引导程序:{{.error}}", "Unable to get command runner": "无法获取命令执行器", "Unable to get control plane status: {{.error}}": "无法获取控制平面状态:{{.error}}", + "Unable to get control-plane node {{.name}} apiserver status (will try others): {{.error}}": "", + "Unable to get control-plane node {{.name}} apiserver status: {{.error}}": "", + "Unable to get control-plane node {{.name}} endpoint (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} endpoint: {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host command runner: {{.err}}": "", + "Unable to get control-plane node {{.name}} host status (will try others): {{.err}}": "", + "Unable to get control-plane node {{.name}} host status: {{.err}}": "", "Unable to get current user": "无法获取当前用户", "Unable to get forwarded endpoint": "无法获取转发的端点", "Unable to get machine status": "获取机器状态失败", @@ -952,7 +974,8 @@ "Unable to load cached images from config file.": "无法从配置文件中加载缓存的镜像。", "Unable to load cached images: {{.error}}": "无法加载缓存的镜像:{{.error}}", "Unable to load config: {{.error}}": "无法加载配置:{{.error}}", - "Unable to load host": "", + "Unable to load control-plane node {{.name}} host (will try others): {{.err}}": "", + "Unable to load control-plane node {{.name}} host: {{.err}}": "", "Unable to load profile: {{.error}}": "", "Unable to parse \"{{.kubernetes_version}}\": {{.error}}": "无法解析“{{.kubernetes_version}}”:{{.error}}", "Unable to parse default Kubernetes version from constants: {{.error}}": "无法从常量中解析默认的 Kubernetes 版本号: {{.error}}", @@ -964,6 +987,7 @@ "Unable to push cached images: {{.error}}": "", "Unable to remove machine directory": "", "Unable to restart cluster, will reset it: {{.error}}": "无法重启集群,将进行重置:{{.error}}", + "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "无法安全地将现有的 Kubernetes v{{.old}} 集群降级为 v{{.new}}", "Unable to start VM. Please investigate and run 'minikube delete' if possible": "无法启动虚拟机。可能的话请检查后执行 'minikube delete'", "Unable to stop VM": "无法停止虚拟机", @@ -1061,6 +1085,7 @@ "You cannot change the CPUs for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the disk size for an existing minikube cluster. Please first delete the cluster.": "", "You cannot change the memory size for an existing minikube cluster. Please first delete the cluster.": "您无法更改现有 minikube 集群的内存大小。请先删除集群。", + "You cannot change the number of nodes for an existing minikube cluster. Please use 'minikube node add' to add nodes to an existing cluster.": "", "You cannot change the static IP of an existing minikube cluster. Please first delete the cluster.": "您不能更改现有 minikube 集群的静态 IP。请先删除集群。", "You cannot enable addons on a cluster without Kubernetes, to enable Kubernetes on your cluster, run: minikube start --kubernetes-version=stable": "", "You have authenticated with a service account that does not have an associated JSON file. 
The GCP Auth addon requires credentials with a JSON file in order to continue.": "", @@ -1106,6 +1131,7 @@ "error creating clientset": "clientset 创建失败", "error creating urls": "url 创建失败", "error fetching Kubernetes version list from GitHub": "", + "error getting control-plane node": "", "error getting defaults: {{.error}}": "获取默认值时出错: {{.error}}", "error getting primary control plane": "获取主控制平面时出错", "error getting ssh port": "获取 ssh 端口号时出错",