From 103d50dba417b9fd6e84be161d7e49df8cd87ed7 Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Sat, 7 Dec 2024 00:30:25 +0000 Subject: [PATCH 1/2] Fix agent tunnel address on rke2 Fix issue where rke2 tunnel was trying to connect to apiserver port instead of supervisor Signed-off-by: Brad Davidson --- pkg/agent/tunnel/tunnel.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/agent/tunnel/tunnel.go b/pkg/agent/tunnel/tunnel.go index 2fe031ce627f..98094a8d02dc 100644 --- a/pkg/agent/tunnel/tunnel.go +++ b/pkg/agent/tunnel/tunnel.go @@ -505,13 +505,14 @@ func (a *agentTunnel) getProxySyncer(ctx context.Context, wg *sync.WaitGroup, tl return } - newAddresses := sets.New(addresses...) + // Compare list of supervisor addresses before and after syncing apiserver + // endpoints into the proxy to figure out which supervisors we need to connect to + // or disconnect from. Note that the addresses we were passed will not match + // the supervisor addresses if the supervisor and apiserver are on different ports - + // they must be round-tripped through proxy.Update before comparing. curAddresses := sets.New(proxy.SupervisorAddresses()...) - if newAddresses.Equal(curAddresses) { - return - } - proxy.Update(addresses) + newAddresses := sets.New(proxy.SupervisorAddresses()...) 
// add new servers for address := range newAddresses.Difference(curAddresses) { From 0cbfe943114922fd019e07d07f4932b984569416 Mon Sep 17 00:00:00 2001 From: Brad Davidson Date: Mon, 9 Dec 2024 18:06:28 +0000 Subject: [PATCH 2/2] Add hidden flag/var for supervisor/apiserver listen config Add flags for supervisor and apiserver ports and bind address so that we can add an e2e to cover supervisor and apiserver on separate ports, as used by rke2 Signed-off-by: Brad Davidson --- pkg/cli/cmds/server.go | 23 ++++++++- tests/e2e/startup/startup_test.go | 84 +++++++++++++++++++++++++++++++ tests/e2e/testutils.go | 13 +++++ 3 files changed, 119 insertions(+), 1 deletion(-) diff --git a/pkg/cli/cmds/server.go b/pkg/cli/cmds/server.go index 0a99c2f5e622..3c84d1a0fd27 100644 --- a/pkg/cli/cmds/server.go +++ b/pkg/cli/cmds/server.go @@ -188,6 +188,27 @@ var ServerFlags = []cli.Flag{ Value: 6443, Destination: &ServerConfig.HTTPSPort, }, + &cli.IntFlag{ + Name: "supervisor-port", + EnvVar: version.ProgramUpper + "_SUPERVISOR_PORT", + Usage: "(experimental) Supervisor listen port override", + Hidden: true, + Destination: &ServerConfig.SupervisorPort, + }, + &cli.IntFlag{ + Name: "apiserver-port", + EnvVar: version.ProgramUpper + "_APISERVER_PORT", + Usage: "(experimental) apiserver internal listen port override", + Hidden: true, + Destination: &ServerConfig.APIServerPort, + }, + &cli.StringFlag{ + Name: "apiserver-bind-address", + EnvVar: version.ProgramUpper + "_APISERVER_BIND_ADDRESS", + Usage: "(experimental) apiserver internal bind address override", + Hidden: true, + Destination: &ServerConfig.APIServerBindAddress, + }, &cli.StringFlag{ Name: "advertise-address", Usage: "(listener) IPv4/IPv6 address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)", }, &cli.IntFlag{ Name: "advertise-port", - Usage: "(listener) Port that apiserver uses to advertise to members of the cluster (default: 
listen-port)", + Usage: "(listener) Port that apiserver uses to advertise to members of the cluster (default: https-listen-port)", Destination: &ServerConfig.AdvertisePort, }, &cli.StringSliceFlag{ diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go index fd1d71872490..3c7dd13627ea 100644 --- a/tests/e2e/startup/startup_test.go +++ b/tests/e2e/startup/startup_test.go @@ -71,6 +71,12 @@ func KillK3sCluster(nodes []string) error { if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil { return err } + if _, err := e2e.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1", node); err != nil { + return err + } + if _, err := e2e.RunCmdOnNode("rm -rf /etc/rancher/k3s/config.yaml.d", node); err != nil { + return err + } if strings.Contains(node, "server") { if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil { return err @@ -93,6 +99,83 @@ var _ = BeforeSuite(func() { }) var _ = Describe("Various Startup Configurations", Ordered, func() { + Context("Verify dedicated supervisor port", func() { + It("Starts K3s with no issues", func() { + for _, node := range agentNodeNames { + cmd := "mkdir -p /etc/rancher/k3s/config.yaml.d; grep -F server: /etc/rancher/k3s/config.yaml | sed s/6443/9345/ > /tmp/99-server.yaml; sudo mv /tmp/99-server.yaml /etc/rancher/k3s/config.yaml.d/" + res, err := e2e.RunCmdOnNode(cmd, node) + By("checking command results: " + res) + Expect(err).NotTo(HaveOccurred()) + } + supervisorPortYAML := "supervisor-port: 9345\napiserver-port: 6443\napiserver-bind-address: 0.0.0.0\ndisable: traefik\nnode-taint: node-role.kubernetes.io/control-plane:NoExecute" + err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), supervisorPortYAML, "") + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + + fmt.Println("CLUSTER CONFIG") + fmt.Println("OS:", *nodeOS) + fmt.Println("Server Nodes:", serverNodeNames) + fmt.Println("Agent Nodes:", agentNodeNames) + 
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks node and pod status", func() { + fmt.Printf("\nFetching node status\n") + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "360s", "5s").Should(Succeed()) + _, _ = e2e.ParseNodes(kubeConfigFile, true) + + fmt.Printf("\nFetching pods status\n") + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "360s", "5s").Should(Succeed()) + _, _ = e2e.ParsePods(kubeConfigFile, true) + }) + + It("Returns pod metrics", func() { + cmd := "kubectl top pod -A" + Eventually(func() error { + _, err := e2e.RunCommand(cmd) + return err + }, "600s", "5s").Should(Succeed()) + }) + + It("Returns node metrics", func() { + cmd := "kubectl top node" + _, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Runs an interactive command a pod", func() { + cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a" + _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Collects logs from a pod", func() { + cmd := "kubectl logs -n kube-system -l k8s-app=metrics-server -c metrics-server" + _, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Kills the cluster", func() { + err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + Expect(err).NotTo(HaveOccurred()) + }) + }) Context("Verify CRI-Dockerd :", func() { It("Starts K3s with no issues", func() { dockerYAML := "docker: true" @@ 
-311,6 +394,7 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { + AddReportEntry("config", e2e.GetConfig(append(serverNodeNames, agentNodeNames...))) AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) } else { Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index 29edcc7f951d..2d2cb12071b0 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -377,6 +377,19 @@ func TailJournalLogs(lines int, nodes []string) string { return logs.String() } +func GetConfig(nodes []string) string { + config := &strings.Builder{} + for _, node := range nodes { + cmd := "tar -Pc /etc/rancher/k3s/ | tar -vxPO" + if c, err := RunCmdOnNode(cmd, node); err != nil { + fmt.Fprintf(config, "** failed to get config for node %s ***\n%v\n", node, err) + } else { + fmt.Fprintf(config, "** config for node %s ***\n%s\n", node, c) + } + } + return config.String() +} + // GetVagrantLog returns the logs of on vagrant commands that initialize the nodes and provision K3s on each node. // It also attempts to fetch the systemctl logs of K3s on nodes where the k3s.service failed. func GetVagrantLog(cErr error) string {