diff --git a/manifests/rolebindings.yaml b/manifests/rolebindings.yaml
index 9a4d1f655662..29c65465cdaf 100644
--- a/manifests/rolebindings.yaml
+++ b/manifests/rolebindings.yaml
@@ -23,6 +23,14 @@ rules:
   - nodes
   verbs:
   - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
 - apiGroups:
   - ""
   resources:
diff --git a/pkg/agent/flannel/setup.go b/pkg/agent/flannel/setup.go
index 617482f9e451..a487bfe83b1a 100644
--- a/pkg/agent/flannel/setup.go
+++ b/pkg/agent/flannel/setup.go
@@ -9,10 +9,12 @@ import (
 	goruntime "runtime"
 	"strings"
 
-	"github.com/k3s-io/k3s/pkg/agent/util"
+	agentutil "github.com/k3s-io/k3s/pkg/agent/util"
 	"github.com/k3s-io/k3s/pkg/daemons/config"
+	"github.com/k3s-io/k3s/pkg/util"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	authorizationv1 "k8s.io/api/authorization/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -64,9 +66,22 @@ func Prepare(ctx context.Context, nodeConfig *config.Node) error {
 	return createFlannelConf(nodeConfig)
 }
 
-func Run(ctx context.Context, nodeConfig *config.Node, nodes typedcorev1.NodeInterface) error {
+func Run(ctx context.Context, nodeConfig *config.Node) error {
 	logrus.Infof("Starting flannel with backend %s", nodeConfig.FlannelBackend)
-	if err := waitForPodCIDR(ctx, nodeConfig.AgentConfig.NodeName, nodes); err != nil {
+
+	if err := util.WaitForRBACReady(ctx, nodeConfig.AgentConfig.KubeConfigK3sController, util.DefaultAPIServerReadyTimeout, authorizationv1.ResourceAttributes{
+		Verb:     "list",
+		Resource: "nodes",
+	}, ""); err != nil {
+		return errors.Wrap(err, "flannel failed to wait for RBAC")
+	}
+
+	coreClient, err := util.GetClientSet(nodeConfig.AgentConfig.KubeConfigK3sController)
+	if err != nil {
+		return err
+	}
+
+	if err := waitForPodCIDR(ctx, nodeConfig.AgentConfig.NodeName, coreClient.CoreV1().Nodes()); err != nil {
 		return errors.Wrap(err, "flannel failed to wait for PodCIDR assignment")
 	}
 
@@ -75,7 +90,7 @@ func Run(ctx context.Context, nodeConfig *config.Node, nodes typedcorev1.NodeInt
 		return errors.Wrap(err, "failed to check netMode for flannel")
 	}
 	go func() {
-		err := flannel(ctx, nodeConfig.FlannelIface, nodeConfig.FlannelConfFile, nodeConfig.AgentConfig.KubeConfigKubelet, nodeConfig.FlannelIPv6Masq, netMode)
+		err := flannel(ctx, nodeConfig.FlannelIface, nodeConfig.FlannelConfFile, nodeConfig.AgentConfig.KubeConfigK3sController, nodeConfig.FlannelIPv6Masq, netMode)
 		if err != nil && !errors.Is(err, context.Canceled) {
 			logrus.Errorf("flannel exited: %v", err)
 			os.Exit(1)
@@ -123,7 +138,7 @@ func createCNIConf(dir string, nodeConfig *config.Node) error {
 
 	if nodeConfig.AgentConfig.FlannelCniConfFile != "" {
 		logrus.Debugf("Using %s as the flannel CNI conf", nodeConfig.AgentConfig.FlannelCniConfFile)
-		return util.CopyFile(nodeConfig.AgentConfig.FlannelCniConfFile, p, false)
+		return agentutil.CopyFile(nodeConfig.AgentConfig.FlannelCniConfFile, p, false)
 	}
 
 	cniConfJSON := cniConf
@@ -138,7 +153,7 @@ func createCNIConf(dir string, nodeConfig *config.Node) error {
 		cniConfJSON = strings.ReplaceAll(cniConfJSON, "%SERVICE_CIDR%", nodeConfig.AgentConfig.ServiceCIDR.String())
 	}
 
-	return util.WriteFile(p, cniConfJSON)
+	return agentutil.WriteFile(p, cniConfJSON)
 }
 
 func createFlannelConf(nodeConfig *config.Node) error {
@@ -235,7 +250,7 @@ func createFlannelConf(nodeConfig *config.Node) error {
 	confJSON = strings.ReplaceAll(confJSON, "%backend%", backendConf)
 
 	logrus.Debugf("The flannel configuration is %s", confJSON)
-	return util.WriteFile(nodeConfig.FlannelConfFile, confJSON)
+	return agentutil.WriteFile(nodeConfig.FlannelConfFile, confJSON)
 }
 
 // fundNetMode returns the mode (ipv4, ipv6 or dual-stack) in which flannel is operating
diff --git a/pkg/agent/run.go b/pkg/agent/run.go
index ff672d58fffb..b535097798c9 100644
--- a/pkg/agent/run.go
+++ b/pkg/agent/run.go
@@ -177,17 +177,18 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
 		return errors.Wrap(err, "failed to wait for apiserver ready")
 	}
 
-	coreClient, err := util.GetClientSet(nodeConfig.AgentConfig.KubeConfigKubelet)
+	// Use the kubelet kubeconfig to update annotations on the local node
+	kubeletClient, err := util.GetClientSet(nodeConfig.AgentConfig.KubeConfigKubelet)
 	if err != nil {
 		return err
 	}
 
-	if err := configureNode(ctx, nodeConfig, coreClient.CoreV1().Nodes()); err != nil {
+	if err := configureNode(ctx, nodeConfig, kubeletClient.CoreV1().Nodes()); err != nil {
 		return err
 	}
 
 	if !nodeConfig.NoFlannel {
-		if err := flannel.Run(ctx, nodeConfig, coreClient.CoreV1().Nodes()); err != nil {
+		if err := flannel.Run(ctx, nodeConfig); err != nil {
 			return err
 		}
 	}
diff --git a/pkg/cloudprovider/servicelb.go b/pkg/cloudprovider/servicelb.go
index 0f2e6d4bae97..3e8a8fa4d618 100644
--- a/pkg/cloudprovider/servicelb.go
+++ b/pkg/cloudprovider/servicelb.go
@@ -2,12 +2,13 @@ package cloudprovider
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
-	"encoding/json"
+
 	"sigs.k8s.io/yaml"
 
 	"github.com/k3s-io/k3s/pkg/util"
@@ -27,11 +28,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/cloud-provider/names"
 	servicehelper "k8s.io/cloud-provider/service/helpers"
-	"k8s.io/kubernetes/pkg/features"
 	utilsnet "k8s.io/utils/net"
 	utilsptr "k8s.io/utils/ptr"
 )
@@ -563,7 +562,7 @@ func (k *k3s) newDaemonSet(svc *core.Service) (*apps.DaemonSet, error) {
 							Name: "DEST_IPS",
 							ValueFrom: &core.EnvVarSource{
 								FieldRef: &core.ObjectFieldSelector{
-									FieldPath: getHostIPsFieldPath(),
+									FieldPath: "status.hostIPs",
 								},
 							},
 						},
@@ -710,8 +709,8 @@ func (k *k3s) getPriorityClassName(svc *core.Service) string {
 	return k.LBDefaultPriorityClassName
 }
 
-// getTolerations retrieves the tolerations from a service's annotations. 
-// It parses the tolerations from a JSON or YAML string stored in the annotations. 
+// getTolerations retrieves the tolerations from a service's annotations.
+// It parses the tolerations from a JSON or YAML string stored in the annotations.
 func (k *k3s) getTolerations(svc *core.Service) ([]core.Toleration, error) {
 	tolerationsStr, ok := svc.Annotations[tolerationsAnnotation]
 	if !ok {
@@ -778,10 +777,3 @@ func ingressToString(ingresses []core.LoadBalancerIngress) []string {
 	}
 	return parts
 }
-
-func getHostIPsFieldPath() string {
-	if utilfeature.DefaultFeatureGate.Enabled(features.PodHostIPs) {
-		return "status.hostIPs"
-	}
-	return "status.hostIP"
-}
diff --git a/pkg/daemons/agent/agent_linux.go b/pkg/daemons/agent/agent_linux.go
index ca7f94a529d0..f77656caafc2 100644
--- a/pkg/daemons/agent/agent_linux.go
+++ b/pkg/daemons/agent/agent_linux.go
@@ -141,8 +141,6 @@ func kubeletArgs(cfg *config.Agent) map[string]string {
 			argsMap["node-ip"] = cfg.NodeIP
 		}
 	} else {
-		// Cluster is using the embedded CCM, we know that the feature-gate will be enabled there as well.
- argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true") if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" { argsMap["node-ip"] = util.JoinIPs(cfg.NodeIPs) } diff --git a/pkg/daemons/agent/agent_windows.go b/pkg/daemons/agent/agent_windows.go index 7bbf468eb6dd..03d548ff4e59 100644 --- a/pkg/daemons/agent/agent_windows.go +++ b/pkg/daemons/agent/agent_windows.go @@ -106,8 +106,6 @@ func kubeletArgs(cfg *config.Agent) map[string]string { argsMap["node-ip"] = cfg.NodeIP } } else { - // Cluster is using the embedded CCM, we know that the feature-gate will be enabled there as well. - argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true") if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" { argsMap["node-ip"] = util.JoinIPs(cfg.NodeIPs) } diff --git a/pkg/daemons/control/deps/deps.go b/pkg/daemons/control/deps/deps.go index e623157f6d81..ecd25347c60e 100644 --- a/pkg/daemons/control/deps/deps.go +++ b/pkg/daemons/control/deps/deps.go @@ -29,8 +29,8 @@ import ( "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/apis/apiserver" apiserverconfigv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" + apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/util/keyutil" ) @@ -785,19 +785,19 @@ func genEncryptionConfigAndState(controlConfig *config.Control) error { } func genEgressSelectorConfig(controlConfig *config.Control) error { - var clusterConn apiserver.Connection + var clusterConn apiserverv1beta1.Connection if controlConfig.EgressSelectorMode == config.EgressSelectorModeDisabled { - clusterConn = apiserver.Connection{ - ProxyProtocol: apiserver.ProtocolDirect, + clusterConn = apiserverv1beta1.Connection{ + ProxyProtocol: apiserverv1beta1.ProtocolDirect, } } else { - clusterConn = apiserver.Connection{ - ProxyProtocol: apiserver.ProtocolHTTPConnect, - Transport: &apiserver.Transport{ - TCP: &apiserver.TCPTransport{ + clusterConn = apiserverv1beta1.Connection{ + ProxyProtocol: apiserverv1beta1.ProtocolHTTPConnect, + Transport: &apiserverv1beta1.Transport{ + TCP: &apiserverv1beta1.TCPTransport{ URL: fmt.Sprintf("https://%s:%d", controlConfig.BindAddressOrLoopback(false, true), controlConfig.SupervisorPort), - TLSConfig: &apiserver.TLSConfig{ + TLSConfig: &apiserverv1beta1.TLSConfig{ CABundle: controlConfig.Runtime.ServerCA, ClientKey: controlConfig.Runtime.ClientKubeAPIKey, ClientCert: controlConfig.Runtime.ClientKubeAPICert, @@ -807,12 +807,12 @@ func genEgressSelectorConfig(controlConfig *config.Control) error { } } - egressConfig := apiserver.EgressSelectorConfiguration{ + egressConfig := apiserverv1beta1.EgressSelectorConfiguration{ TypeMeta: metav1.TypeMeta{ Kind: "EgressSelectorConfiguration", APIVersion: "apiserver.k8s.io/v1beta1", }, - EgressSelections: []apiserver.EgressSelection{ + EgressSelections: []apiserverv1beta1.EgressSelection{ { Name: "cluster", Connection: clusterConn, diff --git a/pkg/daemons/control/server.go b/pkg/daemons/control/server.go index 993bb2cfc591..54429dd2b010 100644 --- a/pkg/daemons/control/server.go +++ b/pkg/daemons/control/server.go @@ -19,7 +19,14 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" authorizationv1 "k8s.io/api/authorization/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + 
+	"k8s.io/client-go/tools/cache"
+	toolswatch "k8s.io/client-go/tools/watch"
+	cloudproviderapi "k8s.io/cloud-provider/api"
 	logsapi "k8s.io/component-base/logs/api/v1"
 	"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
 	"k8s.io/kubernetes/pkg/registry/core/node"
@@ -157,8 +164,36 @@ func scheduler(ctx context.Context, cfg *config.Control) error {
 
 	args := config.GetArgs(argsMap, cfg.ExtraSchedulerAPIArgs)
 
+	schedulerNodeReady := make(chan struct{})
+
+	go func() {
+		defer close(schedulerNodeReady)
+
+	apiReadyLoop:
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-cfg.Runtime.APIServerReady:
+				break apiReadyLoop
+			case <-time.After(30 * time.Second):
+				logrus.Infof("Waiting for API server to become available to start kube-scheduler")
+			}
+		}
+
+		// If we're running the embedded cloud controller, wait for it to untaint at least one
+		// node (usually, the local node) before starting the scheduler to ensure that it
+		// finds a node that is ready to run pods during its initial scheduling loop.
+		if !cfg.DisableCCM {
+			logrus.Infof("Waiting for untainted node")
+			if err := waitForUntaintedNode(ctx, runtime.KubeConfigScheduler); err != nil {
+				logrus.Fatalf("failed to wait for untainted node: %v", err)
+			}
+		}
+	}()
+
 	logrus.Infof("Running kube-scheduler %s", config.ArgString(args))
-	return executor.Scheduler(ctx, cfg.Runtime.APIServerReady, args)
+	return executor.Scheduler(ctx, schedulerNodeReady, args)
 }
 
 func apiServer(ctx context.Context, cfg *config.Control) error {
@@ -323,7 +358,6 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error {
 		"authentication-kubeconfig":    runtime.KubeConfigCloudController,
 		"node-status-update-frequency": "1m0s",
 		"bind-address":                 cfg.Loopback(false),
-		"feature-gates":                "CloudDualStackNodeIPs=true",
 	}
 	if cfg.NoLeaderElect {
 		argsMap["leader-elect"] = "false"
@@ -359,7 +393,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error {
 		case <-cfg.Runtime.APIServerReady:
 			break apiReadyLoop
 		case <-time.After(30 * time.Second):
-			logrus.Infof("Waiting for API server to become available")
+			logrus.Infof("Waiting for API server to become available to start cloud-controller-manager")
 		}
 	}
 
@@ -449,3 +483,50 @@ func promise(f func() error) <-chan error {
 	}()
 	return c
 }
+
+// waitForUntaintedNode watches nodes, waiting to find one not tainted as
+// uninitialized by the external cloud provider.
+func waitForUntaintedNode(ctx context.Context, kubeConfig string) error {
+
+	restConfig, err := util.GetRESTConfig(kubeConfig)
+	if err != nil {
+		return err
+	}
+	coreClient, err := typedcorev1.NewForConfig(restConfig)
+	if err != nil {
+		return err
+	}
+	nodes := coreClient.Nodes()
+
+	lw := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (object k8sruntime.Object, e error) {
+			return nodes.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
+			return nodes.Watch(ctx, options)
+		},
+	}
+
+	condition := func(ev watch.Event) (bool, error) {
+		if node, ok := ev.Object.(*v1.Node); ok {
+			return getCloudTaint(node.Spec.Taints) == nil, nil
+		}
+		return false, errors.New("event object not of type v1.Node")
+	}
+
+	if _, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition); err != nil {
+		return errors.Wrap(err, "failed to wait for untainted node")
+	}
+	return nil
+}
+
+// getCloudTaint returns the external cloud provider taint, if present.
+// Cribbed from k8s.io/cloud-provider/controllers/node/node_controller.go
+func getCloudTaint(taints []v1.Taint) *v1.Taint {
+	for _, taint := range taints {
+		if taint.Key == cloudproviderapi.TaintExternalCloudProvider {
+			return &taint
+		}
+	}
+	return nil
+}
diff --git a/pkg/daemons/executor/embed.go b/pkg/daemons/executor/embed.go
index 7e69f956e84b..ab52ee2f7ac4 100644
--- a/pkg/daemons/executor/embed.go
+++ b/pkg/daemons/executor/embed.go
@@ -8,7 +8,6 @@ import (
 	"flag"
 	"net/http"
 	"os"
-	"runtime"
 	"runtime/debug"
 	"strconv"
 	"time"
@@ -21,16 +20,8 @@ import (
 	"github.com/k3s-io/k3s/pkg/version"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	k8sruntime "k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
-	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/tools/cache"
-	toolswatch "k8s.io/client-go/tools/watch"
 	cloudprovider "k8s.io/cloud-provider"
-	cloudproviderapi "k8s.io/cloud-provider/api"
 	ccmapp "k8s.io/cloud-provider/app"
 	cloudcontrollerconfig "k8s.io/cloud-provider/app/config"
 	"k8s.io/cloud-provider/names"
@@ -152,23 +143,11 @@ func (*Embedded) APIServer(ctx context.Context, etcdReady <-chan struct{}, args
 }
 
 func (e *Embedded) Scheduler(ctx context.Context, apiReady <-chan struct{}, args []string) error {
 	command := sapp.NewSchedulerCommand()
 	command.SetArgs(args)
 
 	go func() {
 		<-apiReady
-		// wait for Bootstrap to set nodeConfig
-		for e.nodeConfig == nil {
-			runtime.Gosched()
-		}
-		// If we're running the embedded cloud controller, wait for it to untaint at least one
-		// node (usually, the local node) before starting the scheduler to ensure that it
-		// finds a node that is ready to run pods during its initial scheduling loop.
-		if !e.nodeConfig.AgentConfig.DisableCCM {
-			if err := waitForUntaintedNode(ctx, e.nodeConfig.AgentConfig.KubeConfigKubelet); err != nil {
-				logrus.Fatalf("failed to wait for untained node: %v", err)
-			}
-		}
 		defer func() {
 			if err := recover(); err != nil {
 				logrus.WithField("stack", string(debug.Stack())).Fatalf("scheduler panic: %v", err)
@@ -264,49 +243,3 @@ func (e *Embedded) Containerd(ctx context.Context, cfg *daemonconfig.Node) error
 func (e *Embedded) Docker(ctx context.Context, cfg *daemonconfig.Node) error {
 	return cridockerd.Run(ctx, cfg)
 }
-
-// waitForUntaintedNode watches nodes, waiting to find one not tainted as
-// uninitialized by the external cloud provider.
-func waitForUntaintedNode(ctx context.Context, kubeConfig string) error {
-	restConfig, err := util.GetRESTConfig(kubeConfig)
-	if err != nil {
-		return err
-	}
-	coreClient, err := typedcorev1.NewForConfig(restConfig)
-	if err != nil {
-		return err
-	}
-	nodes := coreClient.Nodes()
-
-	lw := &cache.ListWatch{
-		ListFunc: func(options metav1.ListOptions) (object k8sruntime.Object, e error) {
-			return nodes.List(ctx, options)
-		},
-		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
-			return nodes.Watch(ctx, options)
-		},
-	}
-
-	condition := func(ev watch.Event) (bool, error) {
-		if node, ok := ev.Object.(*v1.Node); ok {
-			return getCloudTaint(node.Spec.Taints) == nil, nil
-		}
-		return false, errors.New("event object not of type v1.Node")
-	}
-
-	if _, err := toolswatch.UntilWithSync(ctx, lw, &v1.Node{}, nil, condition); err != nil {
-		return errors.Wrap(err, "failed to wait for untainted node")
-	}
-	return nil
-}
-
-// getCloudTaint returns the external cloud provider taint, if present.
-// Cribbed from k8s.io/cloud-provider/controllers/node/node_controller.go
-func getCloudTaint(taints []v1.Taint) *v1.Taint {
-	for _, taint := range taints {
-		if taint.Key == cloudproviderapi.TaintExternalCloudProvider {
-			return &taint
-		}
-	}
-	return nil
-}
diff --git a/pkg/deploy/zz_generated_bindata.go b/pkg/deploy/zz_generated_bindata.go
index f518d6034625..e2c32387b1d3 100644
--- a/pkg/deploy/zz_generated_bindata.go
+++ b/pkg/deploy/zz_generated_bindata.go
@@ -292,7 +292,7 @@ func metricsServerResourceReaderYaml() (*asset, error) {
 	return a, nil
 }
 
-var _rolebindingsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x94\x31\x6f\xe3\x30\x0c\x85\x77\xfd\x0a\x21\xbb\x72\x38\xdc\x72\xf0\xd8\x0e\xdd\x03\xb4\xbb\x2c\xb1\x09\x6b\x59\x12\x28\x39\x41\xfb\xeb\x0b\xc7\x4e\xd2\xc4\x76\xe0\xb4\xe9\x66\x0b\xe2\xfb\x48\xbe\x07\xd9\x84\x2f\xc0\x82\x14\x0b\xcd\xa5\x75\x4b\xdb\xe4\x0d\x31\x7e\xd8\x8c\x14\x97\xd5\x7f\x59\x22\xfd\xd9\xfe\x55\x15\x46\x5f\xe8\xc7\xd0\x48\x06\x5e\x51\x80\x07\x8c\x1e\xe3\x5a\xd5\x90\xad\xb7\xd9\x16\x4a\xeb\x68\x6b\x28\x74\xd5\x94\x60\x6c\x42\x01\xde\x02\x9b\xf6\x37\x40\x36\xd6\xd7\x18\x15\x53\x80\x15\xbc\xb6\xb7\x6d\xc2\x27\xa6\x26\x5d\x21\x2b\xad\x07\xe0\x23\x47\xde\x25\x43\x5d\x1c\xf5\x13\xf6\x0c\x69\xca\x37\x70\x59\x0a\x65\x6e\x82\x3c\x0b\xf0\xc4\x14\x4a\x19\x63\xd4\xf7\xb7\x35\xb2\xa6\x43\xfb\xff\xc4\x38\x8a\x99\x29\x04\x60\xc5\x4d\x80\xb3\xc6\xa5\xad\x30\x7a\xb1\x50\x5a\x33\x08\x35\xec\xa0\x3f\x8b\xe4\x41\x94\xd6\x5b\xe0\xb2\x3f\x5a\x43\x9e\x59\x6b\x6b\x90\x64\xdd\xa5\x40\x40\xc9\xfb\x8f\x9d\xcd\x6e\x33\xa2\x15\x21\xef\x88\x2b\x8c\xeb\x7e\xde\x31\xf1\xee\x4e\xa2\x80\x0e\xf7\x04\xa3\x5d\xb7\x0c\x87\x9e\x6f\x45\x8e\x10\x20\xfa\x44\x18\x73\xa7\x9d\xc8\x4f\x69\xb6\x0b\x39\x69\xff\xd0\xc5\xe9\xcc\x4f\x98\x79\xff\xb0\x9f\x03\x4e\x49\x6f\x67\x9c\xc7\xb8\x48\xfb\x75\xc0\xfd\x63\xff\x35\x07\xa6\x4d\xf0\x64\xe4\x07\x49\x1b\xc6\x60\x76\xa8\x7e\xcd\xf8\x91\x71\xee\x67\xfa\x50\xfc\xdc\xf0\xae\x72\x8f\x18\x3a\x79\x78\x1d\xe6\xb5\xf1\x19\x00\x00\xff\xff\x20\xa2\xda\xb0\x09\x06\x00\x00")
+var _rolebindingsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x94\x41\x8f\xda\x30\x10\x85\xef\xfe\x15\x16\x77\x83\xaa\x5e\xaa\x1c\xdb\x43\xef\x48\xed\xdd\xb1\xa7\x30\x8d\x63\x5b\x33\x63\x50\xfb\xeb\xab\x90\x40\x17\x92\xb0\x64\x97\x3d\x25\xb1\xec\xf7\x8d\x67\xde\x8b\xcd\xf8\x13\x88\x31\xc5\x4a\x53\x6d\xdd\xda\x16\xd9\x27\xc2\xbf\x56\x30\xc5\x75\xf3\x85\xd7\x98\x36\x87\x4f\xaa\xc1\xe8\x2b\xfd\x2d\x14\x16\xa0\x6d\x0a\xf0\x15\xa3\xc7\xb8\x53\x2d\x88\xf5\x56\x6c\xa5\xb4\x8e\xb6\x85\x4a\x37\xa5\x06\x63\x33\x32\xd0\x01\xc8\x74\x9f\x01\xc4\x58\xdf\x62\x54\x94\x02\x6c\xe1\x57\xb7\xdb\x66\xfc\x4e\xa9\xe4\x3b\x64\xa5\xf5\x08\x7c\xe1\xf0\x1f\x16\x68\xab\x8b\x7e\xc6\x81\xc1\xa5\xfe\x0d\x4e\xb8\x52\x66\x11\xe4\x07\x03\xcd\xdc\x42\x29\x63\x8c\x7a\x7b\xb7\x26\xda\x74\x2e\xff\x33\x1b\x97\xa2\x50\x0a\x01\x48\x51\x09\x70\x55\x38\x77\x27\x8c\x5e\xad\x94\xd6\x04\x9c\x0a\x39\x18\xd6\x62\xf2\xc0\x4a\xeb\x03\x50\x3d\x2c\xed\x40\x4e\xcf\x80\xdc\xbf\x1c\xad\xb8\xfd\x02\xb9\x0d\x8b\x95\x72\xa3\x9a\x17\x88\xd8\x16\x38\x5b\x77\x5b\xd8\xab\x05\x45\x90\x63\xa2\x06\xe3\x6e\xe8\xe3\x94\x78\xbf\x27\xa7\x80\x0e\x4f\x04\xa3\x5d\xdf\x64\x87\x9e\x96\x22\x27\x08\x10\x7d\x4e\x18\xa5\xd7\xce\xc9\xcf\x69\x9e\x1b\xdd\x6b\xbf\xd3\x1d\xf3\x59\x9a\x31\xc9\xf3\x43\x74\x0d\xf8\x9f\xa0\xee\x8e\x8f\x31\x6e\x52\x74\x1f\xf0\xfc\x38\xbd\xf4\x81\xe9\xac\x3c\x1b\xa5\x91\xd3\xc6\x36\x78\xd8\x54\x1f\x36\xf8\x89\xeb\x3c\x6f\xe8\x63\xf1\xeb\x81\xf7\x27\x4f\x88\xf1\x24\xcf\x7f\x9d\xc7\xca\xf8\x17\x00\x00\xff\xff\x40\xa6\x57\x0f\x61\x06\x00\x00")
 
 func rolebindingsYamlBytes() ([]byte, error) {
 	return bindataRead(
diff --git a/tests/integration/kubeflags/kubeflags_test.go b/tests/integration/kubeflags/kubeflags_test.go
index ccbf7eac6103..90f4baabab0d 100644
--- a/tests/integration/kubeflags/kubeflags_test.go
+++ b/tests/integration/kubeflags/kubeflags_test.go
@@ -153,7 +153,7 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
 				"--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 "+
 				"--cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 "+
 				"--configure-cloud-routes=false --controllers=*,-route,-cloud-node,-cloud-node-lifecycle "+
-				"--feature-gates=CloudDualStackNodeIPs=true --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
+				"--kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
 				"--leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false --secure-port=0")
 			if err != nil {
 				return err