From bc4001db679f0d2ea402c1de46ddb751d17f1a3a Mon Sep 17 00:00:00 2001
From: vadasambar
Date: Wed, 27 Sep 2023 11:48:33 +0530
Subject: [PATCH] test: add more unit tests

- add tests for kwok helpers
- fix and update kwok config tests
- fix a bug where the GPU label was assigned to `kwokConfig.status.key`
  instead of `kwokConfig.status.gpuLabel`
- expose `loadConfigFile` -> `LoadConfigFile`
- throw an error if the templates configmap does not have a `templates` key
  (its value holds the node templates)
- finish the test for `GPULabel()`
- add tests for `NodeGroupForNode()`
- expose `loadNodeTemplatesFromConfigMap` -> `LoadNodeTemplatesFromConfigMap`
- fix a bug where `KwokCloudProvider`'s kwok config was empty
  (which caused `GPULabel()` to return an empty string)

Signed-off-by: vadasambar
---
 .../cloudprovider/kwok/kwok_config.go         |   4 +-
 .../cloudprovider/kwok/kwok_config_test.go    |  22 +-
 .../cloudprovider/kwok/kwok_helpers.go        |   6 +-
 .../cloudprovider/kwok/kwok_helpers_test.go   | 822 +++++++++++++++---
 .../cloudprovider/kwok/kwok_provider.go       |   5 +-
 .../cloudprovider/kwok/kwok_provider_test.go  | 102 ++-
 6 files changed, 790 insertions(+), 171 deletions(-)

diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_config.go b/cluster-autoscaler/cloudprovider/kwok/kwok_config.go
index bc786c7480a3..59be17b97a5c 100644
--- a/cluster-autoscaler/cloudprovider/kwok/kwok_config.go
+++ b/cluster-autoscaler/cloudprovider/kwok/kwok_config.go
@@ -79,7 +79,7 @@ func getConfigMapName() string {
 	return configMapName
 }
 
-func loadConfigFile(kubeClient kubeclient.Interface) (*KwokProviderConfig, error) {
+func LoadConfigFile(kubeClient kubeclient.Interface) (*KwokProviderConfig, error) {
 	configMapName := getConfigMapName()
 	currentNamespace := getCurrentNamespace()
 
@@ -146,8 +146,8 @@ func loadConfigFile(kubeClient kubeclient.Interface) (*KwokProviderConfig, error
 	} else {
 		if kwokConfig.Nodes.GPUConfig.GPULabelKey != "" &&
 			kwokConfig.Nodes.GPUConfig.AvailableGPUTypes != nil {
-			kwokConfig.status.key = kwokConfig.Nodes.GPUConfig.GPULabelKey
 			kwokConfig.status.availableGPUTypes = kwokConfig.Nodes.GPUConfig.AvailableGPUTypes
+			kwokConfig.status.gpuLabel = kwokConfig.Nodes.GPUConfig.GPULabelKey
 		} else {
 			return nil, errors.New("nodes.gpuConfig.gpuLabelKey or file.nodes.gpuConfig.availableGPUTypes is empty")
 		}
diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_config_test.go b/cluster-autoscaler/cloudprovider/kwok/kwok_config_test.go
index 75e19e06c54a..7a92d95314f6 100644
--- a/cluster-autoscaler/cloudprovider/kwok/kwok_config_test.go
+++ b/cluster-autoscaler/cloudprovider/kwok/kwok_config_test.go
@@ -47,7 +47,7 @@ nodegroups:
   # node3: m5.xlarge
   # nodegroup1: [node1,node3]
   # nodegroup2: [node2]
-  fromNodeLabelKey: "node.kubernetes.io/instance-type"
+  fromNodeLabelKey: "kubernetes.io/hostname"
   # you can either specify fromNodeLabelKey OR fromNodeAnnotation
   # (both are not allowed)
   # fromNodeAnnotation: "eks.amazonaws.com/nodegroup"
@@ -59,7 +59,7 @@ nodes:
     "nvidia-tesla-k80": {}
     "nvidia-tesla-p100": {}
 configmap:
-  name: kwok-provider-config
+  name: kwok-provider-templates
 kwok: {}
 `
 
@@ -86,7 +86,7 @@ nodes:
     "nvidia-tesla-k80": {}
     "nvidia-tesla-p100": {}
 configmap:
-  name: without-kwok
+  name: kwok-provider-templates
 `
 
 const withStaticKwokRelease = `
@@ -114,7 +114,7 @@ nodes:
 kwok:
   release: "v0.2.1"
 configmap:
-  name: with-static-kwok-release
+  name: kwok-provider-templates
 `
 
 const skipKwokInstall = `
@@ -140,7 +140,7 @@ nodes:
     "nvidia-tesla-k80": {}
     "nvidia-tesla-p100": {}
 configmap:
-  name: skip-kwok-install
+  name: kwok-provider-templates
 kwok:
   skipInstall: true
 `
@@ -168,31 +168,35 @@
func TestLoadConfigFile(t *testing.T) { os.Setenv("POD_NAMESPACE", "kube-system") - kwokConfig, err := loadConfigFile(fakeClient) + kwokConfig, err := LoadConfigFile(fakeClient) assert.Nil(t, err) assert.NotNil(t, kwokConfig) assert.NotNil(t, kwokConfig.status) + assert.NotEmpty(t, kwokConfig.status.gpuLabel) assert.NotEmpty(t, kwokConfig.status.kwokRelease) os.Setenv("KWOK_CONFIG_MAP_NAME", "without-kwok") - kwokConfig, err = loadConfigFile(fakeClient) + kwokConfig, err = LoadConfigFile(fakeClient) assert.Nil(t, err) assert.NotNil(t, kwokConfig) assert.NotNil(t, kwokConfig.status) + assert.NotEmpty(t, kwokConfig.status.gpuLabel) assert.NotEmpty(t, kwokConfig.status.kwokRelease) os.Setenv("KWOK_CONFIG_MAP_NAME", "with-static-kwok-release") - kwokConfig, err = loadConfigFile(fakeClient) + kwokConfig, err = LoadConfigFile(fakeClient) assert.Nil(t, err) assert.NotNil(t, kwokConfig) assert.NotNil(t, kwokConfig.status) + assert.NotEmpty(t, kwokConfig.status.gpuLabel) assert.NotEmpty(t, kwokConfig.status.kwokRelease) assert.Equal(t, kwokConfig.status.kwokRelease, "v0.2.1") os.Setenv("KWOK_CONFIG_MAP_NAME", "skip-kwok-install") - kwokConfig, err = loadConfigFile(fakeClient) + kwokConfig, err = LoadConfigFile(fakeClient) assert.Nil(t, err) assert.NotNil(t, kwokConfig) assert.NotNil(t, kwokConfig.status) + assert.NotEmpty(t, kwokConfig.status.gpuLabel) assert.Empty(t, kwokConfig.status.kwokRelease) } diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_helpers.go b/cluster-autoscaler/cloudprovider/kwok/kwok_helpers.go index 47770dd5f71b..6b1210beb414 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_helpers.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_helpers.go @@ -57,7 +57,7 @@ func loadNodeTemplatesFromCluster(kc *KwokProviderConfig, } // check https://github.com/vadafoss/node-templates for more info on the parsing logic -func loadNodeTemplatesFromConfigMap(kc *KwokProviderConfig, +func LoadNodeTemplatesFromConfigMap(kc *KwokProviderConfig, kubeClient kubernetes.Interface, lister kube_util.NodeLister) ([]*apiv1.Node, error) { currentNamespace := getCurrentNamespace() @@ -68,6 +68,10 @@ func loadNodeTemplatesFromConfigMap(kc *KwokProviderConfig, return nil, fmt.Errorf("failed to get configmap '%s': %v", kc.ConfigMap.Name, err) } + if c.Data[templatesKey] == "" { + return nil, fmt.Errorf("configmap '%s' doesn't have 'templates' key", kc.ConfigMap.Name) + } + scheme := runtime.NewScheme() clientscheme.AddToScheme(scheme) diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_helpers_test.go b/cluster-autoscaler/cloudprovider/kwok/kwok_helpers_test.go index 057650f34039..843fab057888 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_helpers_test.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_helpers_test.go @@ -20,135 +20,628 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" ) -const wrongIndentation = ` +const multipleNodes = ` +apiVersion: v1 +kind: Node +metadata: + annotations: + kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2023-05-31T04:39:16Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: 
amd64 + kubernetes.io/hostname: kind-control-plane + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + name: kind-control-plane + resourceVersion: "603" + uid: 86716ec7-3071-4091-b055-77b4361d1dca +spec: + podCIDR: 10.244.0.0/24 + podCIDRs: + - 10.244.0.0/24 + providerID: kind://docker/kind/kind-control-plane + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane +status: + addresses: + - address: 172.18.0.2 + type: InternalIP + - address: kind-control-plane + type: Hostname + allocatable: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + capacity: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2023-05-31T04:40:29Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2023-05-31T04:40:29Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2023-05-31T04:40:29Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2023-05-31T04:40:29Z" + lastTransitionTime: "2023-05-31T04:39:46Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - registry.k8s.io/etcd:3.5.6-0 + sizeBytes: 102542580 + - names: + - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 + - registry.k8s.io/kube-apiserver:v1.26.3 + sizeBytes: 80392681 + - names: + - docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc + - registry.k8s.io/kube-controller-manager:v1.26.3 + sizeBytes: 68538487 + - names: + - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 + - registry.k8s.io/kube-proxy:v1.26.3 + sizeBytes: 67217404 + - names: + - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 + - registry.k8s.io/kube-scheduler:v1.26.3 + sizeBytes: 57761399 + - names: + - docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + sizeBytes: 27726335 + - names: + - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + - docker.io/kindest/local-path-provisioner@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + sizeBytes: 18664669 + - names: + - registry.k8s.io/coredns/coredns:v1.9.3 + sizeBytes: 14837849 + - names: + - docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 + sizeBytes: 3052037 + - names: + - registry.k8s.io/pause:3.7 + sizeBytes: 311278 + nodeInfo: + architecture: amd64 + bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 + containerRuntimeVersion: 
containerd://1.6.19-46-g941215f49 + kernelVersion: 5.15.0-72-generic + kubeProxyVersion: v1.26.3 + kubeletVersion: v1.26.3 + machineID: 96f8c8b8c8ae4600a3654341f207586e + operatingSystem: linux + osImage: Ubuntu + systemUUID: 111aa932-7f99-4bef-aaf7-36aa7fb9b012 +--- + +apiVersion: v1 +kind: Node +metadata: + annotations: + kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2023-05-31T04:39:57Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kind-worker + kubernetes.io/os: linux + name: kind-worker + resourceVersion: "577" + uid: 2ac0eb71-e5cf-4708-bbbf-476e8f19842b +spec: + podCIDR: 10.244.2.0/24 + podCIDRs: + - 10.244.2.0/24 + providerID: kind://docker/kind/kind-worker +status: + addresses: + - address: 172.18.0.3 + type: InternalIP + - address: kind-worker + type: Hostname + allocatable: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + capacity: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:40:05Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - registry.k8s.io/etcd:3.5.6-0 + sizeBytes: 102542580 + - names: + - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 + - registry.k8s.io/kube-apiserver:v1.26.3 + sizeBytes: 80392681 + - names: + - docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc + - registry.k8s.io/kube-controller-manager:v1.26.3 + sizeBytes: 68538487 + - names: + - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 + - registry.k8s.io/kube-proxy:v1.26.3 + sizeBytes: 67217404 + - names: + - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 + - registry.k8s.io/kube-scheduler:v1.26.3 + sizeBytes: 57761399 + - names: + - docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + sizeBytes: 27726335 + - names: + - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + sizeBytes: 18664669 + - names: + - registry.k8s.io/coredns/coredns:v1.9.3 + sizeBytes: 14837849 + - names: + - 
docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 + sizeBytes: 3052037 + - names: + - registry.k8s.io/pause:3.7 + sizeBytes: 311278 + nodeInfo: + architecture: amd64 + bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 + containerRuntimeVersion: containerd://1.6.19-46-g941215f49 + kernelVersion: 5.15.0-72-generic + kubeProxyVersion: v1.26.3 + kubeletVersion: v1.26.3 + machineID: a98a13ff474d476294935341f1ba9816 + operatingSystem: linux + osImage: Ubuntu + systemUUID: 5f3c1af8-a385-4776-85e4-73d7f4252b44 +` + +const nodeList = ` apiVersion: v1 items: - apiVersion: v1 - # everything below should be in-line with apiVersion above - kind: Node - metadata: - annotations: - kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock - node.alpha.kubernetes.io/ttl: "0" - volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2023-05-31T04:39:57Z" - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: kind-worker - kubernetes.io/os: linux - name: kind-worker - resourceVersion: "577" - uid: 2ac0eb71-e5cf-4708-bbbf-476e8f19842b - spec: - podCIDR: 10.244.2.0/24 - podCIDRs: - - 10.244.2.0/24 - providerID: kind://docker/kind/kind-worker - status: - addresses: - - address: 172.18.0.3 - type: InternalIP - - address: kind-worker - type: Hostname - allocatable: - cpu: "12" - ephemeral-storage: 959786032Ki - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 32781516Ki - pods: "110" - capacity: - cpu: "12" - ephemeral-storage: 959786032Ki - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 32781516Ki - pods: "110" - conditions: - - lastHeartbeatTime: "2023-05-31T04:40:17Z" - lastTransitionTime: "2023-05-31T04:39:57Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2023-05-31T04:40:17Z" - lastTransitionTime: "2023-05-31T04:39:57Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2023-05-31T04:40:17Z" - lastTransitionTime: "2023-05-31T04:39:57Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2023-05-31T04:40:17Z" - lastTransitionTime: "2023-05-31T04:40:05Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - registry.k8s.io/etcd:3.5.6-0 - sizeBytes: 102542580 - - names: - - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 - - registry.k8s.io/kube-apiserver:v1.26.3 - sizeBytes: 80392681 - - names: - - docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc - - registry.k8s.io/kube-controller-manager:v1.26.3 - sizeBytes: 68538487 - - names: - - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 - - registry.k8s.io/kube-proxy:v1.26.3 - sizeBytes: 67217404 - - names: - - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 - - registry.k8s.io/kube-scheduler:v1.26.3 - sizeBytes: 57761399 - - names: - - 
docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af - - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af - sizeBytes: 27726335 - - names: - - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 - sizeBytes: 18664669 - - names: - - registry.k8s.io/coredns/coredns:v1.9.3 - sizeBytes: 14837849 - - names: - - docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 - sizeBytes: 3052037 - - names: - - registry.k8s.io/pause:3.7 - sizeBytes: 311278 - nodeInfo: - architecture: amd64 - bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 - containerRuntimeVersion: containerd://1.6.19-46-g941215f49 - kernelVersion: 5.15.0-72-generic - kubeProxyVersion: v1.26.3 - kubeletVersion: v1.26.3 - machineID: a98a13ff474d476294935341f1ba9816 - operatingSystem: linux - osImage: Ubuntu 22.04.2 LTS - systemUUID: 5f3c1af8-a385-4776-85e4-73d7f4252b44 + kind: Node + metadata: + annotations: + kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2023-05-31T04:39:16Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kind-control-plane + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + name: kind-control-plane + resourceVersion: "506" + uid: 86716ec7-3071-4091-b055-77b4361d1dca + spec: + podCIDR: 10.244.0.0/24 + podCIDRs: + - 10.244.0.0/24 + providerID: kind://docker/kind/kind-control-plane + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + status: + addresses: + - address: 172.18.0.2 + type: InternalIP + - address: kind-control-plane + type: Hostname + allocatable: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + capacity: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2023-05-31T04:39:58Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2023-05-31T04:39:58Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2023-05-31T04:39:58Z" + lastTransitionTime: "2023-05-31T04:39:13Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2023-05-31T04:39:58Z" + lastTransitionTime: "2023-05-31T04:39:46Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - registry.k8s.io/etcd:3.5.6-0 + sizeBytes: 102542580 + - names: + - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 + - registry.k8s.io/kube-apiserver:v1.26.3 + sizeBytes: 80392681 + - names: + - 
docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc + - registry.k8s.io/kube-controller-manager:v1.26.3 + sizeBytes: 68538487 + - names: + - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 + - registry.k8s.io/kube-proxy:v1.26.3 + sizeBytes: 67217404 + - names: + - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 + - registry.k8s.io/kube-scheduler:v1.26.3 + sizeBytes: 57761399 + - names: + - docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + sizeBytes: 27726335 + - names: + - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + sizeBytes: 18664669 + - names: + - registry.k8s.io/coredns/coredns:v1.9.3 + sizeBytes: 14837849 + - names: + - docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 + sizeBytes: 3052037 + - names: + - registry.k8s.io/pause:3.7 + sizeBytes: 311278 + nodeInfo: + architecture: amd64 + bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 + containerRuntimeVersion: containerd://1.6.19-46-g941215f49 + kernelVersion: 5.15.0-72-generic + kubeProxyVersion: v1.26.3 + kubeletVersion: v1.26.3 + machineID: 96f8c8b8c8ae4600a3654341f207586e + operatingSystem: linux + osImage: Ubuntu + systemUUID: 111aa932-7f99-4bef-aaf7-36aa7fb9b012 +- apiVersion: v1 + kind: Node + metadata: + annotations: + kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2023-05-31T04:39:57Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kind-worker + kubernetes.io/os: linux + name: kind-worker + resourceVersion: "577" + uid: 2ac0eb71-e5cf-4708-bbbf-476e8f19842b + spec: + podCIDR: 10.244.2.0/24 + podCIDRs: + - 10.244.2.0/24 + providerID: kind://docker/kind/kind-worker + status: + addresses: + - address: 172.18.0.3 + type: InternalIP + - address: kind-worker + type: Hostname + allocatable: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + capacity: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:40:05Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + 
Port: 10250 + images: + - names: + - registry.k8s.io/etcd:3.5.6-0 + sizeBytes: 102542580 + - names: + - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 + - registry.k8s.io/kube-apiserver:v1.26.3 + sizeBytes: 80392681 + - names: + - docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc + - registry.k8s.io/kube-controller-manager:v1.26.3 + sizeBytes: 68538487 + - names: + - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 + - registry.k8s.io/kube-proxy:v1.26.3 + sizeBytes: 67217404 + - names: + - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 + - registry.k8s.io/kube-scheduler:v1.26.3 + sizeBytes: 57761399 + - names: + - docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + sizeBytes: 27726335 + - names: + - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + sizeBytes: 18664669 + - names: + - registry.k8s.io/coredns/coredns:v1.9.3 + sizeBytes: 14837849 + - names: + - docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 + sizeBytes: 3052037 + - names: + - registry.k8s.io/pause:3.7 + sizeBytes: 311278 + nodeInfo: + architecture: amd64 + bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 + containerRuntimeVersion: containerd://1.6.19-46-g941215f49 + kernelVersion: 5.15.0-72-generic + kubeProxyVersion: v1.26.3 + kubeletVersion: v1.26.3 + machineID: a98a13ff474d476294935341f1ba9816 + operatingSystem: linux + osImage: Ubuntu + systemUUID: 5f3c1af8-a385-4776-85e4-73d7f4252b44 +kind: List +metadata: + resourceVersion: "" +` + +const wrongIndentation = ` +apiVersion: v1 + items: + - apiVersion: v1 +# everything below should be in-line with apiVersion above + kind: Node +metadata: + annotations: + kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2023-05-31T04:39:57Z" + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + kubernetes.io/arch: amd64 + kubernetes.io/hostname: kind-worker + kubernetes.io/os: linux + name: kind-worker + resourceVersion: "577" + uid: 2ac0eb71-e5cf-4708-bbbf-476e8f19842b +spec: + podCIDR: 10.244.2.0/24 + podCIDRs: + - 10.244.2.0/24 + providerID: kind://docker/kind/kind-worker +status: + addresses: + - address: 172.18.0.3 + type: InternalIP + - address: kind-worker + type: Hostname + allocatable: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + capacity: + cpu: "12" + ephemeral-storage: 959786032Ki + hugepages-1Gi: "0" + hugepages-2Mi: "0" + memory: 32781516Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: 
DiskPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:39:57Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2023-05-31T04:40:17Z" + lastTransitionTime: "2023-05-31T04:40:05Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - registry.k8s.io/etcd:3.5.6-0 + sizeBytes: 102542580 + - names: + - docker.io/library/import-2023-03-30@sha256:ba097b515c8c40689733c0f19de377e9bf8995964b7d7150c2045f3dfd166657 + - registry.k8s.io/kube-apiserver:v1.26.3 + sizeBytes: 80392681 + - names: + - docker.io/library/import-2023-03-30@sha256:8dbb345de79d1c44f59a7895da702a5f71997ae72aea056609445c397b0c10dc + - registry.k8s.io/kube-controller-manager:v1.26.3 + sizeBytes: 68538487 + - names: + - docker.io/library/import-2023-03-30@sha256:44db4d50a5f9c8efbac0d37ea974d1c0419a5928f90748d3d491a041a00c20b5 + - registry.k8s.io/kube-proxy:v1.26.3 + sizeBytes: 67217404 + - names: + - docker.io/library/import-2023-03-30@sha256:3dd2337f70af979c7362b5e52bbdfcb3a5fd39c78d94d02145150cd2db86ba39 + - registry.k8s.io/kube-scheduler:v1.26.3 + sizeBytes: 57761399 + - names: + - docker.io/kindest/kindnetd:v20230330-48f316cd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + - docker.io/kindest/kindnetd@sha256:c19d6362a6a928139820761475a38c24c0cf84d507b9ddf414a078cf627497af + sizeBytes: 27726335 + - names: + - docker.io/kindest/local-path-provisioner:v0.0.23-kind.0@sha256:f2d0a02831ff3a03cf51343226670d5060623b43a4cfc4808bd0875b2c4b9501 + sizeBytes: 18664669 + - names: + - registry.k8s.io/coredns/coredns:v1.9.3 + sizeBytes: 14837849 + - names: + - docker.io/kindest/local-path-helper:v20230330-48f316cd@sha256:135203f2441f916fb13dad1561d27f60a6f11f50ec288b01a7d2ee9947c36270 + sizeBytes: 3052037 + - names: + - registry.k8s.io/pause:3.7 + sizeBytes: 311278 + nodeInfo: + architecture: amd64 + bootID: 2d71b318-5d07-4de2-9e61-2da28cf5bbf0 + containerRuntimeVersion: containerd://1.6.19-46-g941215f49 + kernelVersion: 5.15.0-72-generic + kubeProxyVersion: v1.26.3 + kubeletVersion: v1.26.3 + machineID: a98a13ff474d476294935341f1ba9816 + operatingSystem: linux + osImage: Ubuntu 22.04.2 LTS + systemUUID: 5f3c1af8-a385-4776-85e4-73d7f4252b44 kind: List metadata: resourceVersion: "" @@ -281,11 +774,14 @@ metadata: func TestLoadNodeTemplatesFromConfigMap(t *testing.T) { var testTemplatesMap = map[string]string{ - "noGPULabel": noGPULabel, "wrongIndentation": wrongIndentation, defaultTemplatesConfigName: testTemplates, + "multipleNodes": multipleNodes, + "nodeList": nodeList, } + testTemplateName := defaultTemplatesConfigName + fakeClient := &fake.Clientset{} fakeClient.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { getAction := action.(core.GetAction) @@ -295,22 +791,22 @@ func TestLoadNodeTemplatesFromConfigMap(t *testing.T) { } if getAction.GetName() == defaultConfigName { - return true, &v1.ConfigMap{ + return true, &apiv1.ConfigMap{ Data: map[string]string{ configKey: testConfig, }, }, nil } - if testTemplatesMap[getAction.GetName()] != "" { - return true, &v1.ConfigMap{ + if testTemplatesMap[testTemplateName] != "" { + return true, &apiv1.ConfigMap{ Data: map[string]string{ - templatesKey: testTemplatesMap[getAction.GetName()], + templatesKey: testTemplatesMap[testTemplateName], }, }, nil } - return true, nil, 
errors.NewNotFound(v1.Resource("configmaps"), "whatever") + return true, nil, errors.NewNotFound(apiv1.Resource("configmaps"), "whatever") }) fakeClient.Fake.AddReactor("list", "nodes", func(action core.Action) (bool, runtime.Object, error) { @@ -320,26 +816,70 @@ func TestLoadNodeTemplatesFromConfigMap(t *testing.T) { return false, nil, nil } - if getAction.GetName() == defaultConfigName { - return true, &v1.ConfigMap{ - Data: map[string]string{ - configKey: testConfig, - }, - }, nil - } + return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(apiv1.Resource("nodes"), "whatever") + }) - if testTemplatesMap[getAction.GetName()] != "" { - return true, &v1.ConfigMap{ - Data: map[string]string{ - templatesKey: testTemplatesMap[getAction.GetName()], - }, - }, nil + os.Setenv("POD_NAMESPACE", "kube-system") + + kwokConfig, err := LoadConfigFile(fakeClient) + assert.Nil(t, err) + + // happy path + testTemplateName = defaultTemplatesConfigName + nos, err := LoadNodeTemplatesFromConfigMap(kwokConfig, fakeClient, kube_util.NewTestNodeLister([]*apiv1.Node{})) + assert.Nil(t, err) + assert.NotEmpty(t, nos) + assert.Greater(t, len(nos), 0) + + testTemplateName = "wrongIndentation" + nos, err = LoadNodeTemplatesFromConfigMap(kwokConfig, fakeClient, kube_util.NewTestNodeLister([]*apiv1.Node{})) + assert.Error(t, err) + assert.Empty(t, nos) + assert.Equal(t, len(nos), 0) + + // multiple nodes is something like []*Node{node1, node2, node3, ...} + testTemplateName = "multipleNodes" + nos, err = LoadNodeTemplatesFromConfigMap(kwokConfig, fakeClient, kube_util.NewTestNodeLister([]*apiv1.Node{})) + assert.Nil(t, err) + assert.NotEmpty(t, nos) + assert.Greater(t, len(nos), 0) + + // node list is something like []*List{Items:[]*Node{node1, node2, node3, ...}} + testTemplateName = "nodeList" + nos, err = LoadNodeTemplatesFromConfigMap(kwokConfig, fakeClient, kube_util.NewTestNodeLister([]*apiv1.Node{})) + assert.Nil(t, err) + assert.NotEmpty(t, nos) + assert.Greater(t, len(nos), 0) + + // fake client which returns configmap with wrong key + fakeClient = &fake.Clientset{} + fakeClient.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { + getAction := action.(core.GetAction) + + if getAction == nil { + return false, nil, nil } - return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(v1.Resource("nodes"), "whatever") + return true, &apiv1.ConfigMap{ + Data: map[string]string{ + "foo": testTemplatesMap[testTemplateName], + }, + }, nil }) - os.Setenv("POD_NAMESPACE", "kube-system") + fakeClient.Fake.AddReactor("list", "nodes", func(action core.Action) (bool, runtime.Object, error) { + getAction := action.(core.GetAction) + + if getAction == nil { + return false, nil, nil + } + + return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(apiv1.Resource("nodes"), "whatever") + }) - // loadNodeTemplatesFromConfigMap + // throw error if configmap data key is not `templates` + nos, err = LoadNodeTemplatesFromConfigMap(kwokConfig, fakeClient, kube_util.NewTestNodeLister([]*apiv1.Node{})) + assert.Error(t, err) + assert.Empty(t, nos) + assert.Equal(t, len(nos), 0) } diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_provider.go b/cluster-autoscaler/cloudprovider/kwok/kwok_provider.go index deb50ae75cba..11f05d4c3e78 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_provider.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_provider.go @@ -191,7 +191,7 @@ func BuildKwok(opts config.AutoscalingOptions, do 
cloudprovider.NodeGroupDiscove func buildKwokProvider(ko *kwokOptions) (*KwokCloudProvider, error) { - kwokConfig, err := loadConfigFile(ko.kubeClient) + kwokConfig, err := LoadConfigFile(ko.kubeClient) if err != nil { return nil, fmt.Errorf("failed to load kwok provider config: %v", err) } @@ -200,7 +200,7 @@ func buildKwokProvider(ko *kwokOptions) (*KwokCloudProvider, error) { var nodeTemplates []*apiv1.Node switch kwokConfig.ReadNodesFrom { case nodeTemplatesFromConfigMap: - if nodeTemplates, err = loadNodeTemplatesFromConfigMap(kwokConfig, ko.kubeClient, ko.nodeLister); err != nil { + if nodeTemplates, err = LoadNodeTemplatesFromConfigMap(kwokConfig, ko.kubeClient, ko.nodeLister); err != nil { return nil, err } case nodeTemplatesFromCluster: @@ -226,5 +226,6 @@ func buildKwokProvider(ko *kwokOptions) (*KwokCloudProvider, error) { lister: ko.nodeLister, kubeClient: ko.kubeClient, resourceLimiter: ko.resourceLimiter, + config: kwokConfig, }, nil } diff --git a/cluster-autoscaler/cloudprovider/kwok/kwok_provider_test.go b/cluster-autoscaler/cloudprovider/kwok/kwok_provider_test.go index c67bcba70b7b..765793aaf514 100644 --- a/cluster-autoscaler/cloudprovider/kwok/kwok_provider_test.go +++ b/cluster-autoscaler/cloudprovider/kwok/kwok_provider_test.go @@ -17,19 +17,18 @@ limitations under the License. package kwok import ( - "fmt" "os" "testing" - "time" "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config" - "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" + kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" ) @@ -43,10 +42,8 @@ func TestGPULabel(t *testing.T) { return false, nil, nil } - fmt.Println("getAction", getAction.GetName()) - if getAction.GetName() == defaultConfigName { - return true, &v1.ConfigMap{ + return true, &apiv1.ConfigMap{ Data: map[string]string{ configKey: testConfig, }, @@ -54,14 +51,14 @@ func TestGPULabel(t *testing.T) { } if getAction.GetName() == defaultTemplatesConfigName { - return true, &v1.ConfigMap{ + return true, &apiv1.ConfigMap{ Data: map[string]string{ templatesKey: testTemplates, }, }, nil } - return true, nil, errors.NewNotFound(v1.Resource("configmaps"), "whatever") + return true, nil, errors.NewNotFound(apiv1.Resource("configmaps"), "whatever") }) fakeClient.Fake.AddReactor("list", "nodes", func(action core.Action) (bool, runtime.Object, error) { @@ -71,7 +68,7 @@ func TestGPULabel(t *testing.T) { return false, nil, nil } - return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(v1.Resource("nodes"), "whatever") + return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(apiv1.Resource("nodes"), "whatever") }) os.Setenv("POD_NAMESPACE", "kube-system") @@ -83,21 +80,94 @@ func TestGPULabel(t *testing.T) { resourceLimiter: cloudprovider.NewResourceLimiter( map[string]int64{cloudprovider.ResourceNameCores: 1, cloudprovider.ResourceNameMemory: 10000000}, map[string]int64{cloudprovider.ResourceNameCores: 10, cloudprovider.ResourceNameMemory: 100000000}), - nodeLister: kubernetes.NewTestNodeLister([]*apiv1.Node{}), - // ngNodeListerFn: , TODO: check this + nodeLister: kube_util.NewTestNodeLister([]*apiv1.Node{}), + ngNodeListerFn: testNodeLister, } p, err := 
buildKwokProvider(ko) assert.NoError(t, err) assert.NotNil(t, p) - time.Sleep(time.Second * 10) - l := p.GPULabel() - assert.Equal(t, "nvidia-tesla-k80", l) + assert.Equal(t, "k8s.amazonaws.com/accelerator", l) } -func testNodeLister() { +func TestNodeGroupForNode(t *testing.T) { + fakeClient := &fake.Clientset{} + fakeClient.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { + getAction := action.(core.GetAction) + + if getAction == nil { + return false, nil, nil + } + + if getAction.GetName() == defaultConfigName { + return true, &apiv1.ConfigMap{ + Data: map[string]string{ + configKey: testConfig, + }, + }, nil + } + + if getAction.GetName() == defaultTemplatesConfigName { + return true, &apiv1.ConfigMap{ + Data: map[string]string{ + templatesKey: testTemplates, + }, + }, nil + } + + return true, nil, errors.NewNotFound(apiv1.Resource("configmaps"), "whatever") + }) + + fakeClient.Fake.AddReactor("list", "nodes", func(action core.Action) (bool, runtime.Object, error) { + getAction := action.(core.GetAction) + + if getAction == nil { + return false, nil, nil + } + + return true, &apiv1.NodeList{Items: []apiv1.Node{}}, errors.NewNotFound(apiv1.Resource("nodes"), "whatever") + }) + + os.Setenv("POD_NAMESPACE", "kube-system") + + ko := &kwokOptions{ + kubeClient: fakeClient, + autoscalingOpts: &config.AutoscalingOptions{}, + discoveryOpts: &cloudprovider.NodeGroupDiscoveryOptions{}, + resourceLimiter: cloudprovider.NewResourceLimiter( + map[string]int64{cloudprovider.ResourceNameCores: 1, cloudprovider.ResourceNameMemory: 10000000}, + map[string]int64{cloudprovider.ResourceNameCores: 10, cloudprovider.ResourceNameMemory: 100000000}), + nodeLister: kube_util.NewTestNodeLister([]*apiv1.Node{}), + ngNodeListerFn: testNodeLister, + } + + p, err := buildKwokProvider(ko) + assert.NoError(t, err) + assert.NotNil(t, p) + + testNode := &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "kubernetes.io/hostname": "kind-worker", + "k8s.amazonaws.com/accelerator": "nvidia-tesla-k80", + }, + Name: "kind-worker", + }, + Spec: apiv1.NodeSpec{ + ProviderID: ProviderName, + }, + } + ng, err := p.NodeGroupForNode(testNode) + assert.NoError(t, err) + assert.NotNil(t, ng) + assert.Equal(t, "kind-worker", ng.Id()) + +} +func testNodeLister(kubeClient kubernetes.Interface, + filter func(*apiv1.Node) bool, stopChannel <-chan struct{}) kube_util.NodeLister { + return kube_util.NewTestNodeLister(nil) }
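
Note for reviewers: the sketch below shows one way to drive the newly exported helpers outside the test suite, seeding a fake clientset the same way the reactors above do. It is illustrative only: the `config` data key, the `apiVersion: v1alpha1` and `readNodesFrom: configmap` fields, and the exact config YAML schema are assumptions inferred from the fixtures in this patch; the `templates` key and the `k8s.amazonaws.com/accelerator` GPU label are the values this patch actually asserts on.

package main

import (
	"fmt"
	"os"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/kwok"
	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// Assumed minimal provider config; the field names mirror the testConfig
// fixture above, and the gpuLabelKey value matches the TestGPULabel assertion.
const providerConfig = `
apiVersion: v1alpha1
readNodesFrom: configmap
nodegroups:
  fromNodeLabelKey: "kubernetes.io/hostname"
nodes:
  gpuConfig:
    gpuLabelKey: "k8s.amazonaws.com/accelerator"
    availableGPUTypes:
      "nvidia-tesla-k80": {}
configmap:
  name: kwok-provider-templates
kwok: {}
`

// A single bare Node template; the helpers also accept ---separated docs and
// a v1 List, as exercised by the multipleNodes and nodeList fixtures above.
const nodeTemplates = `
apiVersion: v1
kind: Node
metadata:
  name: kind-worker
  labels:
    kubernetes.io/hostname: kind-worker
status:
  allocatable:
    cpu: "12"
    memory: 32781516Ki
`

func main() {
	// LoadConfigFile resolves the provider configmap name (optionally from
	// KWOK_CONFIG_MAP_NAME) and the namespace from POD_NAMESPACE.
	os.Setenv("POD_NAMESPACE", "kube-system")

	// Seed the fake clientset with both configmaps. "config" is the assumed
	// data key for the provider config; "templates" is the key the new guard
	// in LoadNodeTemplatesFromConfigMap checks for.
	client := fake.NewSimpleClientset(
		&apiv1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "kwok-provider-config", Namespace: "kube-system"},
			Data:       map[string]string{"config": providerConfig},
		},
		&apiv1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{Name: "kwok-provider-templates", Namespace: "kube-system"},
			Data:       map[string]string{"templates": nodeTemplates},
		},
	)

	cfg, err := kwok.LoadConfigFile(client)
	if err != nil {
		fmt.Println("load config:", err)
		return
	}

	nodes, err := kwok.LoadNodeTemplatesFromConfigMap(cfg, client, kube_util.NewTestNodeLister(nil))
	if err != nil {
		fmt.Println("load templates:", err)
		return
	}
	fmt.Println("loaded", len(nodes), "node template(s)")
}

If the templates configmap is seeded under any key other than `templates`, the call should now fail with the new "doesn't have 'templates' key" error instead of silently yielding zero templates, which is the behavior the last test case in TestLoadNodeTemplatesFromConfigMap pins down.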