diff --git a/deployer/README.md b/deployer/README.md index c6f5135e32..49548e0cb2 100644 --- a/deployer/README.md +++ b/deployer/README.md @@ -88,7 +88,11 @@ The `deployer.py` file is the main file, that contains all of the commands regis │   │   | ├── decision.py │   │   | └── jobs.py | | └── resource_allocation +│   │   ├── daemonset_requests.py +│   │   ├── daemonset_requests.yaml │   │   ├── generate_choices.py +│   │   ├── instance_capacities.py +│   │   ├── instance_capacities.yaml │   │   ├── node-capacity-info.json │   │   ├── resource_allocation_app.py │   │   └── update_nodeinfo.py @@ -264,6 +268,22 @@ This sub-command can be used to generate the resource allocation choices for giv ##### `generate resource-allocation choices` This generates a custom number of resource allocation choices for a certain instance type, depending on a certain chosen strategy that can be used in the profile list of a hub. +##### `generate resource-allocation daemonset-requests` +Updates `daemonset_requests.yaml` with an individual cluster's DaemonSets' requests summarized. + +Only DaemonSets with running pods are considered, and GPU-related DaemonSets (with "nvidia" in the name) are also ignored. + +To run this command for all clusters, `xargs` can be used like this: + + ls config/clusters | xargs -I {} deployer generate resource-allocation daemonset-requests {} + +##### `generate resource-allocation instance-capacities` +Updates `instance_capacities.yaml` with an individual cluster's running instance types' total and allocatable capacity. + +To run this command for all clusters, `xargs` can be used like this: + + ls config/clusters | xargs -I {} deployer generate resource-allocation instance-capacities {} + ##### `generate resource-allocation node-info-update` This updates the json file `node-capacity-info.json` with info about the capacity of a node of a certain type. This file is then used for generating the resource choices. 
diff --git a/deployer/__main__.py b/deployer/__main__.py index 2b638f7111..782f047f34 100644 --- a/deployer/__main__.py +++ b/deployer/__main__.py @@ -9,7 +9,9 @@ import deployer.commands.generate.dedicated_cluster.aws # noqa: F401 import deployer.commands.generate.dedicated_cluster.gcp # noqa: F401 import deployer.commands.generate.helm_upgrade.jobs # noqa: F401 +import deployer.commands.generate.resource_allocation.daemonset_requests # noqa: F401 import deployer.commands.generate.resource_allocation.generate_choices # noqa: F401 +import deployer.commands.generate.resource_allocation.instance_capacities # noqa: F401 import deployer.commands.generate.resource_allocation.update_nodeinfo # noqa: F401 import deployer.commands.grafana.central_grafana # noqa: F401 import deployer.commands.grafana.deploy_dashboards # noqa: F401 diff --git a/deployer/commands/generate/resource_allocation/daemonset_requests.py b/deployer/commands/generate/resource_allocation/daemonset_requests.py new file mode 100644 index 0000000000..e47ac51ed6 --- /dev/null +++ b/deployer/commands/generate/resource_allocation/daemonset_requests.py @@ -0,0 +1,157 @@ +import json +import math +import subprocess +from pathlib import Path + +import typer +from kubernetes.utils.quantity import parse_quantity +from ruamel.yaml import YAML + +from deployer.infra_components.cluster import Cluster +from deployer.utils.file_acquisition import find_absolute_path_to_cluster_file + +from .resource_allocation_app import resource_allocation_app + +HERE = Path(__file__).parent +yaml = YAML() +yaml.preserve_quotes = True +yaml.indent(mapping=2, sequence=4, offset=2) + + +def get_k8s_distribution(): + """ + Returns a 2-tuple with the guessed k8s distribution based on the k8s + api-server's reported version, either Google's GKE, Amazon's EKS, or Azure's + AKS, and the server's reported gitVersion. + """ + output = subprocess.check_output( + [ + "kubectl", + "version", + "--output=json", + ], + text=True, + ) + version_info = json.loads(output) + server_version_info = version_info["serverVersion"]["gitVersion"] + if "gke" in server_version_info: + return "gke", server_version_info + if "eks" in server_version_info: + return "eks", server_version_info + return "aks", server_version_info + + +def get_daemon_sets_requests(): + """ + Returns a list of dicts with info about DaemonSets with pods desired to be scheduled on + some nodes of the k8s cluster. 
+ """ + output = subprocess.check_output( + [ + "kubectl", + "get", + "ds", + "--all-namespaces", + "--output=jsonpath-as-json={.items[*]}", + ], + text=True, + ) + daemon_sets = json.loads(output) + + # filter out DaemonSets that aren't desired on any node + daemon_sets = [ds for ds in daemon_sets if ds["status"]["desiredNumberScheduled"]] + + info = [] + for ds in daemon_sets: + name = ds["metadata"]["name"] + req_mem = req_cpu = lim_mem = lim_cpu = 0 + for c in ds["spec"]["template"]["spec"]["containers"]: + resources = c.get("resources", {}) + requests = resources.get("requests", {}) + limits = resources.get("limits", {}) + req_mem += parse_quantity(requests.get("memory", 0)) + lim_mem += parse_quantity(limits.get("memory", 0)) + req_cpu += parse_quantity(requests.get("cpu", 0)) + lim_cpu += parse_quantity(limits.get("cpu", 0)) + + info.append( + { + "name": name, + "cpu_request": float(req_cpu), + "cpu_limit": float(lim_cpu), + "memory_request": int(req_mem), + "memory_limit": int(lim_mem), + } + ) + + return info + + +def get_daemon_sets_requests_summary(): + """ + Returns a summary of the requests from `get_daemon_sets_requests`. + """ + daemon_sets = get_daemon_sets_requests() + # filter out DaemonSets related to nvidia GPUs + daemon_sets = [ds for ds in daemon_sets if "nvidia" not in ds["name"]] + # separate DaemonSets without requests, as only requests are what impacts + # scheduling of pods and reduces a node's remaining allocatable resources + req_daemon_sets = [ + ds for ds in daemon_sets if ds["cpu_request"] or ds["memory_request"] + ] + other_daemon_sets = [ + ds for ds in daemon_sets if not ds["cpu_request"] and not ds["memory_request"] + ] + + cpu_requests = sum([ds["cpu_request"] for ds in req_daemon_sets]) + memory_requests = sum([ds["memory_request"] for ds in req_daemon_sets]) + info = { + "requesting_daemon_sets": ",".join( + sorted([ds["name"] for ds in req_daemon_sets]) + ), + "other_daemon_sets": ",".join(sorted([ds["name"] for ds in other_daemon_sets])), + "cpu_requests": str(math.ceil(cpu_requests * 1000)) + "m", + "memory_requests": str(math.ceil(memory_requests / 1024**2)) + "Mi", + } + return info + + +@resource_allocation_app.command() +def daemonset_requests( + cluster_name: str = typer.Argument(..., help="Name of cluster to operate on"), +): + """ + Updates `daemonset_requests.yaml` with an individual cluster's DaemonSets' + requests summarized. + + Only DaemonSet's with running pods are considered, and GPU related + DaemonSets (with "nvidia" in the name) are also ignored. 
+ + To run this command for all clusters, `xargs` can be used like this: + + ls config/clusters | xargs -I {} deployer generate resource-allocation daemonset-requests {} + """ + file_path = HERE / "daemonset_requests.yaml" + file_path.touch(exist_ok=True) + + # acquire a Cluster object + config_file_path = find_absolute_path_to_cluster_file(cluster_name) + with open(config_file_path) as f: + cluster = Cluster(yaml.load(f), config_file_path.parent) + + # auth and inspect cluster + with cluster.auth(): + k8s_dist, k8s_version = get_k8s_distribution() + ds_requests = get_daemon_sets_requests_summary() + + # read + with open(file_path) as f: + info = yaml.load(f) or {} + + # update + ds_requests["k8s_version"] = k8s_version + info.setdefault(k8s_dist, {})[cluster_name] = ds_requests + + # write + with open(file_path, "w") as f: + yaml.dump(info, f) diff --git a/deployer/commands/generate/resource_allocation/daemonset_requests.yaml b/deployer/commands/generate/resource_allocation/daemonset_requests.yaml new file mode 100644 index 0000000000..fd45f511c7 --- /dev/null +++ b/deployer/commands/generate/resource_allocation/daemonset_requests.yaml @@ -0,0 +1,180 @@ +# This file contains generated information about cpu/memory requests made by +# DaemonSets with running pods in our clusters. This information is relevant +# when planning cpu/memory requests for other pods as the daemonsets' requests +# reduce the available allocatable capacity. +# +# The requests vary between cloud providers, clusters, and k8s versions for +# reasons like: +# +# - Cloud providers' managed k8s provides different DaemonSets by default +# - DaemonSets may be coupled to managed k8s features (calico-node) +# - DaemonSets' requests may be coupled to managed k8s version (netd) +# - DaemonSets may have a vertical autoscaler changing requests dynamically over +# time if needed (calico-node-vertical-autoscaler) +# - We may deploy or change a DaemonSet's requests over time (support-cryptnono, +# support-prometheus-node-exporter) +# +# This file isn't updated by automation, but can easily be updated by manually +# running a command once for each cluster: +# +# ls config/clusters | xargs -I {} deployer generate resource-allocation daemonset-requests {} +# +gke: + 2i2c: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: binder-staging-dind,binder-staging-image-cleaner,continuous-image-puller,imagebuilding-demo-binderhub-service-docker-api,netd + cpu_requests: 342m + memory_requests: 566Mi + k8s_version: v1.26.5-gke.2100 + 2i2c-uk: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 344m + memory_requests: 596Mi + k8s_version: v1.27.4-gke.900 + awi-ciroh: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: netd + cpu_requests: 342m + memory_requests: 566Mi + k8s_version: v1.25.10-gke.2700 + callysto: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 344m + memory_requests: 596Mi + k8s_version: v1.27.4-gke.900 + catalystproject-latam: + 
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 338m + memory_requests: 496Mi + k8s_version: v1.27.3-gke.100 + cloudbank: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: continuous-image-puller,continuous-image-puller,continuous-image-puller,netd + cpu_requests: 342m + memory_requests: 566Mi + k8s_version: v1.26.5-gke.2100 + hhmi: + requesting_daemon_sets: fluentbit-gke,gke-metadata-server,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 228m + memory_requests: 480Mi + k8s_version: v1.27.3-gke.100 + leap: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: netd + cpu_requests: 342m + memory_requests: 566Mi + k8s_version: v1.25.10-gke.2700 + linked-earth: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 344m + memory_requests: 596Mi + k8s_version: v1.27.4-gke.900 + m2lines: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 344m + memory_requests: 596Mi + k8s_version: v1.27.4-gke.900 + meom-ige: + requesting_daemon_sets: fluentbit-gke,gke-metadata-server,gke-metrics-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 234m + memory_requests: 580Mi + k8s_version: v1.27.4-gke.900 + pangeo-hubs: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: netd + cpu_requests: 342m + memory_requests: 566Mi + k8s_version: v1.26.5-gke.2100 + qcl: + requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,ip-masq-agent,pdcsi-node,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: continuous-image-puller,continuous-image-puller,netd + cpu_requests: 336m + memory_requests: 466Mi + k8s_version: v1.25.10-gke.2700 +eks: + 2i2c-aws-us: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + carbonplan: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.24.16-eks-2d98532 + catalystproject-africa: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.27.4-eks-2d98532 + gridsst: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + jupyter-meets-the-earth: + requesting_daemon_sets: 
aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + nasa-cryo: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + nasa-ghg: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.27.4-eks-2d98532 + nasa-veda: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + openscapes: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.24.16-eks-2d98532 + smithsonian: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 + ubc-eoas: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.24.17-eks-f8587cb + victor: + requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: "" + cpu_requests: 170m + memory_requests: 250Mi + k8s_version: v1.25.12-eks-2d98532 +aks: + utoronto: + requesting_daemon_sets: cloud-node-manager,csi-azuredisk-node,csi-azurefile-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter + other_daemon_sets: calico-node,continuous-image-puller,continuous-image-puller,continuous-image-puller,continuous-image-puller + cpu_requests: 226m + memory_requests: 300Mi + k8s_version: v1.26.3 diff --git a/deployer/commands/generate/resource_allocation/instance_capacities.py b/deployer/commands/generate/resource_allocation/instance_capacities.py new file mode 100644 index 0000000000..5dc0df9947 --- /dev/null +++ b/deployer/commands/generate/resource_allocation/instance_capacities.py @@ -0,0 +1,144 @@ +import json +import math +import subprocess +from pathlib import Path + +import typer +from kubernetes.utils.quantity import parse_quantity +from ruamel.yaml import YAML + +from deployer.infra_components.cluster import Cluster +from deployer.utils.file_acquisition import find_absolute_path_to_cluster_file + +from .resource_allocation_app import resource_allocation_app + +HERE = Path(__file__).parent +yaml = YAML() +yaml.preserve_quotes = True +yaml.indent(mapping=2, sequence=4, offset=2) + + +def get_running_instance_types(): + """ + Returns a unique list of the k8s cluster's running nodes' instance types. + """ + output = subprocess.check_output( + [ + "kubectl", + "get", + "node", + r"--output=jsonpath-as-json={.items[*].metadata.labels['node\.kubernetes\.io/instance-type']}", + ], + text=True, + ) + instance_types = list(set(json.loads(output))) + return instance_types + + +def get_instance_capacity(instance_type: str): + """ + Returns a dictionary summarizing total and allocatable capacity of + cpu/memory for an instance type by inspecting one in the k8s cluster. 
+ """ + output = subprocess.check_output( + [ + "kubectl", + "get", + "node", + "--output=jsonpath-as-json={.items[*].status}", + f"--selector=node.kubernetes.io/instance-type={instance_type}", + ], + text=True, + ) + + # all nodes of a given instance type should report the same capacity and + # allocatable cpu/memory, we just pick one + status = json.loads(output)[0] + + cpu_capacity = float(parse_quantity(status["capacity"]["cpu"])) + cpu_allocatable = float(parse_quantity(status["allocatable"]["cpu"])) + mem_capacity = int(parse_quantity(status["capacity"]["memory"])) + mem_allocatable = int(parse_quantity(status["allocatable"]["memory"])) + + # format memory to use Gi with 3 decimal places + mem_capacity = str(math.floor(mem_capacity / 1024**3 * 1000) / 1000) + "Gi" + mem_allocatable = str(math.floor(mem_allocatable / 1024**3 * 1000) / 1000) + "Gi" + + info = { + "cpu_capacity_low": cpu_capacity, + "cpu_capacity_high": cpu_capacity, + "cpu_allocatable_low": cpu_allocatable, + "cpu_allocatable_high": cpu_allocatable, + "mem_capacity_low": mem_capacity, + "mem_capacity_high": mem_capacity, + "mem_allocatable_low": mem_allocatable, + "mem_allocatable_high": mem_allocatable, + } + return info + + +def get_instance_capacities(): + """ + Returns a dictionary with entries for each of the k8s cluster's running + instance types. + """ + instance_types = get_running_instance_types() + + info = {} + for it in instance_types: + info[it] = get_instance_capacity(it) + return info + + +@resource_allocation_app.command() +def instance_capacities( + cluster_name: str = typer.Argument(..., help="Name of cluster to operate on"), +): + """ + Updates `instance_capacities.yaml` with an individual cluster's running + instance types' total and allocatable capacity. + + To run this command for all clusters, `xargs` can be used like this: + + ls config/clusters | xargs -I {} deployer generate resource-allocation instance-capacities {} + """ + file_path = HERE / "instance_capacities.yaml" + file_path.touch(exist_ok=True) + + # acquire a Cluster object + config_file_path = find_absolute_path_to_cluster_file(cluster_name) + with open(config_file_path) as f: + cluster = Cluster(yaml.load(f), config_file_path.parent) + + # auth and inspect cluster + with cluster.auth(): + new_ics = get_instance_capacities() + + # read + with open(file_path) as f: + ics = yaml.load(f) or {} + + # update + for type, new_cap in new_ics.items(): + cap = ics.get(type) + + # add new entry + if not cap: + ics[type] = new_cap + continue + + # update existing entry, comparing and updating the lowest low and + # highest high for the kind of resources + props = ["cpu_capacity", "cpu_allocatable", "mem_capacity", "mem_allocatable"] + for p in props: + lp = f"{p}_low" + if new_cap[lp] < cap[lp]: + cap[lp] = new_cap[lp] + for p in props: + lp = f"{p}_high" + if new_cap[lp] > cap[lp]: + cap[lp] = new_cap[lp] + + # write + with open(file_path, "w") as f: + yaml.dump(ics, f) diff --git a/deployer/commands/generate/resource_allocation/instance_capacities.yaml b/deployer/commands/generate/resource_allocation/instance_capacities.yaml new file mode 100644 index 0000000000..ebe5eef58e --- /dev/null +++ b/deployer/commands/generate/resource_allocation/instance_capacities.yaml @@ -0,0 +1,184 @@ +# Contains instances' total and allocatable capacity reported from our k8s +# clusters via "kubectl get node". 
+# +# An instance type's reported total capacity and allocatable capacity have been +# found to vary slightly, perhaps due to changes in the node pool's k8s +# versions. To keep an eye on that situation, we keep track of the lowest low +# and highest high. +# +# Note that n2-highmem-*, r5.*large, and Standard_E*s_v5 are the same kind of +# instances, all providing a ratio of 1:8 for CPU:memory and the 3rd generation +# Intel Xeon (Ice Lake) CPU. +# +# This file isn't updated by automation, but can easily be updated manually +# by running a command once for each cluster: +# +# ls config/clusters | xargs -I {} deployer generate resource-allocation instance-capacities {} +# +# GKE instance types +n2-highmem-2: + cpu_capacity_low: 2.0 + cpu_capacity_high: 2.0 + cpu_allocatable_low: 1.93 + cpu_allocatable_high: 1.93 + mem_capacity_low: 15.631Gi + mem_capacity_high: 15.634Gi + mem_allocatable_low: 12.972Gi + mem_allocatable_high: 12.975Gi +n2-highmem-4: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.92 + cpu_allocatable_high: 3.92 + mem_capacity_low: 31.357Gi + mem_capacity_high: 31.357Gi + mem_allocatable_low: 27.738Gi + mem_allocatable_high: 27.738Gi +n2-highmem-8: + cpu_capacity_low: 8.0 + cpu_capacity_high: 8.0 + cpu_allocatable_low: 7.91 + cpu_allocatable_high: 7.91 + mem_capacity_low: 62.809Gi + mem_capacity_high: 62.809Gi + mem_allocatable_low: 57.27Gi + mem_allocatable_high: 57.27Gi +n2-highmem-16: + cpu_capacity_low: 16.0 + cpu_capacity_high: 16.0 + cpu_allocatable_low: 15.89 + cpu_allocatable_high: 15.89 + mem_capacity_low: 125.807Gi + mem_capacity_high: 125.81Gi + mem_allocatable_low: 116.549Gi + mem_allocatable_high: 116.551Gi +n2-highmem-32: + cpu_capacity_low: 32.0 + cpu_capacity_high: 32.0 + cpu_allocatable_low: 31.85 + cpu_allocatable_high: 31.85 + mem_capacity_low: 251.897Gi + mem_capacity_high: 251.897Gi + mem_allocatable_low: 240.079Gi + mem_allocatable_high: 240.079Gi +n1-highmem-4: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.92 + cpu_allocatable_high: 3.92 + mem_capacity_low: 25.451Gi + mem_capacity_high: 25.451Gi + mem_allocatable_low: 22.192Gi + mem_allocatable_high: 22.192Gi +e2-highmem-16: + cpu_capacity_low: 16.0 + cpu_capacity_high: 16.0 + cpu_allocatable_low: 15.89 + cpu_allocatable_high: 15.89 + mem_capacity_low: 125.807Gi + mem_capacity_high: 125.807Gi + mem_allocatable_low: 116.549Gi + mem_allocatable_high: 116.549Gi +n1-standard-2: + cpu_capacity_low: 2.0 + cpu_capacity_high: 2.0 + cpu_allocatable_low: 1.93 + cpu_allocatable_high: 1.93 + mem_capacity_low: 7.276Gi + mem_capacity_high: 7.276Gi + mem_allocatable_low: 5.483Gi + mem_allocatable_high: 5.483Gi +n1-standard-8: + cpu_capacity_low: 8.0 + cpu_capacity_high: 8.0 + cpu_allocatable_low: 7.91 + cpu_allocatable_high: 7.91 + mem_capacity_low: 29.387Gi + mem_capacity_high: 29.387Gi + mem_allocatable_low: 25.888Gi + mem_allocatable_high: 25.888Gi +n1-standard-16: + cpu_capacity_low: 16.0 + cpu_capacity_high: 16.0 + cpu_allocatable_low: 15.89 + cpu_allocatable_high: 15.89 + mem_capacity_low: 58.87Gi + mem_capacity_high: 58.87Gi + mem_allocatable_low: 53.571Gi + mem_allocatable_high: 53.571Gi + +# EKS instance types +r5.xlarge: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.92 + cpu_allocatable_high: 3.92 + mem_capacity_low: 30.887Gi + mem_capacity_high: 30.907Gi + mem_allocatable_low: 29.917Gi + mem_allocatable_high: 29.937Gi +r5.2xlarge: + cpu_capacity_low: 8.0 + cpu_capacity_high: 8.0 + cpu_allocatable_low: 7.91 + 
cpu_allocatable_high: 7.91 + mem_capacity_low: 62.011Gi + mem_capacity_high: 62.011Gi + mem_allocatable_low: 61.041Gi + mem_allocatable_high: 61.041Gi +r5.4xlarge: + cpu_capacity_low: 16.0 + cpu_capacity_high: 16.0 + cpu_allocatable_low: 15.89 + cpu_allocatable_high: 15.89 + mem_capacity_low: 124.364Gi + mem_capacity_high: 124.364Gi + mem_allocatable_low: 121.504Gi + mem_allocatable_high: 121.504Gi +m5.large: + cpu_capacity_low: 2.0 + cpu_capacity_high: 2.0 + cpu_allocatable_low: 1.93 + cpu_allocatable_high: 1.93 + mem_capacity_low: 7.473Gi + mem_capacity_high: 7.473Gi + mem_allocatable_low: 6.815Gi + mem_allocatable_high: 6.815Gi +m5.xlarge: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.92 + cpu_allocatable_high: 3.92 + mem_capacity_low: 15.172Gi + mem_capacity_high: 15.344Gi + mem_allocatable_low: 14.202Gi + mem_allocatable_high: 14.375Gi +g4dn.xlarge: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.92 + cpu_allocatable_high: 3.92 + mem_capacity_low: 15.333Gi + mem_capacity_high: 15.333Gi + mem_allocatable_low: 14.675Gi + mem_allocatable_high: 14.675Gi + +# AKS instance types +Standard_E4s_v3: + cpu_capacity_low: 4.0 + cpu_capacity_high: 4.0 + cpu_allocatable_low: 3.86 + cpu_allocatable_high: 3.86 + mem_capacity_low: 31.354Gi + mem_capacity_high: 31.354Gi + mem_allocatable_low: 27.062Gi + mem_allocatable_high: 27.062Gi +Standard_E8s_v3: + cpu_capacity_low: 8.0 + cpu_capacity_high: 8.0 + cpu_allocatable_low: 7.82 + cpu_allocatable_high: 7.82 + mem_capacity_low: 62.806Gi + mem_capacity_high: 62.806Gi + mem_allocatable_low: 56.594Gi + mem_allocatable_high: 56.594Gi