From afa68dfdbfa0caf88c3c58fd0a6b8376466972d4 Mon Sep 17 00:00:00 2001
From: Quentin Bisson
Date: Mon, 23 Sep 2024 12:45:47 +0200
Subject: [PATCH] remove deprecated app labels for ksm metrics (#1373)

---
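Reviewer note: the change is mechanical. Every kube-state-metrics selector
moves from the deprecated app pod label to the scrape job label. A minimal
PromQL sketch of the before and after, reusing the up series that the alert
in kube-state-metrics.rules.yml below queries:

    # before: series selected via the deprecated app label (vintage clusters)
    up{app="kube-state-metrics",container=""}

    # after: series selected via the scrape job label only
    up{job="kube-state-metrics",instance=~".*:8080"}

The rule unit tests follow suit: exp_labels move from app: kube-state-metrics
to job: kube-state-metrics, and the per-installation scrape job value
(job="gauss-prometheus/workload-gauss/0") is dropped from the input series.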
 CHANGELOG.md | 3 +-
 .../kubernetes-mixins.rules.yml | 30 +++++++++----------
 .../kube-state-metrics.rules.yml | 14 ++-------
 .../alerting-rules/crossplane.rules.test.yml | 10 +++----
 .../external-secrets.rules.test.yml | 15 ++++------
 .../alerting-rules/kyverno.rules.test.yml | 10 +++----
 6 files changed, 34 insertions(+), 48 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 89a51db14..4a208b445 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,7 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 
 - Dashboard links in alertmanager and mimir rules
-- Remove deprecated app labels for external-dns and ingress-nginx alerts.
+- Remove deprecated app labels for `external-dns` and `ingress-nginx` alerts.
+- Remove deprecated app labels for `kube-state-metrics` alerts.
 - Fix falco events alerts node label to hostname as node does not exist.
 
 ## [4.15.2] - 2024-09-17

diff --git a/helm/prometheus-rules/templates/kaas/turtles/recording-rules/kubernetes-mixins.rules.yml b/helm/prometheus-rules/templates/kaas/turtles/recording-rules/kubernetes-mixins.rules.yml
index 30048002c..ad45600a5 100644
--- a/helm/prometheus-rules/templates/kaas/turtles/recording-rules/kubernetes-mixins.rules.yml
+++ b/helm/prometheus-rules/templates/kaas/turtles/recording-rules/kubernetes-mixins.rules.yml
@@ -482,7 +482,7 @@ spec:
           )
         record: node_namespace_pod_container:container_memory_swap
       - expr: |
-          kube_pod_container_resource_requests{resource="memory",app="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
+          kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
           group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
             (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
           )
@@ -491,7 +491,7 @@ spec:
           sum by (namespace, cluster_id, installation, pipeline, provider) (
             sum by (namespace, pod, cluster_id, installation, pipeline, provider) (
               max by (namespace, pod, container, cluster_id, installation, pipeline, provider) (
-                kube_pod_container_resource_requests{resource="memory",app="kube-state-metrics"}
+                kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"}
               ) * on(namespace, pod, cluster_id, installation, pipeline, provider) group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
                 kube_pod_status_phase{phase=~"Pending|Running"} == 1
               )
@@ -499,7 +499,7 @@ spec:
           )
         record: namespace_memory:kube_pod_container_resource_requests:sum
       - expr: |
-          kube_pod_container_resource_requests{resource="cpu",app="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
+          kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
           group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
             (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
           )
@@ -508,7 +508,7 @@ spec:
           sum by (namespace, cluster_id, installation, pipeline, provider) (
             sum by (namespace, pod, cluster_id, installation, pipeline, provider) (
               max by (namespace, pod, container, cluster_id, installation, pipeline, provider) (
-                kube_pod_container_resource_requests{resource="cpu",app="kube-state-metrics"}
+                kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"}
               ) * on(namespace, pod, cluster_id, installation, pipeline, provider) group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
                 kube_pod_status_phase{phase=~"Pending|Running"} == 1
               )
@@ -516,7 +516,7 @@ spec:
           )
         record: namespace_cpu:kube_pod_container_resource_requests:sum
       - expr: |
-          kube_pod_container_resource_limits{resource="memory",app="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
+          kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
           group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
             (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
           )
@@ -525,7 +525,7 @@ spec:
           sum by (namespace, cluster_id, installation, pipeline, provider) (
             sum by (namespace, pod, cluster_id, installation, pipeline, provider) (
               max by (namespace, pod, container, cluster_id, installation, pipeline, provider) (
-                kube_pod_container_resource_limits{resource="memory",app="kube-state-metrics"}
+                kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"}
              ) * on(namespace, pod, cluster_id, installation, pipeline, provider) group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
                 kube_pod_status_phase{phase=~"Pending|Running"} == 1
               )
@@ -533,7 +533,7 @@ spec:
           )
         record: namespace_memory:kube_pod_container_resource_limits:sum
       - expr: |
-          kube_pod_container_resource_limits{resource="cpu",app="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
+          kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster_id, installation, pipeline, provider)
           group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
             (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
           )
@@ -542,7 +542,7 @@ spec:
           sum by (namespace, cluster_id, installation, pipeline, provider) (
             sum by (namespace, pod, cluster_id, installation, pipeline, provider) (
               max by (namespace, pod, container, cluster_id, installation, pipeline, provider) (
-                kube_pod_container_resource_limits{resource="cpu",app="kube-state-metrics"}
+                kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"}
               ) * on(namespace, pod, cluster_id, installation, pipeline, provider) group_left() max by (namespace, pod, cluster_id, installation, pipeline, provider) (
                 kube_pod_status_phase{phase=~"Pending|Running"} == 1
               )
@@ -553,11 +553,11 @@ spec:
           max by (cluster_id, installation, pipeline, provider, namespace, workload, pod) (
             label_replace(
               label_replace(
-                kube_pod_owner{app="kube-state-metrics", owner_kind="ReplicaSet"},
+                kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"},
                 "replicaset", "$1", "owner_name", "(.*)"
               ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) (
                 1, max by (replicaset, namespace, owner_name) (
-                  kube_replicaset_owner{app="kube-state-metrics"}
+                  kube_replicaset_owner{job="kube-state-metrics"}
                 )
               ),
               "workload", "$1", "owner_name", "(.*)"
@@ -569,7 +569,7 @@ spec:
       - expr: |
           max by (cluster_id, installation, pipeline, provider, namespace, workload, pod) (
             label_replace(
-              kube_pod_owner{app="kube-state-metrics", owner_kind="DaemonSet"},
+              kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"},
               "workload", "$1", "owner_name", "(.*)"
             )
           )
@@ -579,7 +579,7 @@ spec:
       - expr: |
           max by (cluster_id, installation, pipeline, provider, namespace, workload, pod) (
             label_replace(
-              kube_pod_owner{app="kube-state-metrics", owner_kind="StatefulSet"},
+              kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"},
               "workload", "$1", "owner_name", "(.*)"
             )
           )
@@ -589,7 +589,7 @@ spec:
       - expr: |
           max by (cluster_id, installation, pipeline, provider, namespace, workload, pod) (
             label_replace(
-              kube_pod_owner{app="kube-state-metrics", owner_kind="Job"},
+              kube_pod_owner{job="kube-state-metrics", owner_kind="Job"},
               "workload", "$1", "owner_name", "(.*)"
             )
           )
@@ -648,7 +648,7 @@ spec:
       - expr: |
          topk by(cluster_id, installation, pipeline, provider, namespace, pod) (1,
             max by (cluster_id, installation, pipeline, provider, node, namespace, pod) (
-              label_replace(kube_pod_info{app="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)")
+              label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)")
          ))
         record: 'node_namespace_pod:kube_pod_info:'
       - expr: |
@@ -697,4 +697,4 @@ spec:
           histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster_id, installation, pipeline, provider, instance, le) * on(cluster_id, installation, pipeline, provider, instance) group_left(node) kubelet_node_name{app="kubelet"})
         labels:
           quantile: "0.5"
-        record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
\ No newline at end of file
+        record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile

diff --git a/helm/prometheus-rules/templates/platform/atlas/alerting-rules/kube-state-metrics.rules.yml b/helm/prometheus-rules/templates/platform/atlas/alerting-rules/kube-state-metrics.rules.yml
index 163b5fff5..90b0f0bdf 100644
--- a/helm/prometheus-rules/templates/platform/atlas/alerting-rules/kube-state-metrics.rules.yml
+++ b/helm/prometheus-rules/templates/platform/atlas/alerting-rules/kube-state-metrics.rules.yml
@@ -16,16 +16,7 @@ spec:
         opsrecipe: kube-state-metrics-down/
       {{- if not .Values.mimir.enabled }}
       expr: |-
-        (
-          # modern clusters
-          label_replace(up{job="kube-state-metrics",instance=~".*:8080"}, "ip", "$1.$2.$3.$4", "node", "ip-(\\d+)-(\\d+)-(\\d+)-(\\d+).*") == 0 or absent(up{job="kube-state-metrics",instance=~".*:8080"} == 1)
-        )
-        and
-        (
-          # vintage clusters without servicemonitor
-          # We need to keep the app label until all clusters are migrated to a release >= 18.2. TODO(@giantswarm/team-atlas): Remove when this is the case
-          label_replace(up{app="kube-state-metrics",container=""}, "ip", "$1.$2.$3.$4", "node", "ip-(\\d+)-(\\d+)-(\\d+)-(\\d+).*") == 0 or absent(up{app="kube-state-metrics",container=""} == 1)
-        )
+        label_replace(up{job="kube-state-metrics",instance=~".*:8080"}, "ip", "$1.$2.$3.$4", "node", "ip-(\\d+)-(\\d+)-(\\d+)-(\\d+).*") == 0 or absent(up{job="kube-state-metrics",instance=~".*:8080"} == 1)
       {{- else }}
       expr: |-
         count by (cluster_id, installation, provider, pipeline) (label_replace(up{job="kube-state-metrics", instance=~".*:8080"}, "ip", "$1.$2.$3.$4", "node", "ip-(\\d+)-(\\d+)-(\\d+)-(\\d+).*")) == 0
@@ -79,8 +70,7 @@ spec:
         opsrecipe: kube-state-metrics-down/
       expr: |-
         # When it looks up but we don't have metrics
-        # We need to keep the app label until all clusters are migrated to a release >= 18.2. TODO(@giantswarm/team-atlas): Remove when this is the case
-        count({job="kube-state-metrics", __name__=~"kube_.+"} or {app="kube-state-metrics", __name__=~"kube_.+"}) by (cluster_id, installation, provider, pipeline) <= 100
+        count({job="kube-state-metrics", __name__=~"kube_.+"}) by (cluster_id, installation, provider, pipeline) <= 100
       for: 20m
       labels:
         area: platform

diff --git a/test/tests/providers/global/platform/honeybadger/alerting-rules/crossplane.rules.test.yml b/test/tests/providers/global/platform/honeybadger/alerting-rules/crossplane.rules.test.yml
index 178ad9217..b053d54d3 100644
--- a/test/tests/providers/global/platform/honeybadger/alerting-rules/crossplane.rules.test.yml
+++ b/test/tests/providers/global/platform/honeybadger/alerting-rules/crossplane.rules.test.yml
@@ -5,7 +5,7 @@ rule_files:
 tests:
   - interval: 1m
     input_series:
-      - series: 'kube_deployment_status_replicas_unavailable{app="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="crossplane", installation="gauss", instance="100.64.5.122:8080", job="gauss-prometheus/workload-gauss/0", namespace="crossplane", node="ip-10-0-5-119.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-95bbb4bd7-v6hvh", provider="aws", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_unavailable{job="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="crossplane", installation="gauss", instance="100.64.5.122:8080", namespace="crossplane", node="ip-10-0-5-119.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-95bbb4bd7-v6hvh", provider="aws", service_priority="highest"}'
         values: "0+0x20 1+0x100"
     alert_rule_test:
       - alertname: CrossplaneDeploymentNotSatisfied
@@ -13,7 +13,7 @@ tests:
         exp_alerts:
           - exp_labels:
               alertname: CrossplaneDeploymentNotSatisfied
-              app: kube-state-metrics
+              job: kube-state-metrics
               area: platform
               cancel_if_cluster_status_creating: "true"
               cancel_if_cluster_status_deleting: "true"
@@ -26,7 +26,6 @@ tests:
               deployment: crossplane
               installation: gauss
               instance: 100.64.5.122:8080
-              job: gauss-prometheus/workload-gauss/0
               namespace: crossplane
               node: ip-10-0-5-119.eu-west-1.compute.internal
               organization: giantswarm
@@ -41,7 +40,7 @@ tests:
           opsrecipe: "deployment-not-satisfied/"
   - interval: 1m
     input_series:
-      - series: 'kube_deployment_status_replicas_unavailable{app="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="caicloud-event-exporter", installation="gauss", instance="100.64.5.122:8080", job="gauss-prometheus/workload-gauss/0", namespace="crossplane", node="ip-10-0-5-119.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-95bbb4bd7-v6hvh", provider="aws", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_unavailable{job="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="caicloud-event-exporter", installation="gauss", instance="100.64.5.122:8080", namespace="crossplane", node="ip-10-0-5-119.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-95bbb4bd7-v6hvh", provider="aws", service_priority="highest"}'
         values: "0+0x20 1+0x100"
     alert_rule_test:
       - alertname: CrossplaneDeploymentNotSatisfied
@@ -49,7 +48,7 @@ tests:
         exp_alerts:
           - exp_labels:
               alertname: CrossplaneDeploymentNotSatisfied
-              app: kube-state-metrics
+              job: kube-state-metrics
               area: platform
               cancel_if_cluster_status_creating: "true"
               cancel_if_cluster_status_deleting: "true"
@@ -62,7 +61,6 @@ tests:
               deployment: caicloud-event-exporter
               installation: gauss
               instance: 100.64.5.122:8080
-              job: gauss-prometheus/workload-gauss/0
               namespace: crossplane
               node: ip-10-0-5-119.eu-west-1.compute.internal
               organization: giantswarm

diff --git a/test/tests/providers/global/platform/honeybadger/alerting-rules/external-secrets.rules.test.yml b/test/tests/providers/global/platform/honeybadger/alerting-rules/external-secrets.rules.test.yml
index d534d649b..a13af4099 100644
--- a/test/tests/providers/global/platform/honeybadger/alerting-rules/external-secrets.rules.test.yml
+++ b/test/tests/providers/global/platform/honeybadger/alerting-rules/external-secrets.rules.test.yml
@@ -5,7 +5,7 @@ rule_files:
 tests:
   - interval: 1m
     input_series:
-      - series: 'kube_deployment_status_replicas_unavailable{app="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets", installation="gauss", instance="100.64.6.226:8080", job="gauss-prometheus/workload-gauss/0", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_unavailable{job="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets", installation="gauss", instance="100.64.6.226:8080", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
         values: "0+0x20 1+0x100"
     alert_rule_test:
       - alertname: ExternalSecretsDeploymentNotSatisfied
@@ -13,7 +13,7 @@ tests:
         exp_alerts:
           - exp_labels:
               alertname: ExternalSecretsDeploymentNotSatisfied
-              app: kube-state-metrics
+              job: kube-state-metrics
               area: platform
               cancel_if_cluster_status_creating: "true"
               cancel_if_cluster_status_deleting: "true"
@@ -26,7 +26,6 @@ tests:
               deployment: external-secrets
               installation: gauss
               instance: 100.64.6.226:8080
-              job: gauss-prometheus/workload-gauss/0
               namespace: external-secrets
               node: ip-10-0-5-161.eu-west-1.compute.internal
               organization: giantswarm
@@ -41,7 +40,7 @@ tests:
           opsrecipe: "deployment-not-satisfied/"
   - interval: 1m
     input_series:
-      - series: 'kube_deployment_status_replicas_unavailable{app="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets-cert-controller", installation="gauss", instance="100.64.6.226:8080", job="gauss-prometheus/workload-gauss/0", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_unavailable{job="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets-cert-controller", installation="gauss", instance="100.64.6.226:8080", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
         values: "0+0x20 1+0x100"
     alert_rule_test:
       - alertname: ExternalSecretsDeploymentNotSatisfied
@@ -49,7 +48,7 @@ tests:
         exp_alerts:
           - exp_labels:
               alertname: ExternalSecretsDeploymentNotSatisfied
-              app: kube-state-metrics
+              job: kube-state-metrics
               area: platform
               cancel_if_cluster_status_creating: "true"
               cancel_if_cluster_status_deleting: "true"
@@ -62,7 +61,6 @@ tests:
               deployment: external-secrets-cert-controller
               installation: gauss
               instance: 100.64.6.226:8080
-              job: gauss-prometheus/workload-gauss/0
               namespace: external-secrets
               node: ip-10-0-5-161.eu-west-1.compute.internal
               organization: giantswarm
@@ -77,7 +75,7 @@ tests:
           opsrecipe: "deployment-not-satisfied/"
   - interval: 1m
     input_series:
-      - series: 'kube_deployment_status_replicas_unavailable{app="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets-webhook", installation="gauss", instance="100.64.6.226:8080", job="gauss-prometheus/workload-gauss/0", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_unavailable{job="kube-state-metrics", cluster_id="gauss", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="external-secrets-webhook", installation="gauss", instance="100.64.6.226:8080", namespace="external-secrets", node="ip-10-0-5-161.eu-west-1.compute.internal", organization="giantswarm", pod="kube-state-metrics-fd99568b6-fnhdv", provider="aws", service_priority="highest"}'
         values: "0+0x20 1+0x100"
     alert_rule_test:
       - alertname: ExternalSecretsDeploymentNotSatisfied
@@ -85,7 +83,7 @@ tests:
         exp_alerts:
           - exp_labels:
               alertname: ExternalSecretsDeploymentNotSatisfied
-              app: kube-state-metrics
+              job: kube-state-metrics
               area: platform
               cancel_if_cluster_status_creating: "true"
               cancel_if_cluster_status_deleting: "true"
@@ -98,7 +96,6 @@ tests:
               deployment: external-secrets-webhook
               installation: gauss
               instance: 100.64.6.226:8080
-              job: gauss-prometheus/workload-gauss/0
               namespace: external-secrets
               node: ip-10-0-5-161.eu-west-1.compute.internal
               organization: giantswarm

diff --git a/test/tests/providers/global/platform/shield/alerting-rules/kyverno.rules.test.yml b/test/tests/providers/global/platform/shield/alerting-rules/kyverno.rules.test.yml
index 46bf9b7c3..6738f6303 100644
--- a/test/tests/providers/global/platform/shield/alerting-rules/kyverno.rules.test.yml
+++ b/test/tests/providers/global/platform/shield/alerting-rules/kyverno.rules.test.yml
@@ -5,17 +5,17 @@ tests:
   - interval: 1m
     input_series:
       # Kyverno validating webhooks
-      - series: 'kube_validatingwebhookconfiguration_info{app="kube-state-metrics", cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-exception-validating-webhook-cfg"}'
+      - series: 'kube_validatingwebhookconfiguration_info{cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-exception-validating-webhook-cfg"}'
         values: "1+0x20"
-      - series: 'kube_validatingwebhookconfiguration_info{app="kube-state-metrics", cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-policy-validating-webhook-cfg"}'
+      - series: 'kube_validatingwebhookconfiguration_info{cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-policy-validating-webhook-cfg"}'
         values: "1+0x20"
-      - series: 'kube_validatingwebhookconfiguration_info{app="kube-state-metrics", cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-resource-validating-webhook-cfg"}'
+      - series: 'kube_validatingwebhookconfiguration_info{cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest", validatingwebhookconfiguration="kyverno-resource-validating-webhook-cfg"}'
         values: "1+0x20"
       # Kyverno deployment status replicas
-      - series: 'kube_deployment_status_replicas_ready{app="kube-state-metrics", cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="kyverno", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", namespace="kyverno", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest"}'
+      - series: 'kube_deployment_status_replicas_ready{cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="kyverno", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", namespace="kyverno", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest"}'
         values: "0+0x20"
       # Kyverno deployment spec replicas
-      - series: 'kube_deployment_spec_replicas{app="kube-state-metrics", cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="kyverno", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", namespace="kyverno", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest"}'
+      - series: 'kube_deployment_spec_replicas{cluster_id="gremlin", cluster_type="management_cluster", container="kube-state-metrics", customer="giantswarm", deployment="kyverno", endpoint="http", installation="gremlin", instance="10.0.135.241:8080", job="kube-state-metrics", namespace="kyverno", node="master-00000y", organization="giantswarm", pipeline="testing", pod="prometheus-operator-app-kube-state-metrics-d7f4ff68d-qn6sb", prometheus="kube-system/prometheus-agent", prometheus_replica="prometheus-prometheus-agent-0", provider="aws", region="germanywestcentral", service="prometheus-operator-app-kube-state-metrics", service_priority="highest"}'
         values: "0+0x240 1+0x70"
       # Kyverno admission reports
       - series: 'aggregation:kyverno_resource_counts{cluster_id="gremlin", installation="gremlin", kind="admissionreports.kyverno.io", pipeline="testing", provider="aws"}'