diff --git a/CHANGELOG.md b/CHANGELOG.md index ad35992280..f36da1fe5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,11 +49,12 @@ * [ENHANCEMENT] Query-frontend: add `instance_enable_ipv6` to support IPv6. #6111 * [ENHANCEMENT] Store-gateway: return same detailed error messages as queriers when chunks or series limits are reached. #6347 * [ENHANCEMENT] Querier: reduce memory consumed for queries that hit store-gateways. #6348 -* [ENHANCEMENT] Ruler: include corresponding trace ID with log messages associated with rule evaluation. #6379 +* [ENHANCEMENT] Ruler: include corresponding trace ID with log messages associated with rule evaluation. #6379 #6520 * [ENHANCEMENT] Querier: clarify log messages and span events emitted while querying ingesters, and include both ingester name and address when relevant. #6381 * [ENHANCEMENT] Memcached: introduce new experimental configuration parameters `-.memcached.write-buffer-size-bytes` `-.memcached.read-buffer-size-bytes` to customise the memcached client write and read buffer size (the buffer is allocated for each memcached connection). #6468 * [ENHANCEMENT] Ingester, Distributor: added experimental support for rejecting push requests received via gRPC before reading them into memory, if ingester or distributor is unable to accept the request. This is activated by using `-ingester.limit-inflight-requests-using-grpc-method-limiter` for ingester, and `-distributor.limit-inflight-requests-using-grpc-method-limiter` for distributor. #5976 #6300 * [ENHANCEMENT] Query-frontend: return warnings generated during query evaluation. #6391 +* [ENHANCEMENT] Server: Add the option `-server.http-read-header-timeout` to enable specifying a timeout for reading HTTP request headers. It defaults to 0, in which case reading of headers can take up to `-server.http-read-timeout`, leaving no time for reading body, if there's any. 
#6517 * [BUGFIX] Ring: Ensure network addresses used for component hash rings are formatted correctly when using IPv6. #6068 * [BUGFIX] Query-scheduler: don't retain connections from queriers that have shut down, leading to gradually increasing enqueue latency over time. #6100 #6145 * [BUGFIX] Ingester: prevent query logic from continuing to execute after queries are canceled. #6085 diff --git a/Makefile b/Makefile index fe4f5828da..06ccc3dcdd 100644 --- a/Makefile +++ b/Makefile @@ -212,7 +212,7 @@ mimir-build-image/$(UPTODATE): mimir-build-image/* # All the boiler plate for building golang follows: SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER ?= true -LATEST_BUILD_IMAGE_TAG ?= pr6383-54c7fc6702 +LATEST_BUILD_IMAGE_TAG ?= pr6456-fb5d96e52f # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 877caf9349..ee354d7ee1 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -420,13 +420,23 @@ "kind": "field", "name": "http_server_read_timeout", "required": false, - "desc": "Read timeout for HTTP server", + "desc": "Read timeout for entire HTTP request, including headers and body.", "fieldValue": null, "fieldDefaultValue": 30000000000, "fieldFlag": "server.http-read-timeout", "fieldType": "duration", "fieldCategory": "advanced" }, + { + "kind": "field", + "name": "http_server_read_header_timeout", + "required": false, + "desc": "Read timeout for HTTP request headers. 
If set to 0, value of -server.http-read-timeout is used.", + "fieldValue": null, + "fieldDefaultValue": 0, + "fieldFlag": "server.http-read-header-timeout", + "fieldType": "duration" + }, { "kind": "field", "name": "http_server_write_timeout", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index faec803f17..318943f97c 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -2559,8 +2559,10 @@ Usage of ./cmd/mimir/mimir: HTTP server listen network, default tcp (default "tcp") -server.http-listen-port int HTTP server listen port. (default 8080) + -server.http-read-header-timeout duration + Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used. -server.http-read-timeout duration - Read timeout for HTTP server (default 30s) + Read timeout for entire HTTP request, including headers and body. (default 30s) -server.http-tls-ca-path string HTTP TLS Client CA path. -server.http-tls-cert-path string diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index 132819ee4b..12fea3a711 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -657,6 +657,8 @@ Usage of ./cmd/mimir/mimir: HTTP server listen address. -server.http-listen-port int HTTP server listen port. (default 8080) + -server.http-read-header-timeout duration + Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used. -server.log-request-headers Optionally log request headers. 
-server.log-request-headers-exclude-list string diff --git a/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml b/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml index 7bf3cc19b2..2d36677071 100644 --- a/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml +++ b/development/mimir-monolithic-mode-with-swift-storage/docker-compose.yml @@ -43,7 +43,7 @@ services: # Scrape the metrics also with the Grafana agent (useful to test metadata ingestion # until metadata remote write is not supported by Prometheus). grafana-agent: - image: grafana/agent:v0.37.1 + image: grafana/agent:v0.37.3 command: ["-config.file=/etc/agent-config/grafana-agent.yaml", "-prometheus.wal-directory=/tmp"] volumes: - ./config:/etc/agent-config diff --git a/development/mimir-monolithic-mode/docker-compose.yml b/development/mimir-monolithic-mode/docker-compose.yml index 7cd2f56770..159d1a50aa 100644 --- a/development/mimir-monolithic-mode/docker-compose.yml +++ b/development/mimir-monolithic-mode/docker-compose.yml @@ -44,7 +44,7 @@ services: grafana-agent: profiles: - grafana-agent-static - image: grafana/agent:v0.37.2 + image: grafana/agent:v0.37.3 command: ["-config.file=/etc/agent-config/grafana-agent.yaml", "-prometheus.wal-directory=/tmp"] volumes: - ./config:/etc/agent-config @@ -54,7 +54,7 @@ services: grafana-agent-flow: profiles: - grafana-agent-flow - image: grafana/agent:v0.37.2 + image: grafana/agent:v0.37.3 environment: - AGENT_MODE=flow command: ["run", "--server.http.listen-addr=0.0.0.0:9092", "/etc/agent/config.river"] diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index 771757c621..dde618c09a 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -590,10 +590,15 @@ grpc_tls_config: # CLI flag: -server.graceful-shutdown-timeout 
[graceful_shutdown_timeout: | default = 30s] -# (advanced) Read timeout for HTTP server +# (advanced) Read timeout for entire HTTP request, including headers and body. # CLI flag: -server.http-read-timeout [http_server_read_timeout: | default = 30s] +# Read timeout for HTTP request headers. If set to 0, value of +# -server.http-read-timeout is used. +# CLI flag: -server.http-read-header-timeout +[http_server_read_header_timeout: | default = 0s] + # (advanced) Write timeout for HTTP server # CLI flag: -server.http-write-timeout [http_server_write_timeout: | default = 2m] diff --git a/go.mod b/go.mod index e5a88c01b3..09560970bf 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gopacket v1.1.19 github.com/gorilla/mux v1.8.0 - github.com/grafana/dskit v0.0.0-20231030022856-30221e73f47e + github.com/grafana/dskit v0.0.0-20231031132813-52f4e8d82d59 github.com/grafana/e2e v0.1.1 github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/json-iterator/go v1.1.12 @@ -249,7 +249,7 @@ require ( ) // Using a fork of Prometheus with Mimir-specific changes. 
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20231027081731-c0ddc1f2ec07 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20231030232122-b7f66b93b9b5 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index ed5fd8aa07..835ff67fe2 100644 --- a/go.sum +++ b/go.sum @@ -538,8 +538,8 @@ github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4= github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0= github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc h1:PXZQA2WCxe85Tnn+WEvr8fDpfwibmEPgfgFEaC87G24= github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc/go.mod h1:AHHlOEv1+GGQ3ktHMlhuTUwo3zljV3QJbC0+8o2kn+4= -github.com/grafana/dskit v0.0.0-20231030022856-30221e73f47e h1:78Seyn40QzROIdIyVhbOzaEn1xGl3DC+37zh+ZLOUAo= -github.com/grafana/dskit v0.0.0-20231030022856-30221e73f47e/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= +github.com/grafana/dskit v0.0.0-20231031132813-52f4e8d82d59 h1:tWbF2UD8HgvpqyxV60zmUDDzJI2gybMJxw4/RY/UNTo= +github.com/grafana/dskit v0.0.0-20231031132813-52f4e8d82d59/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= github.com/grafana/goautoneg v0.0.0-20231010094147-47ce5e72a9ae h1:Yxbw9jKGJVC6qAK5Ubzzb/qZwM6rRMMqaDc/d4Vp3pM= @@ -548,8 +548,8 @@ github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPft github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod 
h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20231027081731-c0ddc1f2ec07 h1:7/7VR+ixN8O1VlxrIokJnc9OgjeSfBI9KbvhjP0ehV0= -github.com/grafana/mimir-prometheus v0.0.0-20231027081731-c0ddc1f2ec07/go.mod h1:AEcvuS2UD6tkY+LgW6TTcLQI7urG/vuyBb+tVO8EiMI= +github.com/grafana/mimir-prometheus v0.0.0-20231030232122-b7f66b93b9b5 h1:GQquGI6Z7ElxnucOfpDxYPTG7aDCrT/RbCZ0rTQeMgE= +github.com/grafana/mimir-prometheus v0.0.0-20231030232122-b7f66b93b9b5/go.mod h1:AEcvuS2UD6tkY+LgW6TTcLQI7urG/vuyBb+tVO8EiMI= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= diff --git a/mimir-build-image/Dockerfile b/mimir-build-image/Dockerfile index 13f9ae6db7..f4b76c42bb 100644 --- a/mimir-build-image/Dockerfile +++ b/mimir-build-image/Dockerfile @@ -3,7 +3,7 @@ # Provenance-includes-license: Apache-2.0 # Provenance-includes-copyright: The Cortex Authors. 
-FROM registry.k8s.io/kustomize/kustomize:v5.1.1 as kustomize +FROM registry.k8s.io/kustomize/kustomize:v5.2.1 as kustomize FROM alpine/helm:3.13.1 as helm FROM golang:1.21.3-bullseye ARG goproxyValue diff --git a/operations/helm/charts/mimir-distributed/Chart.lock b/operations/helm/charts/mimir-distributed/Chart.lock index 28f7beb1f1..e8ee7485c8 100644 --- a/operations/helm/charts/mimir-distributed/Chart.lock +++ b/operations/helm/charts/mimir-distributed/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 5.0.14 - name: grafana-agent-operator repository: https://grafana.github.io/helm-charts - version: 0.3.7 + version: 0.3.8 - name: rollout-operator repository: https://grafana.github.io/helm-charts version: 0.9.1 -digest: sha256:a33f94d698d6fd2cd703c0f7482ad6d03f4cb44e4971272bb9fa1703e9f10a12 -generated: "2023-10-16T00:58:44.873531463Z" +digest: sha256:8a4b2f3d0e30bc7a5aedf99f0a26da61a267a8b0d143eb85f45485bc02cad58d +generated: "2023-10-23T03:41:31.534206747Z" diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index 0fd115151c..62fd49fad2 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ -14,7 +14,7 @@ dependencies: condition: minio.enabled - name: grafana-agent-operator alias: grafana-agent-operator - version: 0.3.7 + version: 0.3.8 repository: https://grafana.github.io/helm-charts condition: metaMonitoring.grafanaAgent.installOperator - name: rollout-operator diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 59e7efdbdb..b9947a6542 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -25,7 +25,7 @@ Kubernetes: `^1.20.0-0` | Repository | Name | Version | |------------|------|---------| | https://charts.min.io/ | minio(minio) | 5.0.14 | -| https://grafana.github.io/helm-charts | 
grafana-agent-operator(grafana-agent-operator) | 0.3.7 | +| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.3.8 | | https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.9.1 | # Contributing and releasing diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index f87f2d864f..ec694d4c2f 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -1659,7 +1659,7 @@ memcached: # -- Memcached Docker image repository repository: memcached # -- Memcached Docker image tag - tag: 1.6.21-alpine + tag: 1.6.22-alpine # -- Memcached Docker image pull policy pullPolicy: IfNotPresent diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml index 226abe7eff..bd433f3898 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml +++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml @@ -58,7 +58,7 @@ spec: secretName: tls-certs containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml index fc2273c729..3806c7e814 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml +++ 
b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml @@ -58,7 +58,7 @@ spec: secretName: tls-certs containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml index 85bc440a22..e14d904076 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml +++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml @@ -58,7 +58,7 @@ spec: secretName: tls-certs containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml index f7d4803f63..a135813847 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml +++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml @@ -58,7 +58,7 @@ spec: secretName: tls-certs containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git 
a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml index a7f98ae781..c3084aafc1 100644 --- a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml +++ b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-aggregation-cache/graphite-aggregation-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml index 1814534b11..f4154c963d 100644 --- a/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml +++ b/operations/helm/tests/graphite-enabled-values-generated/mimir-distributed/templates/graphite-proxy/graphite-metric-name-cache/graphite-metric-name-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git 
a/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml index 9aec1dfaca..9271987087 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml index d228f64ea9..e2b21a650a 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml index 285fe45f6b..6b8030b4ef 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml @@ -54,7 +54,7 
@@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml index 05fb1eec8e..63bddfcf09 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml index 81e7132f79..94368e7e4d 100644 --- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml +++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrole.yaml @@ -9,8 +9,8 @@ metadata: app.kubernetes.io/instance: metamonitoring-values app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: operator - helm.sh/chart: grafana-agent-operator-0.3.7 - app.kubernetes.io/version: "0.37.1" + helm.sh/chart: grafana-agent-operator-0.3.8 + app.kubernetes.io/version: "0.37.2" rules: - apiGroups: [monitoring.grafana.com] resources: diff --git 
a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml index 6e4107a047..6517b0659e 100644 --- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml +++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-clusterrolebinding.yaml @@ -9,8 +9,8 @@ metadata: app.kubernetes.io/instance: metamonitoring-values app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: operator - helm.sh/chart: grafana-agent-operator-0.3.7 - app.kubernetes.io/version: "0.37.1" + helm.sh/chart: grafana-agent-operator-0.3.8 + app.kubernetes.io/version: "0.37.2" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml index 6f5b5bf900..989d2da47c 100644 --- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml +++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-deployment.yaml @@ -9,8 +9,8 @@ metadata: app.kubernetes.io/instance: metamonitoring-values app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: operator - helm.sh/chart: grafana-agent-operator-0.3.7 - app.kubernetes.io/version: "0.37.1" + helm.sh/chart: grafana-agent-operator-0.3.8 + app.kubernetes.io/version: "0.37.2" spec: replicas: 1 selector: @@ -33,7 +33,7 @@ spec: type: 
RuntimeDefault containers: - name: grafana-agent-operator - image: "docker.io/grafana/agent-operator:v0.37.1" + image: "docker.io/grafana/agent-operator:v0.37.2" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml index a11660b97b..5cbfeda980 100644 --- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml +++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/operator-serviceaccount.yaml @@ -10,5 +10,5 @@ metadata: app.kubernetes.io/instance: metamonitoring-values app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: operator - helm.sh/chart: grafana-agent-operator-0.3.7 - app.kubernetes.io/version: "0.37.1" + helm.sh/chart: grafana-agent-operator-0.3.8 + app.kubernetes.io/version: "0.37.2" diff --git a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml index 6a02551045..025bf99fe3 100644 --- a/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml +++ b/operations/helm/tests/metamonitoring-values-generated/mimir-distributed/charts/grafana-agent-operator/templates/tests/test-grafanaagent.yaml @@ -94,7 +94,7 @@ metadata: "helm.sh/hook": test "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed spec: - image: "docker.io/grafana/agent:v0.37.1" + image: 
"docker.io/grafana/agent:v0.37.2" logLevel: info serviceAccountName: grafana-agent-test-sa metrics: diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml index 4f4f362b0f..61de9afec6 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml +++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml index 792f05b881..1903184e03 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml +++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml index fc58613ac5..2f73ba86c5 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml +++ 
b/operations/helm/tests/small-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml index b947cdc61b..b7498f78e2 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml +++ b/operations/helm/tests/small-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml index 900c5347c5..2c4834fcbc 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml 
b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml index 642d1d1f1c..93c749a6e4 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml index 9513c0f07a..3da5b96120 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml index a86e23fe0d..09d4a47e4e 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml +++ 
b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml @@ -54,7 +54,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml index 0fba8c6836..16d924b997 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/chunks-cache/chunks-cache-statefulset.yaml @@ -55,7 +55,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: null diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml index 020510a764..c8f364947f 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/index-cache/index-cache-statefulset.yaml @@ -55,7 +55,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: null diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml 
b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml index bdd12a7fee..1314d9b6be 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/metadata-cache/metadata-cache-statefulset.yaml @@ -55,7 +55,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: null diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml index a4366d7c42..dc0f610021 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/results-cache/results-cache-statefulset.yaml @@ -55,7 +55,7 @@ spec: terminationGracePeriodSeconds: 60 containers: - name: memcached - image: memcached:1.6.21-alpine + image: memcached:1.6.22-alpine imagePullPolicy: IfNotPresent resources: limits: null diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 2950e75730..ad1ad26b6a 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/dskit/instrument" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/limiter" + "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/mtime" "github.com/grafana/dskit/ring" ring_client "github.com/grafana/dskit/ring/client" @@ -46,7 +47,6 @@ import ( "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/util" 
"github.com/grafana/mimir/pkg/util/globalerror" - util_log "github.com/grafana/mimir/pkg/util/log" util_math "github.com/grafana/mimir/pkg/util/math" "github.com/grafana/mimir/pkg/util/pool" "github.com/grafana/mimir/pkg/util/spanlogger" @@ -1151,7 +1151,7 @@ func (d *Distributor) limitsMiddleware(next PushFunc) PushFunc { // We don't know request size yet, will check it later. ctx, rs, err := d.startPushRequest(ctx, -1) if err != nil { - return util_log.DoNotLogError{Err: err} + return middleware.DoNotLogError{Err: err} } rs.pushHandlerPerformsCleanup = true diff --git a/pkg/distributor/errors_test.go b/pkg/distributor/errors_test.go index 7fc05c221d..66e2e2f7c3 100644 --- a/pkg/distributor/errors_test.go +++ b/pkg/distributor/errors_test.go @@ -7,13 +7,13 @@ import ( "testing" "github.com/gogo/status" + "github.com/grafana/dskit/middleware" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "github.com/grafana/mimir/pkg/mimirpb" - "github.com/grafana/mimir/pkg/util/log" ) func TestNewReplicasNotMatchError(t *testing.T) { @@ -180,7 +180,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorMsg: originalMsg, }, "a DoNotLog error of a generic error gets translated into an Internal error with no details": { - err: log.DoNotLogError{Err: originalErr}, + err: middleware.DoNotLogError{Err: originalErr}, expectedGRPCCode: codes.Internal, expectedErrorMsg: originalMsg, }, @@ -191,7 +191,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.REPLICAS_DID_NOT_MATCH}, }, "a DoNotLotError of a replicasDidNotMatchError gets translated into an AlreadyExists error with REPLICAS_DID_NOT_MATCH cause": { - err: log.DoNotLogError{Err: replicasDidNotMatchErr}, + err: middleware.DoNotLogError{Err: replicasDidNotMatchErr}, expectedGRPCCode: codes.AlreadyExists, expectedErrorMsg: replicasDidNotMatchErr.Error(), expectedErrorDetails: 
&mimirpb.WriteErrorDetails{Cause: mimirpb.REPLICAS_DID_NOT_MATCH}, @@ -203,7 +203,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.TOO_MANY_CLUSTERS}, }, "a DoNotLogError of a tooManyClustersError gets translated into a FailedPrecondition error with TOO_MANY_CLUSTERS cause": { - err: log.DoNotLogError{Err: tooManyClustersErr}, + err: middleware.DoNotLogError{Err: tooManyClustersErr}, expectedGRPCCode: codes.FailedPrecondition, expectedErrorMsg: tooManyClustersErr.Error(), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.TOO_MANY_CLUSTERS}, @@ -215,7 +215,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.BAD_DATA}, }, "a DoNotLogError of a validationError gets translated into gets translated into a FailedPrecondition error with VALIDATION cause": { - err: log.DoNotLogError{Err: newValidationError(originalErr)}, + err: middleware.DoNotLogError{Err: newValidationError(originalErr)}, expectedGRPCCode: codes.FailedPrecondition, expectedErrorMsg: originalMsg, expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.BAD_DATA}, @@ -227,7 +227,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.INGESTION_RATE_LIMITED}, }, "a DoNotLogError of an ingestionRateLimitedError gets translated into gets translated into a ResourceExhausted error with INGESTION_RATE_LIMITED cause": { - err: log.DoNotLogError{Err: ingestionRateLimitedErr}, + err: middleware.DoNotLogError{Err: ingestionRateLimitedErr}, expectedGRPCCode: codes.ResourceExhausted, expectedErrorMsg: ingestionRateLimitedErr.Error(), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.INGESTION_RATE_LIMITED}, @@ -240,7 +240,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.REQUEST_RATE_LIMITED}, }, "a DoNotLogError of a requestRateLimitedError with 
serviceOverloadErrorEnabled gets translated into an Unavailable error with REQUEST_RATE_LIMITED cause": { - err: log.DoNotLogError{Err: requestRateLimitedErr}, + err: middleware.DoNotLogError{Err: requestRateLimitedErr}, serviceOverloadErrorEnabled: true, expectedGRPCCode: codes.Unavailable, expectedErrorMsg: requestRateLimitedErr.Error(), @@ -253,7 +253,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.REQUEST_RATE_LIMITED}, }, "a DoNotLogError of a requestRateLimitedError without serviceOverloadErrorEnabled gets translated into an ResourceExhausted error with REQUEST_RATE_LIMITED cause": { - err: log.DoNotLogError{Err: requestRateLimitedErr}, + err: middleware.DoNotLogError{Err: requestRateLimitedErr}, expectedGRPCCode: codes.ResourceExhausted, expectedErrorMsg: requestRateLimitedErr.Error(), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.REQUEST_RATE_LIMITED}, @@ -265,7 +265,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.BAD_DATA}, }, "a DoNotLogError of an ingesterPushError with BAD_DATA cause gets translated into a FailedPrecondition error with BAD_DATA cause": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.BAD_DATA))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.BAD_DATA))}, expectedGRPCCode: codes.FailedPrecondition, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.BAD_DATA}, @@ -277,7 +277,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.INSTANCE_LIMIT}, }, "a DoNotLogError of an ingesterPushError with INSTANCE_LIMIT cause gets translated into a Internal error with INSTANCE_LIMIT cause": { - err: log.DoNotLogError{Err: 
newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.INSTANCE_LIMIT))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.INSTANCE_LIMIT))}, expectedGRPCCode: codes.Internal, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.INSTANCE_LIMIT}, @@ -289,7 +289,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.SERVICE_UNAVAILABLE}, }, "a DoNotLogError of an ingesterPushError with SERVICE_UNAVAILABLE cause gets translated into a Internal error with SERVICE_UNAVAILABLE cause": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.SERVICE_UNAVAILABLE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.SERVICE_UNAVAILABLE))}, expectedGRPCCode: codes.Internal, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.SERVICE_UNAVAILABLE}, @@ -301,7 +301,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.TSDB_UNAVAILABLE}, }, "a DoNotLogError of an ingesterPushError with TSDB_UNAVAILABLE cause gets translated into a Internal error with TSDB_UNAVAILABLE cause": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.TSDB_UNAVAILABLE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.TSDB_UNAVAILABLE))}, expectedGRPCCode: codes.Internal, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: 
mimirpb.TSDB_UNAVAILABLE}, @@ -313,7 +313,7 @@ func TestToGRPCError(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.UNKNOWN_CAUSE}, }, "a DoNotLogError of an ingesterPushError with UNKNOWN_CAUSE cause gets translated into a Internal error with UNKNOWN_CAUSE cause": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.UNKNOWN_CAUSE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.UNKNOWN_CAUSE))}, expectedGRPCCode: codes.Internal, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: mimirpb.UNKNOWN_CAUSE}, diff --git a/pkg/distributor/push.go b/pkg/distributor/push.go index 00f339170f..171cf049ae 100644 --- a/pkg/distributor/push.go +++ b/pkg/distributor/push.go @@ -14,6 +14,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/tenant" @@ -140,6 +141,7 @@ func handler( if code != 202 { level.Error(logger).Log("msg", "push error", "err", err) } + addHeaders(w, err) http.Error(w, msg, code) } }) @@ -187,3 +189,10 @@ func toHTTPStatus(ctx context.Context, pushErr error, limits *validation.Overrid return http.StatusInternalServerError } + +func addHeaders(w http.ResponseWriter, err error) { + var doNotLogError middleware.DoNotLogError + if errors.As(err, &doNotLogError) { + w.Header().Set(server.DoNotLogErrorHeaderKey, "true") + } +} diff --git a/pkg/distributor/push_test.go b/pkg/distributor/push_test.go index 7bd923cd92..1c78c74b6f 100644 --- a/pkg/distributor/push_test.go +++ b/pkg/distributor/push_test.go @@ -19,6 +19,7 @@ import ( "github.com/golang/snappy" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/httpgrpc/server" 
"github.com/grafana/dskit/middleware" "github.com/grafana/dskit/tenant" "github.com/grafana/dskit/user" @@ -33,7 +34,6 @@ import ( "google.golang.org/grpc/codes" "github.com/grafana/mimir/pkg/mimirpb" - "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/test" "github.com/grafana/mimir/pkg/util/validation" ) @@ -728,47 +728,89 @@ func TestNewDistributorMaxWriteMessageSizeErr(t *testing.T) { } func TestHandler_ErrorTranslation(t *testing.T) { + errMsg := "this is an error" + parserTestCases := []struct { + name string + err error + expectedHTTPStatus int + expectedErrorMessage string + }{ + { + name: "a generic error during request parsing gets an HTTP 400", + err: fmt.Errorf(errMsg), + expectedHTTPStatus: http.StatusBadRequest, + expectedErrorMessage: errMsg, + }, + { + name: "a gRPC error with a status during request parsing gets translated into HTTP error without DoNotLogError header", + err: httpgrpc.Errorf(http.StatusRequestEntityTooLarge, errMsg), + expectedHTTPStatus: http.StatusRequestEntityTooLarge, + expectedErrorMessage: errMsg, + }, + } + for _, tc := range parserTestCases { + t.Run(tc.name, func(t *testing.T) { + parserFunc := func(context.Context, *http.Request, int, []byte, *mimirpb.PreallocWriteRequest) ([]byte, error) { + return nil, tc.err + } + pushFunc := func(ctx context.Context, req *Request) error { + _, err := req.WriteRequest() // just read the body so we can trigger the parser + return err + } + + h := handler(10, nil, false, nil, pushFunc, parserFunc) + + recorder := httptest.NewRecorder() + h.ServeHTTP(recorder, httptest.NewRequest(http.MethodPost, "/push", bufCloser{&bytes.Buffer{}})) + + assert.Equal(t, tc.expectedHTTPStatus, recorder.Code) + assert.Equal(t, fmt.Sprintf("%s\n", tc.expectedErrorMessage), recorder.Body.String()) + }) + } + testCases := []struct { - name string - parserError bool - err error - expectedHTTPStatus int + name string + err error + expectedHTTPStatus int + expectedErrorMessage string + 
expectedDoNotLogErrorHeader bool }{ { name: "no error during push gets translated into a HTTP 200", - parserError: false, err: nil, expectedHTTPStatus: http.StatusOK, }, { - name: "a generic error during request parsing gets an HTTP 400", - parserError: true, - err: fmt.Errorf("something went wrong during the request parsing"), - expectedHTTPStatus: http.StatusBadRequest, + name: "a generic error during push gets a HTTP 500 without DoNotLogError header", + err: fmt.Errorf(errMsg), + expectedHTTPStatus: http.StatusInternalServerError, + expectedErrorMessage: errMsg, }, { - name: "a gRPC error with a status during request parsing gets translated into HTTP error", - parserError: true, - err: httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "too big"), - expectedHTTPStatus: http.StatusRequestEntityTooLarge, + name: "a DoNotLogError of a generic error during push gets a HTTP 500 with DoNotLogError header", + err: middleware.DoNotLogError{Err: fmt.Errorf(errMsg)}, + expectedHTTPStatus: http.StatusInternalServerError, + expectedErrorMessage: errMsg, + expectedDoNotLogErrorHeader: true, }, { - name: "a generic error during push gets a HTTP 500", - parserError: false, - err: fmt.Errorf("something went wrong during the push"), - expectedHTTPStatus: http.StatusInternalServerError, + name: "a gRPC error with a status during push gets translated into HTTP error without DoNotLogError header", + err: httpgrpc.Errorf(http.StatusRequestEntityTooLarge, errMsg), + expectedHTTPStatus: http.StatusRequestEntityTooLarge, + expectedErrorMessage: errMsg, }, { - name: "a gRPC error with a status during push gets translated into HTTP error", - parserError: false, - err: httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "too big"), - expectedHTTPStatus: http.StatusRequestEntityTooLarge, + name: "a DoNotLogError of a gRPC error with a status during push gets translated into HTTP error without DoNotLogError header", + err: middleware.DoNotLogError{Err: 
httpgrpc.Errorf(http.StatusRequestEntityTooLarge, errMsg)}, + expectedHTTPStatus: http.StatusRequestEntityTooLarge, + expectedErrorMessage: errMsg, + expectedDoNotLogErrorHeader: true, }, { - name: "a context.Canceled error during push gets translated into a HTTP 499", - parserError: false, - err: context.Canceled, - expectedHTTPStatus: statusClientClosedRequest, + name: "a context.Canceled error during push gets translated into a HTTP 499", + err: context.Canceled, + expectedHTTPStatus: statusClientClosedRequest, + expectedErrorMessage: context.Canceled.Error(), }, } @@ -776,9 +818,6 @@ func TestHandler_ErrorTranslation(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { parserFunc := func(context.Context, *http.Request, int, []byte, *mimirpb.PreallocWriteRequest) ([]byte, error) { - if tc.parserError { - return nil, tc.err - } return nil, nil } pushFunc := func(ctx context.Context, req *Request) error { @@ -795,6 +834,15 @@ func TestHandler_ErrorTranslation(t *testing.T) { h.ServeHTTP(recorder, httptest.NewRequest(http.MethodPost, "/push", bufCloser{&bytes.Buffer{}})) assert.Equal(t, tc.expectedHTTPStatus, recorder.Code) + if tc.err != nil { + assert.Equal(t, fmt.Sprintf("%s\n", tc.expectedErrorMessage), recorder.Body.String()) + } + header := recorder.Header().Get(server.DoNotLogErrorHeaderKey) + if tc.expectedDoNotLogErrorHeader { + require.Equal(t, "true", header) + } else { + require.Equal(t, "", header) + } }) } } @@ -821,7 +869,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: originalMsg, }, "a DoNotLog of a generic error gets translated into a HTTP 500": { - err: log.DoNotLogError{Err: originalErr}, + err: middleware.DoNotLogError{Err: originalErr}, expectedHTTPStatus: http.StatusInternalServerError, expectedErrorMsg: originalMsg, }, @@ -836,7 +884,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: replicasNotMatchErr.Error(), }, "a DoNotLogError of a replicasDidNotMatchError gets translated into an HTTP 202": 
{ - err: log.DoNotLogError{Err: replicasNotMatchErr}, + err: middleware.DoNotLogError{Err: replicasNotMatchErr}, expectedHTTPStatus: http.StatusAccepted, expectedErrorMsg: replicasNotMatchErr.Error(), }, @@ -846,7 +894,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: tooManyClustersErr.Error(), }, "a DoNotLogError of a tooManyClustersError gets translated into an HTTP 400": { - err: log.DoNotLogError{Err: tooManyClustersErr}, + err: middleware.DoNotLogError{Err: tooManyClustersErr}, expectedHTTPStatus: http.StatusBadRequest, expectedErrorMsg: tooManyClustersErr.Error(), }, @@ -856,7 +904,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: originalMsg, }, "a DoNotLogError of a validationError gets translated into an HTTP 400": { - err: log.DoNotLogError{Err: newValidationError(originalErr)}, + err: middleware.DoNotLogError{Err: newValidationError(originalErr)}, expectedHTTPStatus: http.StatusBadRequest, expectedErrorMsg: originalMsg, }, @@ -866,7 +914,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: ingestionRateLimitedErr.Error(), }, "a DoNotLogError of an ingestionRateLimitedError gets translated into an HTTP 429": { - err: log.DoNotLogError{Err: ingestionRateLimitedErr}, + err: middleware.DoNotLogError{Err: ingestionRateLimitedErr}, expectedHTTPStatus: http.StatusTooManyRequests, expectedErrorMsg: ingestionRateLimitedErr.Error(), }, @@ -877,7 +925,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: requestRateLimitedErr.Error(), }, "a DoNotLogError of a requestRateLimitedError with serviceOverloadErrorEnabled gets translated into an HTTP 529": { - err: log.DoNotLogError{Err: requestRateLimitedErr}, + err: middleware.DoNotLogError{Err: requestRateLimitedErr}, serviceOverloadErrorEnabled: true, expectedHTTPStatus: StatusServiceOverloaded, expectedErrorMsg: requestRateLimitedErr.Error(), @@ -889,7 +937,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: 
requestRateLimitedErr.Error(), }, "a DoNotLogError of a requestRateLimitedError without serviceOverloadErrorEnabled gets translated into an HTTP 429": { - err: log.DoNotLogError{Err: requestRateLimitedErr}, + err: middleware.DoNotLogError{Err: requestRateLimitedErr}, serviceOverloadErrorEnabled: false, expectedHTTPStatus: http.StatusTooManyRequests, expectedErrorMsg: requestRateLimitedErr.Error(), @@ -900,7 +948,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, "a DoNotLogError of an ingesterPushError with BAD_DATA cause gets translated into an HTTP 400": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.FailedPrecondition, originalMsg, mimirpb.BAD_DATA))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.FailedPrecondition, originalMsg, mimirpb.BAD_DATA))}, expectedHTTPStatus: http.StatusBadRequest, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, @@ -910,7 +958,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, "a DoNotLogError of an ingesterPushError with TSDB_UNAVAILABLE cause gets translated into an HTTP 503": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.TSDB_UNAVAILABLE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.TSDB_UNAVAILABLE))}, expectedHTTPStatus: http.StatusServiceUnavailable, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, @@ -920,7 +968,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, "a DoNotLogError of an ingesterPushError with SERVICE_UNAVAILABLE cause gets translated into an 
HTTP 500": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.SERVICE_UNAVAILABLE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.SERVICE_UNAVAILABLE))}, expectedHTTPStatus: http.StatusInternalServerError, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, @@ -930,7 +978,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, "a DoNotLogError of an ingesterPushError with INSTANCE_LIMIT cause gets translated into an HTTP 500": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.INSTANCE_LIMIT))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Unavailable, originalMsg, mimirpb.INSTANCE_LIMIT))}, expectedHTTPStatus: http.StatusInternalServerError, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, @@ -940,7 +988,7 @@ func TestHandler_ToHTTPStatus(t *testing.T) { expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, "a DoNotLogError of an ingesterPushError with UNKNOWN_CAUSE cause gets translated into an HTTP 500": { - err: log.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.UNKNOWN_CAUSE))}, + err: middleware.DoNotLogError{Err: newIngesterPushError(createStatusWithDetails(t, codes.Internal, originalMsg, mimirpb.UNKNOWN_CAUSE))}, expectedHTTPStatus: http.StatusInternalServerError, expectedErrorMsg: fmt.Sprintf("%s: %s", failedPushingToIngesterMessage, originalMsg), }, diff --git a/pkg/ingester/errors.go b/pkg/ingester/errors.go index 3820b1fd4f..41d67286dd 100644 --- a/pkg/ingester/errors.go +++ b/pkg/ingester/errors.go @@ -13,6 +13,7 @@ import ( 
"github.com/gogo/status" "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/services" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -548,7 +549,7 @@ func handlePushErrorWithGRPC(err error) error { errCode = codes.Unavailable case mimirpb.INSTANCE_LIMIT: errCode = codes.Unavailable - wrappedErr = log.DoNotLogError{Err: err} + wrappedErr = middleware.DoNotLogError{Err: err} case mimirpb.TSDB_UNAVAILABLE: errCode = codes.Internal } @@ -569,7 +570,7 @@ func handlePushErrorWithHTTPGRPC(err error) error { case mimirpb.SERVICE_UNAVAILABLE: return newErrorWithStatus(err, codes.Unavailable) case mimirpb.INSTANCE_LIMIT: - return newErrorWithStatus(log.DoNotLogError{Err: err}, codes.Unavailable) + return newErrorWithStatus(middleware.DoNotLogError{Err: err}, codes.Unavailable) case mimirpb.TSDB_UNAVAILABLE: return newErrorWithHTTPStatus(err, http.StatusServiceUnavailable) } diff --git a/pkg/ingester/errors_test.go b/pkg/ingester/errors_test.go index 92d79f91bc..ddcc912ee1 100644 --- a/pkg/ingester/errors_test.go +++ b/pkg/ingester/errors_test.go @@ -22,7 +22,6 @@ import ( "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/util/globalerror" - "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/validation" ) @@ -256,7 +255,7 @@ func TestErrorWithStatus(t *testing.T) { expectedErrorDetails: &mimirpb.WriteErrorDetails{Cause: ingesterErr.errorCause()}, }, "new errorWithStatus backed by a DoNotLog error of ingesterError contains WriteErrorDetails": { - originErr: log.DoNotLogError{Err: ingesterErr}, + originErr: middleware.DoNotLogError{Err: ingesterErr}, statusCode: codes.Unimplemented, doNotLog: true, expectedErrorMessage: errMsg, @@ -370,7 +369,7 @@ func TestHandlePushErrorWithGRPC(t *testing.T) { expectedDetails: nil, }, "a DoNotLog of a generic error gets translated into an Internal gRPC error without details": { - err: log.DoNotLogError{Err: 
originalErr}, + err: middleware.DoNotLogError{Err: originalErr}, expectedCode: codes.Internal, expectedMessage: originalMsg, expectedDetails: nil, @@ -509,7 +508,7 @@ func TestHandlePushErrorWithGRPC(t *testing.T) { require.Equal(t, tc.expectedMessage, stat.Message()) checkErrorWithStatusDetails(t, stat.Details(), tc.expectedDetails) if tc.doNotLogExpected { - var doNotLogError log.DoNotLogError + var doNotLogError middleware.DoNotLogError require.ErrorAs(t, handledErr, &doNotLogError) require.False(t, doNotLogError.ShouldLog(context.Background(), 0)) } @@ -534,8 +533,8 @@ func TestHandlePushErrorWithHTTPGRPC(t *testing.T) { expectedTranslation: originalErr, }, "a DoNotLog error of a generic error is not translated": { - err: log.DoNotLogError{Err: originalErr}, - expectedTranslation: log.DoNotLogError{Err: originalErr}, + err: middleware.DoNotLogError{Err: originalErr}, + expectedTranslation: middleware.DoNotLogError{Err: originalErr}, doNotLogExpected: true, }, "an unavailableError gets translated into an errorWithStatus Unavailable error": { @@ -552,7 +551,7 @@ func TestHandlePushErrorWithHTTPGRPC(t *testing.T) { "an instanceLimitReachedError gets translated into a non-loggable errorWithStatus Unavailable error": { err: newInstanceLimitReachedError("instance limit reached"), expectedTranslation: newErrorWithStatus( - log.DoNotLogError{Err: newInstanceLimitReachedError("instance limit reached")}, + middleware.DoNotLogError{Err: newInstanceLimitReachedError("instance limit reached")}, codes.Unavailable, ), doNotLogExpected: true, @@ -560,7 +559,7 @@ func TestHandlePushErrorWithHTTPGRPC(t *testing.T) { "a wrapped instanceLimitReachedError gets translated into a non-loggable errorWithStatus Unavailable error": { err: fmt.Errorf("wrapped: %w", newInstanceLimitReachedError("instance limit reached")), expectedTranslation: newErrorWithStatus( - log.DoNotLogError{Err: fmt.Errorf("wrapped: %w", newInstanceLimitReachedError("instance limit reached"))}, + 
middleware.DoNotLogError{Err: fmt.Errorf("wrapped: %w", newInstanceLimitReachedError("instance limit reached"))}, codes.Unavailable, ), doNotLogExpected: true, @@ -670,7 +669,7 @@ func TestHandlePushErrorWithHTTPGRPC(t *testing.T) { handledErr := handlePushErrorWithHTTPGRPC(tc.err) require.Equal(t, tc.expectedTranslation, handledErr) if tc.doNotLogExpected { - var doNotLogError log.DoNotLogError + var doNotLogError middleware.DoNotLogError require.ErrorAs(t, handledErr, &doNotLogError) require.False(t, doNotLogError.ShouldLog(context.Background(), 0)) } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 4cf7442178..a20e71c9b1 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -26,6 +26,7 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/status" "github.com/grafana/dskit/concurrency" + "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" @@ -814,7 +815,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques // If we're using grpc handlers, we don't need to start/finish request here. 
if !i.cfg.LimitInflightRequestsUsingGrpcMethodLimiter { if err := i.StartPushRequest(); err != nil { - return util_log.DoNotLogError{Err: err} + return middleware.DoNotLogError{Err: err} } defer i.FinishPushRequest() } diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 8da2a669f2..1fb9d21094 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -41,6 +41,7 @@ import ( "github.com/grafana/mimir/pkg/storage/tsdb/bucketcache" "github.com/grafana/mimir/pkg/util" util_log "github.com/grafana/mimir/pkg/util/log" + "github.com/grafana/mimir/pkg/util/spanlogger" "github.com/grafana/mimir/pkg/util/validation" ) @@ -991,7 +992,7 @@ func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, er return nil, fmt.Errorf("no user id found in context") } - groupDescs, err := r.getLocalRules(userID, *in) + groupDescs, err := r.getLocalRules(ctx, userID, *in) if err != nil { return nil, err } @@ -1019,8 +1020,15 @@ func (fs StringFilterSet) IsFiltered(val string) bool { return !ok } -func (r *Ruler) getLocalRules(userID string, req RulesRequest) ([]*GroupStateDesc, error) { +func (r *Ruler) getLocalRules(ctx context.Context, userID string, req RulesRequest) ([]*GroupStateDesc, error) { + spanLog, _ := spanlogger.NewWithLogger(ctx, r.logger, "Ruler.getLocalRules") + defer spanLog.Finish() + + // Get the rule groups from the manager. We track the time it takes because the manager needs to + // take a lock to run GetRules() and we want to make sure we're not hanging here. 
+ getRulesStart := time.Now() groups := r.manager.GetRules(userID) + spanLog.DebugLog("msg", "fetched rules from manager", "duration", time.Since(getRulesStart)) groupDescs := make([]*GroupStateDesc, 0, len(groups)) prefix := filepath.Join(r.cfg.RulePath, userID) + "/" @@ -1087,7 +1095,7 @@ func (r *Ruler) getLocalRules(userID string, req RulesRequest) ([]*GroupStateDes if !getAlertingRules { continue } - rule.ActiveAlerts() + alerts := []*AlertStateDesc{} for _, a := range rule.ActiveAlerts() { alerts = append(alerts, &AlertStateDesc{ diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 872fb1b60b..e10e4f2419 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -1218,7 +1218,7 @@ func TestRuler_NotifySyncRulesAsync_ShouldTriggerRulesSyncingAndCorrectlyHandleT var actualRulersWithRuleGroups int for _, ruler := range rulers { - actualRuleGroups, err := ruler.getLocalRules(userID, RulesRequest{Filter: AnyRule}) + actualRuleGroups, err := ruler.getLocalRules(ctx, userID, RulesRequest{Filter: AnyRule}) require.NoError(t, err) actualRuleGroupsCount += len(actualRuleGroups) @@ -1256,7 +1256,7 @@ func TestRuler_NotifySyncRulesAsync_ShouldTriggerRulesSyncingAndCorrectlyHandleT var actualRuleGroupsCountPerRuler []int for _, ruler := range rulers { - actualRuleGroups, err := ruler.getLocalRules(userID, RulesRequest{Filter: AnyRule}) + actualRuleGroups, err := ruler.getLocalRules(ctx, userID, RulesRequest{Filter: AnyRule}) require.NoError(t, err) actualRuleGroupsCountPerRuler = append(actualRuleGroupsCountPerRuler, len(actualRuleGroups)) } diff --git a/pkg/scheduler/queue/queue.go b/pkg/scheduler/queue/queue.go index 1cb438f5fa..30fcc90b63 100644 --- a/pkg/scheduler/queue/queue.go +++ b/pkg/scheduler/queue/queue.go @@ -193,7 +193,7 @@ func (q *RequestQueue) dispatcherLoop() { if needToDispatchQueries { currentElement := waitingGetNextRequestForQuerierCalls.Front() - for currentElement != nil && queueBroker.len() > 0 { + for currentElement != 
nil && !queueBroker.isEmpty() { call := currentElement.Value.(*nextRequestForQuerierCall) nextElement := currentElement.Next() // We have to capture the next element before calling Remove(), as Remove() clears it. @@ -205,7 +205,7 @@ func (q *RequestQueue) dispatcherLoop() { } } - if stopping && (queueBroker.len() == 0 || q.connectedQuerierWorkers.Load() == 0) { + if stopping && (queueBroker.isEmpty() || q.connectedQuerierWorkers.Load() == 0) { // Tell any waiting GetNextRequestForQuerier calls that nothing is coming. currentElement := waitingGetNextRequestForQuerierCalls.Front() @@ -241,7 +241,6 @@ func (q *RequestQueue) enqueueRequestToBroker(broker *queueBroker, r requestToEn } return err } - q.queueLength.WithLabelValues(string(r.tenantID)).Inc() // Call the successFn here to ensure we call it before sending this request to a waiting querier. diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/scheduler/queue/queue_test.go index 0057008b29..5e4567751c 100644 --- a/pkg/scheduler/queue/queue_test.go +++ b/pkg/scheduler/queue/queue_test.go @@ -261,9 +261,9 @@ func TestRequestQueue_tryDispatchRequestToQuerier_ShouldReEnqueueAfterFailedSend maxQueriers: 0, // no sharding } - require.Nil(t, queueBroker.tenantQueues["tenant-1"]) + require.Nil(t, queueBroker.tenantQueuesTree.getNode(QueuePath{"root", "tenant-1"})) require.NoError(t, queueBroker.enqueueRequestBack(&tr)) - require.Equal(t, queueBroker.tenantQueues["tenant-1"].requests.Len(), 1) + require.False(t, queueBroker.tenantQueuesTree.getNode(QueuePath{"root", "tenant-1"}).isEmpty()) ctx, cancel := context.WithCancel(context.Background()) call := &nextRequestForQuerierCall{ @@ -278,5 +278,5 @@ func TestRequestQueue_tryDispatchRequestToQuerier_ShouldReEnqueueAfterFailedSend // indicating not to re-submit a request for nextRequestForQuerierCall for the querier require.True(t, queue.tryDispatchRequestToQuerier(queueBroker, call)) // assert request was re-enqueued for tenant after failed send - require.Equal(t, 
queueBroker.tenantQueues["tenant-1"].requests.Len(), 1) + require.False(t, queueBroker.tenantQueuesTree.getNode(QueuePath{"root", "tenant-1"}).isEmpty()) } diff --git a/pkg/scheduler/queue/tenant_queues.go b/pkg/scheduler/queue/tenant_queues.go index 97605d9b43..5f5208deae 100644 --- a/pkg/scheduler/queue/tenant_queues.go +++ b/pkg/scheduler/queue/tenant_queues.go @@ -6,7 +6,6 @@ package queue import ( - "container/list" "math/rand" "sort" "time" @@ -101,20 +100,16 @@ type queueTenant struct { // queueBroker encapsulates access to tenant queues for pending requests // and maintains consistency with the tenant-querier assignments type queueBroker struct { - tenantQueues map[TenantID]*tenantQueue + tenantQueuesTree *TreeQueue tenantQuerierAssignments tenantQuerierAssignments maxTenantQueueSize int } -type tenantQueue struct { - requests *list.List -} - func newQueueBroker(maxTenantQueueSize int, forgetDelay time.Duration) *queueBroker { return &queueBroker{ - tenantQueues: map[TenantID]*tenantQueue{}, + tenantQueuesTree: NewTreeQueue("root", maxTenantQueueSize), tenantQuerierAssignments: tenantQuerierAssignments{ queriersByID: map[QuerierID]*querierConn{}, querierIDsSorted: nil, @@ -127,22 +122,18 @@ func newQueueBroker(maxTenantQueueSize int, forgetDelay time.Duration) *queueBro } } -func (qb *queueBroker) len() int { - return len(qb.tenantQueues) +func (qb *queueBroker) isEmpty() bool { + return qb.tenantQueuesTree.isEmpty() } func (qb *queueBroker) enqueueRequestBack(request *tenantRequest) error { - queue, err := qb.getOrAddTenantQueue(request.tenantID, request.maxQueriers) + _, err := qb.tenantQuerierAssignments.getOrAddTenant(request.tenantID, request.maxQueriers) if err != nil { return err } - if queue.Len()+1 > qb.maxTenantQueueSize { - return ErrTooManyRequests - } - - queue.PushBack(request) - return nil + queuePath := QueuePath{qb.tenantQueuesTree.name, string(request.tenantID)} + return qb.tenantQueuesTree.EnqueueBackByPath(queuePath, request) } // 
enqueueRequestFront should only be used for re-enqueueing previously dequeued requests @@ -151,34 +142,13 @@ func (qb *queueBroker) enqueueRequestBack(request *tenantRequest) error { // max tenant queue size checks are skipped even though queue size violations // are not expected to occur when re-enqueuing a previously dequeued request. func (qb *queueBroker) enqueueRequestFront(request *tenantRequest) error { - queue, err := qb.getOrAddTenantQueue(request.tenantID, request.maxQueriers) + _, err := qb.tenantQuerierAssignments.getOrAddTenant(request.tenantID, request.maxQueriers) if err != nil { return err } - queue.PushFront(request) - return nil -} - -// getOrAddTenantQueue returns existing or new queue for tenant. -// maxQueriers is used to compute which queriers should handle requests for this tenant. -// If maxQueriers is <= 0, all queriers can handle this tenant's requests. -// If maxQueriers has changed since the last call, queriers for this are recomputed. -func (qb *queueBroker) getOrAddTenantQueue(tenantID TenantID, maxQueriers int) (*list.List, error) { - _, err := qb.tenantQuerierAssignments.getOrAddTenant(tenantID, maxQueriers) - if err != nil { - return nil, err - } - queue := qb.tenantQueues[tenantID] - - if queue == nil { - queue = &tenantQueue{ - requests: list.New(), - } - qb.tenantQueues[tenantID] = queue - } - - return queue.requests, nil + queuePath := QueuePath{qb.tenantQueuesTree.name, string(request.tenantID)} + return qb.tenantQueuesTree.EnqueueFrontByPath(queuePath, request) } func (qb *queueBroker) dequeueRequestForQuerier(lastTenantIndex int, querierID QuerierID) (*tenantRequest, TenantID, int, error) { @@ -187,24 +157,22 @@ func (qb *queueBroker) dequeueRequestForQuerier(lastTenantIndex int, querierID Q return nil, tenantID, tenantIndex, err } - tenantQueue := qb.tenantQueues[tenantID] - if tenantQueue == nil { - return nil, tenantID, tenantIndex, nil - } - - // queue will be nonempty as empty queues are deleted - queueElement := 
tenantQueue.requests.Front() - tenantQueue.requests.Remove(queueElement) + queuePath := QueuePath{qb.tenantQueuesTree.name, string(tenantID)} + _, queueElement := qb.tenantQueuesTree.DequeueByPath(queuePath) - if tenantQueue.requests.Len() == 0 { - qb.deleteQueue(tenantID) + queueNodeAfterDequeue := qb.tenantQueuesTree.getNode(queuePath) + if queueNodeAfterDequeue == nil { + // queue node was deleted due to being empty after dequeue + qb.tenantQuerierAssignments.removeTenant(tenantID) } - // re-casting to same type it was enqueued as; panic would indicate a bug - request := queueElement.Value.(*tenantRequest) + var request *tenantRequest + if queueElement != nil { + // re-casting to same type it was enqueued as; panic would indicate a bug + request = queueElement.(*tenantRequest) + } return request, tenantID, tenantIndex, nil - } func (qb *queueBroker) addQuerierConnection(querierID QuerierID) { @@ -220,16 +188,8 @@ func (qb *queueBroker) notifyQuerierShutdown(querierID QuerierID) { } func (qb *queueBroker) forgetDisconnectedQueriers(now time.Time) int { - return qb.tenantQuerierAssignments.forgetDisconnectedQueriers(now) -} - -func (qb *queueBroker) deleteQueue(tenantID TenantID) { - tenantQueue := qb.tenantQueues[tenantID] - if tenantQueue == nil { - return - } - delete(qb.tenantQueues, tenantID) - qb.tenantQuerierAssignments.removeTenant(tenantID) + numDisconnected := qb.tenantQuerierAssignments.forgetDisconnectedQueriers(now) + return numDisconnected } func (tqa *tenantQuerierAssignments) getNextTenantIDForQuerier(lastTenantIndex int, querierID QuerierID) (TenantID, int, error) { @@ -264,6 +224,14 @@ func (tqa *tenantQuerierAssignments) getNextTenantIDForQuerier(lastTenantIndex i return emptyTenantID, lastTenantIndex, nil } +func (tqa *tenantQuerierAssignments) getTenant(tenantID TenantID) (*queueTenant, error) { + if tenantID == emptyTenantID { + return nil, ErrInvalidTenantID + } + tenant := tqa.tenantsByID[tenantID] + return tenant, nil +} + func (tqa 
*tenantQuerierAssignments) getOrAddTenant(tenantID TenantID, maxQueriers int) (*queueTenant, error) { if tenantID == emptyTenantID { // empty tenantID is not allowed; "" is used for free spot diff --git a/pkg/scheduler/queue/tenant_queues_test.go b/pkg/scheduler/queue/tenant_queues_test.go index ce83dffc5a..765d2cc1cc 100644 --- a/pkg/scheduler/queue/tenant_queues_test.go +++ b/pkg/scheduler/queue/tenant_queues_test.go @@ -49,7 +49,7 @@ func TestQueues(t *testing.T) { lastTenantIndex = confirmOrderForQuerier(t, qb, "querier-1", lastTenantIndex, qTwo, qThree, qOne) // Remove one: ["" two three] - qb.deleteQueue("one") + qb.removeTenantQueue("one") assert.NoError(t, isConsistent(qb)) lastTenantIndex = confirmOrderForQuerier(t, qb, "querier-1", lastTenantIndex, qTwo, qThree, qTwo) @@ -60,17 +60,17 @@ func TestQueues(t *testing.T) { lastTenantIndex = confirmOrderForQuerier(t, qb, "querier-1", lastTenantIndex, qThree, qFour, qTwo, qThree) // Remove two: [four "" three] - qb.deleteQueue("two") + qb.removeTenantQueue("two") assert.NoError(t, isConsistent(qb)) lastTenantIndex = confirmOrderForQuerier(t, qb, "querier-1", lastTenantIndex, qFour, qThree, qFour) // Remove three: [four] - qb.deleteQueue("three") + qb.removeTenantQueue("three") assert.NoError(t, isConsistent(qb)) // Remove four: [] - qb.deleteQueue("four") + qb.removeTenantQueue("four") assert.NoError(t, isConsistent(qb)) req, _, _, err = qb.dequeueRequestForQuerier(lastTenantIndex, "querier-1") @@ -95,7 +95,7 @@ func TestQueuesOnTerminatingQuerier(t *testing.T) { // After notify shutdown for querier-2, it's expected to own no queue. 
qb.notifyQuerierShutdown("querier-2") tenantID, _, err := qb.tenantQuerierAssignments.getNextTenantIDForQuerier(-1, "querier-2") - tenantQueue := qb.tenantQueues[tenantID] + tenantQueue := qb.getQueue(tenantID) assert.Nil(t, tenantQueue) assert.Equal(t, emptyTenantID, tenantID) assert.Equal(t, ErrQuerierShuttingDown, err) @@ -106,7 +106,7 @@ func TestQueuesOnTerminatingQuerier(t *testing.T) { // After disconnecting querier-2, it's expected to own no queue. qb.tenantQuerierAssignments.removeQuerier("querier-2") tenantID, _, err = qb.tenantQuerierAssignments.getNextTenantIDForQuerier(-1, "querier-2") - tenantQueue = qb.tenantQueues[tenantID] + tenantQueue = qb.getQueue(tenantID) assert.Nil(t, tenantQueue) assert.Equal(t, emptyTenantID, tenantID) assert.Equal(t, ErrQuerierShuttingDown, err) @@ -206,6 +206,7 @@ func TestQueuesConsistency(t *testing.T) { switch r.Int() % 6 { case 0: queue, err := qb.getOrAddTenantQueue(generateTenant(r), 3) + //queueTenant, err := qb.tenantQuerierAssignments.getOrAddTenant(generateTenant(r), 3) assert.Nil(t, err) assert.NotNil(t, queue) case 1: @@ -213,7 +214,7 @@ func TestQueuesConsistency(t *testing.T) { _, tenantIndex, _ := qb.tenantQuerierAssignments.getNextTenantIDForQuerier(lastTenantIndexes[querierID], querierID) lastTenantIndexes[querierID] = tenantIndex case 2: - qb.deleteQueue(generateTenant(r)) + qb.removeTenantQueue(generateTenant(r)) case 3: q := generateQuerier(r) qb.addQuerierConnection(q) @@ -416,15 +417,42 @@ func getOrAdd(t *testing.T, qb *queueBroker, tenantID TenantID, maxQueriers int) reAddedQueue, err := qb.getOrAddTenantQueue(tenantID, maxQueriers) assert.Nil(t, err) assert.Equal(t, addedQueue, reAddedQueue) - return addedQueue + return addedQueue.localQueue +} + +func (qb *queueBroker) getOrAddTenantQueue(tenantID TenantID, maxQueriers int) (*TreeQueue, error) { + _, err := qb.tenantQuerierAssignments.getOrAddTenant(tenantID, maxQueriers) + if err != nil { + return nil, err + } + + queuePath := 
QueuePath{qb.tenantQueuesTree.name, string(tenantID)} + return qb.tenantQueuesTree.getOrAddNode(queuePath) +} + +func (qb *queueBroker) getQueue(tenantID TenantID) *TreeQueue { + tenant, err := qb.tenantQuerierAssignments.getTenant(tenantID) + if tenant == nil || err != nil { + return nil + } + + queuePath := QueuePath{qb.tenantQueuesTree.name, string(tenantID)} + tenantQueue := qb.tenantQueuesTree.getNode(queuePath) + return tenantQueue +} + +func (qb *queueBroker) removeTenantQueue(tenantID TenantID) bool { + qb.tenantQuerierAssignments.removeTenant(tenantID) + queuePath := QueuePath{qb.tenantQueuesTree.name, string(tenantID)} + return qb.tenantQueuesTree.deleteNode(queuePath) } func confirmOrderForQuerier(t *testing.T, qb *queueBroker, querier QuerierID, lastTenantIndex int, queues ...*list.List) int { for _, queue := range queues { var err error tenantID, _, err := qb.tenantQuerierAssignments.getNextTenantIDForQuerier(lastTenantIndex, querier) - tenantQueue := qb.tenantQueues[tenantID] - assert.Equal(t, queue, tenantQueue.requests) + tenantQueue := qb.getQueue(tenantID) + assert.Equal(t, queue, tenantQueue.localQueue) assert.NoError(t, isConsistent(qb)) assert.NoError(t, err) } @@ -438,11 +466,11 @@ func isConsistent(qb *queueBroker) error { tenantCount := 0 for ix, tenantID := range qb.tenantQuerierAssignments.tenantIDOrder { - tq := qb.tenantQueues[tenantID] - if tenantID != "" && tq == nil { + //tq := qb.tenantQueues[tenantID] + if tenantID != "" && qb.getQueue(tenantID) == nil { return fmt.Errorf("tenant %s doesn't have queue", tenantID) } - if tenantID == "" && tq != nil { + if tenantID == "" && qb.getQueue(tenantID) != nil { return fmt.Errorf("tenant %s shouldn't have queue", tenantID) } if tenantID == "" { @@ -470,9 +498,9 @@ func isConsistent(qb *queueBroker) error { } } - if tenantCount != len(qb.tenantQueues) { - return fmt.Errorf("inconsistent number of tenants list and tenant queues") - } + //if tenantCount != len(qb.tenantQueues) { + // return 
fmt.Errorf("inconsistent number of tenants list and tenant queues") + //} return nil } @@ -480,7 +508,7 @@ func isConsistent(qb *queueBroker) error { // getTenantsByQuerier returns the list of tenants handled by the provided QuerierID. func getTenantsByQuerier(broker *queueBroker, querierID QuerierID) []TenantID { var tenantIDs []TenantID - for tenantID := range broker.tenantQueues { + for _, tenantID := range broker.tenantQuerierAssignments.tenantIDOrder { querierSet := broker.tenantQuerierAssignments.tenantQuerierIDs[tenantID] if querierSet == nil { // If it's nil then all queriers can handle this tenant. diff --git a/pkg/scheduler/queue/tree_queue.go b/pkg/scheduler/queue/tree_queue.go new file mode 100644 index 0000000000..9ef69a3914 --- /dev/null +++ b/pkg/scheduler/queue/tree_queue.go @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package queue + +import ( + "container/list" + + "github.com/pkg/errors" +) + +type QueuePath []string //nolint:revive +type QueueIndex int //nolint:revive + +const localQueueIndex = -1 + +// TreeQueue is a hierarchical queue implementation with an arbitrary amount of child queues. +// +// TreeQueue internally maintains round-robin fair queuing across all of its queue dimensions. +// Each queuing dimension is modeled as a node in the tree, internally reachable through a QueuePath. +// +// The QueuePath is an ordered array of strings describing the path through the tree to the node, +// which contains the FIFO local queue of all items enqueued for that queuing dimension. +// +// When dequeuing from a given node, the node will round-robin equally between dequeuing directly +// from its own local queue and dequeuing recursively from its list of child TreeQueues. +// No queue at a given level of the tree is dequeued from consecutively unless all others +// at the same level of the tree are empty down to the leaf node. 
+type TreeQueue struct { + // name of the tree node will be set to its segment of the queue path + name string + maxQueueLen int + localQueue *list.List + index int + childQueueOrder []string + childQueueMap map[string]*TreeQueue +} + +func NewTreeQueue(name string, maxQueueLen int) *TreeQueue { + return &TreeQueue{ + name: name, + maxQueueLen: maxQueueLen, + localQueue: list.New(), + index: localQueueIndex, + childQueueMap: map[string]*TreeQueue{}, + childQueueOrder: nil, + } +} + +func (q *TreeQueue) isEmpty() bool { + // avoid recursion to make this a cheap operation + // + // Because we dereference empty child nodes during dequeuing, + // we assume that emptiness means there are no child nodes + // and nothing in this tree node's local queue. + // + // In reality a package member could attach empty child queues with getOrAddNode + // in order to get a functionally-empty tree that would report false for isEmpty. + // We assume this does not occur or is not relevant during normal operation. 
+ return q.localQueue.Len() == 0 && len(q.childQueueMap) == 0 +} + +func (q *TreeQueue) EnqueueBackByPath(path QueuePath, v any) error { + if path[0] != q.name { + return errors.New("path must begin with root node name") + } + childQueue, err := q.getOrAddNode(path) + if err != nil { + return err + } + if childQueue.localQueue.Len()+1 > childQueue.maxQueueLen { + return ErrTooManyRequests + } + + childQueue.localQueue.PushBack(v) + return nil +} + +func (q *TreeQueue) EnqueueFrontByPath(path QueuePath, v any) error { + if path[0] != q.name { + return errors.New("path must begin with root node name") + } + childQueue, err := q.getOrAddNode(path) + if err != nil { + return err + } + childQueue.localQueue.PushFront(v) + return nil +} + +// getOrAddNode recursively adds queues based on given path +func (q *TreeQueue) getOrAddNode(path QueuePath) (*TreeQueue, error) { + if len(path) == 0 { + // non-nil tree must have at least the path equal to the root name + // not a recursion case; only occurs if empty path provided to root node + return nil, nil + } + + if path[0] != q.name { + return nil, errors.New("path must begin with this node name") + } + + childPath := path[1:] + if len(childPath) == 0 { + // no path left to create; we have arrived + return q, nil + } + + var childQueue *TreeQueue + var ok bool + + if childQueue, ok = q.childQueueMap[childPath[0]]; !ok { + // no child node matches next path segment + // create next child before recurring + childQueue = NewTreeQueue(childPath[0], q.maxQueueLen) + // add new child queue to ordered list for round-robining + q.childQueueOrder = append(q.childQueueOrder, childQueue.name) + // attach new child queue to lookup map + q.childQueueMap[childPath[0]] = childQueue + } + + return childQueue.getOrAddNode(childPath) + +} + +func (q *TreeQueue) getNode(path QueuePath) *TreeQueue { + if len(path) == 0 { + // non-nil tree must have at least the path equal to the root name + // not a recursion case; only occurs if empty path 
provided to root node + return nil + } + + childPath := path[1:] + if len(childPath) == 0 { + // no path left to search for; we have arrived + return q + } + + if childQueue, ok := q.childQueueMap[childPath[0]]; ok { + return childQueue.getNode(childPath) + } + + // no child node matches next path segment + return nil +} + +func (q *TreeQueue) DequeueByPath(path QueuePath) (QueuePath, any) { + childQueue := q.getNode(path) + if childQueue == nil { + return nil, nil + } + + dequeuedPathFromChild, v := childQueue.Dequeue() + + // perform cleanup if child node is empty after dequeuing recursively + if childQueue.isEmpty() { + delete(q.childQueueMap, childQueue.name) + directChildQueueName := dequeuedPathFromChild[0] + for i, name := range q.childQueueOrder { + if name == directChildQueueName { + q.childQueueOrder = append(q.childQueueOrder[:i], q.childQueueOrder[i+1:]...) + q.wrapIndex(false) + } + } + + } + + if v == nil { + // guard against slicing into nil path + return nil, nil + } + return append(path, dequeuedPathFromChild[1:]...), v +} + +func (q *TreeQueue) Dequeue() (QueuePath, any) { + var dequeuedPath QueuePath + var v any + initialLen := len(q.childQueueOrder) + + for iters := 0; iters <= initialLen && v == nil; iters++ { + //increment := true + + if q.index == localQueueIndex { + // dequeuing from local queue; either we have: + // 1. reached a leaf node, or + // 2. 
reached an inner node when it is the local queue's turn + if elem := q.localQueue.Front(); elem != nil { + q.localQueue.Remove(elem) + v = elem.Value + } + q.wrapIndex(true) + } else { + // dequeuing from child queue node; + // pick the child node whose turn it is and recur + childQueueName := q.childQueueOrder[q.index] + childQueue := q.childQueueMap[childQueueName] + dequeuedPath, v = childQueue.Dequeue() + + // perform cleanup if child node is empty after dequeuing recursively + if childQueue.isEmpty() { + delete(q.childQueueMap, childQueueName) + q.childQueueOrder = append(q.childQueueOrder[:q.index], q.childQueueOrder[q.index+1:]...) + // no need to increment; remainder of the slice has moved left to be under q.index + //increment = false + q.wrapIndex(false) + } else { + q.wrapIndex(true) + } + } + //q.wrapIndex(increment) + } + if v == nil { + // don't report path when nothing was dequeued + return nil, nil + } + return append(QueuePath{q.name}, dequeuedPath...), v +} + +func (q *TreeQueue) deleteNode(path QueuePath) bool { + if len(path) <= 1 { + // node cannot delete itself + return false + } + + parentPath, childQueueName := path[:len(path)-1], path[len(path)-1] + + parentNode := q.getNode(parentPath) + if parentNode == nil { + // not found + return false + } + + delete(parentNode.childQueueMap, childQueueName) + for i, name := range parentNode.childQueueOrder { + if name == childQueueName { + parentNode.childQueueOrder = append(parentNode.childQueueOrder[:i], parentNode.childQueueOrder[i+1:]...)
+ parentNode.wrapIndex(false) + } + } + return true +} + +func (q *TreeQueue) wrapIndex(increment bool) { + if increment { + q.index++ + } + if q.index >= len(q.childQueueOrder) { + q.index = localQueueIndex + } +} diff --git a/pkg/scheduler/queue/tree_queue_test.go b/pkg/scheduler/queue/tree_queue_test.go new file mode 100644 index 0000000000..df17921ed4 --- /dev/null +++ b/pkg/scheduler/queue/tree_queue_test.go @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package queue + +import ( + "container/list" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const maxTestQueueLen = 8 + +func TestTreeQueue(t *testing.T) { + + expectedTreeQueue := &TreeQueue{ + name: "root", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: []string{"0", "1", "2"}, + childQueueMap: map[string]*TreeQueue{ + "0": { + name: "0", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: nil, + childQueueMap: map[string]*TreeQueue{}, + }, + "1": { + name: "1", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: []string{"0"}, + childQueueMap: map[string]*TreeQueue{ + "0": { + name: "0", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: nil, + childQueueMap: map[string]*TreeQueue{}, + }, + }, + }, + "2": { + name: "2", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: []string{"0", "1"}, + childQueueMap: map[string]*TreeQueue{ + "0": { + name: "0", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: nil, + childQueueMap: map[string]*TreeQueue{}, + }, + "1": { + name: "1", + maxQueueLen: maxTestQueueLen, + localQueue: list.New(), + index: -1, + childQueueOrder: nil, + childQueueMap: map[string]*TreeQueue{}, + }, + }, + }, + }, + } + + root := NewTreeQueue("root", maxTestQueueLen) // creates path: 
root + + _, _ = root.getOrAddNode([]string{"root", "0"}) // creates paths: root:0 + _, _ = root.getOrAddNode([]string{"root", "1", "0"}) // creates paths: root:1 and root:1:0 + _, _ = root.getOrAddNode([]string{"root", "2", "0"}) // creates paths: root:2 and root:2:0 + _, _ = root.getOrAddNode([]string{"root", "2", "1"}) // creates paths: root:2:1 only, as root:2 already exists + + assert.Equal(t, expectedTreeQueue, root) + + child := root.getNode(QueuePath{"root", "0"}) + assert.NotNil(t, child) + + child = root.getNode(QueuePath{"root", "1"}) + assert.NotNil(t, child) + + child = root.getNode(QueuePath{"root", "1", "0"}) + assert.NotNil(t, child) + + child = root.getNode([]string{"root", "2"}) + assert.NotNil(t, child) + + child = root.getNode(QueuePath{"root", "2", "0"}) + assert.NotNil(t, child) + + child = root.getNode(QueuePath{"root", "2", "1"}) + assert.NotNil(t, child) + + // nonexistent paths + child = root.getNode(QueuePath{"root", "3"}) + + assert.Nil(t, child) + + child = root.getNode(QueuePath{"root", "1", "1"}) + + assert.Nil(t, child) + + child = root.getNode(QueuePath{"root", "2", "2"}) + assert.Nil(t, child) + + // enqueue in order + require.NoError(t, root.EnqueueBackByPath([]string{"root", "0"}, "root:0:val0")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "1"}, "root:1:val0")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "1"}, "root:1:val1")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "2"}, "root:2:val0")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "1", "0"}, "root:1:0:val0")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "1", "0"}, "root:1:0:val1")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "2", "0"}, "root:2:0:val0")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "2", "0"}, "root:2:0:val1")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "2", "1"}, "root:2:1:val0")) + require.NoError(t, 
root.EnqueueBackByPath([]string{"root", "2", "1"}, "root:2:1:val1")) + require.NoError(t, root.EnqueueBackByPath([]string{"root", "2", "1"}, "root:2:1:val2")) + + // note no queue at a given level is dequeued from twice in a row + // unless all others at the same level are empty down to the leaf node + expectedQueueOutput := []any{ + "root:0:val0", // root:0:localQueue is done + "root:1:val0", + "root:2:val0", // root:2:localQueue is done + "root:1:0:val0", + "root:2:0:val0", + "root:1:val1", // root:1:localQueue is done + "root:2:1:val0", + "root:1:0:val1", // root:1:0:localQueue is done; no other queues in root:1, so root:1 is done as well + "root:2:0:val1", // root:2:0 :localQueue is done + "root:1:0:val2", // this is enqueued during dequeueing + "root:2:1:val1", + "root:2:1:val2", // root:2:1:localQueue is done; no other queues in root:2, so root:2 is done as well + // back up to root; its local queue is done and all childQueueOrder are done, so the full tree is done + } + + expectedQueuePaths := []QueuePath{ + {"root", "0"}, + {"root", "1"}, + {"root", "2"}, + {"root", "1", "0"}, + {"root", "2", "0"}, + {"root", "1"}, + {"root", "2", "1"}, + {"root", "1", "0"}, + {"root", "2", "0"}, + {"root", "1", "0"}, + {"root", "2", "1"}, + {"root", "2", "1"}, + } + + var queueOutput []any + var queuePaths []QueuePath + for range expectedQueueOutput { + path, v := root.Dequeue() + if v == nil { + fmt.Println(path) + break + } + queueOutput = append(queueOutput, v) + queuePaths = append(queuePaths, path) + if v == "root:1:0:val1" { + // root:1 and all subqueues are completely exhausted; + // root:2 will be next in the rotation + // here we insert something new into root:1 to test that: + // - the new root:1 insert does not jump the line in front of root:2 + // - root:2 will not be dequeued from twice in a row now that there is a value in root:1 again + require.NoError(t, root.EnqueueBackByPath([]string{"root", "1", "0"}, "root:1:0:val2")) + } + } + assert.Equal(t, 
expectedQueueOutput, queueOutput) + assert.Equal(t, expectedQueuePaths, queuePaths) + + // Dequeue one more time; + path, v := root.Dequeue() + assert.Nil(t, v) // assert we get nil back, + assert.Nil(t, path) + assert.True(t, root.isEmpty()) // assert nothing in local or child queues +} + +func TestDequeuePath(t *testing.T) { + root := NewTreeQueue("root", maxTestQueueLen) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "0"}, "root:0:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "1"}, "root:1:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "1"}, "root:1:val1")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2"}, "root:2:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "1", "0"}, "root:1:0:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "1", "0"}, "root:1:0:val1")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2", "0"}, "root:2:0:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2", "0"}, "root:2:0:val1")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2", "1"}, "root:2:1:val0")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2", "1"}, "root:2:1:val1")) + require.NoError(t, root.EnqueueBackByPath(QueuePath{"root", "2", "1"}, "root:2:1:val2")) + + path := QueuePath{"root", "2"} + dequeuedPath, v := root.DequeueByPath(path) + assert.Equal(t, "root:2:val0", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + dequeuedPath, v = root.DequeueByPath(path) + assert.Equal(t, "root:2:0:val0", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + dequeuedPath, v = root.DequeueByPath(path) + assert.Equal(t, "root:2:1:val0", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + dequeuedPath, v = root.DequeueByPath(path) + assert.Equal(t, "root:2:0:val1", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + dequeuedPath, v = root.DequeueByPath(path) + assert.Equal(t, 
"root:2:1:val1", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + dequeuedPath, v = root.DequeueByPath(path) + assert.Equal(t, "root:2:1:val2", v) + assert.Equal(t, path, dequeuedPath[:len(path)]) + + // root:2 is exhausted + dequeuedPath, v = root.DequeueByPath(path) + assert.Nil(t, v) + assert.Nil(t, dequeuedPath) + +} diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index fc45f52c19..8f4b0a8bc0 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -6,7 +6,6 @@ package log import ( - "context" "fmt" "io" "os" @@ -119,9 +118,3 @@ func Flush() error { return nil } - -type DoNotLogError struct{ Err error } - -func (i DoNotLogError) Error() string { return i.Err.Error() } -func (i DoNotLogError) Unwrap() error { return i.Err } -func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false } diff --git a/renovate.json b/renovate.json index 29614ab10d..898795c8cf 100644 --- a/renovate.json +++ b/renovate.json @@ -22,7 +22,8 @@ "github.com/grafana/regexp", "github.com/colega/go-yaml-yaml", "github.com/grafana/goautoneg", - "github.com/grafana/opentracing-contrib-go-stdlib" + "github.com/grafana/opentracing-contrib-go-stdlib", + "github.com/charleskorn/go-grpc" ], "enabled": false } diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go index 3012edd422..83ea023f3b 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go @@ -6,10 +6,12 @@ package httpgrpc import ( "context" + "errors" "fmt" "github.com/go-kit/log/level" + "google.golang.org/grpc/metadata" + grpcstatus "google.golang.org/grpc/status" spb "github.com/gogo/googleapis/google/rpc" "github.com/gogo/protobuf/types" @@ -44,7 +46,7 @@ func ErrorFromHTTPResponse(resp *HTTPResponse) error { // HTTPResponseFromError converts a grpc error into an HTTP response func HTTPResponseFromError(err error) (*HTTPResponse, bool) { - s, ok := 
status.FromError(err) + s, ok := statusFromError(err) if !ok { return nil, false } @@ -63,6 +65,29 @@ func HTTPResponseFromError(err error) (*HTTPResponse, bool) { return &resp, true } +// statusFromError tries to cast the given error into status.Status. +// If the given error, or any error from its tree are a status.Status, +// that status.Status and the outcome true are returned. +// Otherwise, nil and the outcome false are returned. +// This implementation differs from status.FromError() because the +// latter checks only if the given error can be cast to status.Status, +// and doesn't check other errors in the given error's tree. +func statusFromError(err error) (*status.Status, bool) { + if err == nil { + return nil, false + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st == nil { + return nil, false + } + return status.FromGRPCStatus(st), true + } + return nil, false +} + const ( MetadataMethod = "httpgrpc-method" MetadataURL = "httpgrpc-url" diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go index b0d808b7b7..35656c9434 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go @@ -27,6 +27,13 @@ import ( "github.com/grafana/dskit/middleware" ) +var ( + // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response + // has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error + // will be marked as non-loggable. + DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError") +) + // Server implements HTTPServer. HTTPServer is a generated interface that gRPC // servers must implement. 
type Server struct { @@ -62,13 +69,18 @@ func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc. recorder := httptest.NewRecorder() s.handler.ServeHTTP(recorder, req) + header := recorder.Header() resp := &httpgrpc.HTTPResponse{ Code: int32(recorder.Code), - Headers: fromHeader(recorder.Header()), + Headers: fromHeader(header), Body: recorder.Body.Bytes(), } if recorder.Code/100 == 5 { - return nil, httpgrpc.ErrorFromHTTPResponse(resp) + err := httpgrpc.ErrorFromHTTPResponse(resp) + if _, ok := header[DoNotLogErrorHeaderKey]; ok { + err = middleware.DoNotLogError{Err: err} + } + return nil, err } return resp, nil } @@ -227,6 +239,9 @@ func toHeader(hs []*httpgrpc.Header, header http.Header) { func fromHeader(hs http.Header) []*httpgrpc.Header { result := make([]*httpgrpc.Header, 0, len(hs)) for k, vs := range hs { + if k == DoNotLogErrorHeaderKey { + continue + } result = append(result, &httpgrpc.Header{ Key: k, Values: vs, diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go index 7f5db7725c..feab364743 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go @@ -29,6 +29,12 @@ type OptionalLogging interface { ShouldLog(ctx context.Context, duration time.Duration) bool } +type DoNotLogError struct{ Err error } + +func (i DoNotLogError) Error() string { return i.Err.Error() } +func (i DoNotLogError) Unwrap() error { return i.Err } +func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false } + // GRPCServerLog logs grpc requests, errors, and latency. 
type GRPCServerLog struct { Log log.Logger diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 680b9c5b40..157642c648 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -98,6 +98,7 @@ type Config struct { ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` + HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"` HTTPServerWriteTimeout time.Duration `yaml:"http_server_write_timeout"` HTTPServerIdleTimeout time.Duration `yaml:"http_server_idle_timeout"` @@ -168,7 +169,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GRPCConnLimit, "server.grpc-conn-limit", 0, "Maximum number of simultaneous grpc connections, <=0 to disable") f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the intrumentation handlers (/metrics etc).") f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 30*time.Second, "Timeout for graceful shutdowns") - f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for HTTP server") + f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for entire HTTP request, including headers and body.") + f.DurationVar(&cfg.HTTPServerReadHeaderTimeout, "server.http-read-header-timeout", 0, "Read timeout for HTTP request headers. 
If set to 0, value of -server.http-read-timeout is used.") f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 30*time.Second, "Write timeout for HTTP server") f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server") f.IntVar(&cfg.GRPCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") @@ -457,10 +459,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } httpServer := &http.Server{ - ReadTimeout: cfg.HTTPServerReadTimeout, - WriteTimeout: cfg.HTTPServerWriteTimeout, - IdleTimeout: cfg.HTTPServerIdleTimeout, - Handler: middleware.Merge(httpMiddleware...).Wrap(router), + ReadTimeout: cfg.HTTPServerReadTimeout, + ReadHeaderTimeout: cfg.HTTPServerReadHeaderTimeout, + WriteTimeout: cfg.HTTPServerWriteTimeout, + IdleTimeout: cfg.HTTPServerIdleTimeout, + Handler: middleware.Merge(httpMiddleware...).Wrap(router), } if httpTLSConfig != nil { httpServer.TLSConfig = httpTLSConfig diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 765f7595f2..29afb5e5bf 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -461,7 +461,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(time.Now()) if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = log.WithPrefix(g.logger, "traceID", sp.SpanContext().TraceID()) + logger = log.WithPrefix(logger, "traceID", sp.SpanContext().TraceID()) } g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() diff --git a/vendor/modules.txt b/vendor/modules.txt index 113ee9ad70..f1c6efa7fd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -534,7 +534,7 @@ github.com/gosimple/slug # github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc ## 
explicit; go 1.13 github.com/grafana-tools/sdk -# github.com/grafana/dskit v0.0.0-20231030022856-30221e73f47e +# github.com/grafana/dskit v0.0.0-20231031132813-52f4e8d82d59 ## explicit; go 1.20 github.com/grafana/dskit/backoff github.com/grafana/dskit/cache @@ -887,7 +887,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20231027081731-c0ddc1f2ec07 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20231030232122-b7f66b93b9b5 ## explicit; go 1.20 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1469,7 +1469,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20231027081731-c0ddc1f2ec07 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20231030232122-b7f66b93b9b5 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6