Skip to content

Commit

Permalink
Merge branch 'grafana:main' into feat/helm-support-dedicated-ruler-re…
Browse files Browse the repository at this point in the history
…ad-path
  • Loading branch information
alex5517 authored May 23, 2024
2 parents 7b8f29f + 383d37a commit 5b13c59
Show file tree
Hide file tree
Showing 7 changed files with 144 additions and 30 deletions.
50 changes: 27 additions & 23 deletions development/mimir-microservices-mode/config/mimir.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,7 @@ distributor:
health_check_ingesters: true
ring:
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

ingester_client:
grpc_client_config:
Expand All @@ -22,9 +20,7 @@ ingester:
final_sleep: 0s
num_tokens: 512
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

# These memberlist options will be only used if memberlist is activated via CLI option.
memberlist:
Expand All @@ -37,9 +33,13 @@ blocks_storage:

tsdb:
dir: /tmp/mimir-tsdb-ingester
# Note: this value is intentionally set low to create a faster feedback loop
# in development. However, setting this lower than 2m can cause the ruler's
# write requests to fail with out-of-bounds errors.
block_ranges_period: ["2m"]
# retention_period must be larger than block_ranges_period and querier.query_store_after
retention_period: 15m
ship_interval: 1m
block_ranges_period: [ 2h ]
retention_period: 3h

# Always use the PostingsForMatchers() cache in order to exercise it.
head_postings_for_matchers_cache_force: true
Expand All @@ -49,6 +49,11 @@ blocks_storage:
bucket_store:
sync_dir: /tmp/mimir-tsdb-querier
sync_interval: 1m
# ignore_blocks_within and sync_interval must be small enough for the store-gateways
# to discover and load new blocks shipped from the ingesters before they begin to be queried.
# With querier.query_store_after: 10m and sync_interval: 1m, anything larger than 4m causes issues.
# Slightly larger values for ignore_blocks_within work if sync_interval is reduced.
ignore_blocks_within: 4m

index_cache:
# Cache is configured via CLI flags. See docker-compose.jsonnet
Expand All @@ -71,9 +76,7 @@ ruler:
heartbeat_period: 5s
heartbeat_timeout: 15s
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

alertmanager_url: http://alertmanager-1:8031/alertmanager,http://alertmanager-2:8032/alertmanager,http://alertmanager-3:8033/alertmanager

Expand All @@ -97,9 +100,7 @@ alertmanager:
heartbeat_period: 5s
heartbeat_timeout: 15s
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

alertmanager_storage:
backend: s3
Expand All @@ -111,15 +112,15 @@ alertmanager_storage:
insecure: true

compactor:
compaction_interval: 30s
data_dir: /tmp/mimir-compactor
cleanup_interval: 1m
data_dir: "/tmp/mimir-compactor"
block_ranges: [ 4m, 8m, 16m ]
compaction_interval: 1m
compaction_concurrency: 2
cleanup_interval: 1m
tenant_cleanup_delay: 1m
sharding_ring:
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

store_gateway:
sharding_ring:
Expand All @@ -128,9 +129,7 @@ store_gateway:
heartbeat_timeout: 15s
wait_stability_min_duration: 0
kvstore:
store: consul
consul:
host: consul:8500
store: memberlist

frontend:
query_stats_enabled: true
Expand All @@ -156,6 +155,10 @@ frontend_worker:
# Uncomment to skip query-scheduler and enqueue queries directly in the query-frontend.
# frontend_address: "query-frontend:9007"

querier:
# query_store_after must be smaller than blocks_storage.tsdb.retention_period
query_store_after: 10m

query_scheduler:
# Change to "dns" to switch to query-scheduler DNS-based service discovery.
service_discovery_mode: "ring"
Expand All @@ -170,6 +173,7 @@ limits:
ingestion_rate: 50000
native_histograms_ingestion_enabled: true
cardinality_analysis_enabled: true
query_ingesters_within: 20m

runtime_config:
file: ./config/runtime.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@ weight: 65

# Configure Grafana Mimir object storage backend

Grafana Mimir can use different object storage services to persist blocks containing the metrics data, as well as recording rules and alertmanager state.
Grafana Mimir can use different object storage services to persist blocks containing the metrics data, as well as recording rules and Alertmanager state.

Grafana Mimir doesn't create the configured storage bucket; you must create it yourself.
The supported backends are:

- [Amazon S3](https://aws.amazon.com/s3/) (and compatible implementations like [MinIO](https://min.io/))
Expand Down
4 changes: 2 additions & 2 deletions operations/helm/charts/mimir-distributed/Chart.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
apiVersion: v2
version: 5.4.0-weekly.290
appVersion: r290
version: 5.4.0-weekly.291
appVersion: r291
description: "Grafana Mimir"
home: https://grafana.com/docs/helm-charts/mimir-distributed/latest/
icon: https://grafana.com/static/img/logos/logo-mimir.svg
Expand Down
4 changes: 2 additions & 2 deletions operations/helm/charts/mimir-distributed/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/latest/)

For the full documentation, visit [Grafana mimir-distributed Helm chart documentation](https://grafana.com/docs/helm-charts/mimir-distributed/latest/).

> **Note:** The documentation version is derived from the Helm chart version which is 5.4.0-weekly.290.
> **Note:** The documentation version is derived from the Helm chart version which is 5.4.0-weekly.291.
When upgrading from Helm chart version 4.X, please see [Migrate the Helm chart from version 4.x to 5.0](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-helm-chart-4.x-to-5.0/).
When upgrading from Helm chart version 3.x, please see [Migrate from single zone to zone-aware replication with Helm](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-from-single-zone-with-helm/).
Expand All @@ -14,7 +14,7 @@ When upgrading from Helm chart version 2.1, please see [Upgrade the Grafana Mimi

# mimir-distributed

![Version: 5.4.0-weekly.290](https://img.shields.io/badge/Version-5.4.0--weekly.290-informational?style=flat-square) ![AppVersion: r290](https://img.shields.io/badge/AppVersion-r290-informational?style=flat-square)
![Version: 5.4.0-weekly.291](https://img.shields.io/badge/Version-5.4.0--weekly.291-informational?style=flat-square) ![AppVersion: r291](https://img.shields.io/badge/AppVersion-r291-informational?style=flat-square)

Grafana Mimir

Expand Down
4 changes: 2 additions & 2 deletions operations/helm/charts/mimir-distributed/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ image:
# -- Grafana Mimir container image repository. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.repository'
repository: grafana/mimir
# -- Grafana Mimir container image tag. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.tag'
tag: r290-bef47e2
tag: r291-04e6b7f
# -- Container pull policy - shared between Grafana Mimir and Grafana Enterprise Metrics
pullPolicy: IfNotPresent
# -- Optionally specify an array of imagePullSecrets - shared between Grafana Mimir and Grafana Enterprise Metrics
Expand Down Expand Up @@ -3702,7 +3702,7 @@ enterprise:
# -- Grafana Enterprise Metrics container image repository. Note: for Grafana Mimir use the value 'image.repository'
repository: grafana/enterprise-metrics
# -- Grafana Enterprise Metrics container image tag. Note: for Grafana Mimir use the value 'image.tag'
tag: r290-3aa63d8f
tag: r291-370336ff
# Note: pullPolicy and optional pullSecrets are set in toplevel 'image' section, not here

# In order to use Grafana Enterprise Metrics features, you will need to provide the contents of your Grafana Enterprise Metrics
Expand Down
58 changes: 58 additions & 0 deletions pkg/alertmanager/log.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
// SPDX-License-Identifier: AGPL-3.0-only

package alertmanager

import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
alertingLogging "github.com/grafana/alerting/logging"
)

// alertingLogger implements the alertingLogging.Logger interface.
type alertingLogger struct {
kitLogger log.Logger
}

// newLoggerFactory returns a function that implements the alertingLogging.LoggerFactory interface.
//
//lint:ignore U1000 Ignore unused functions for now, they will be used to create the Grafana notifiers.
func newLoggerFactory(logger log.Logger) alertingLogging.LoggerFactory {
return func(loggerName string, ctx ...any) alertingLogging.Logger {
keyvals := append([]any{"logger", loggerName}, ctx...)
return &alertingLogger{kitLogger: log.With(logger, keyvals...)}
}
}

func (l *alertingLogger) New(ctx ...any) alertingLogging.Logger {
return &alertingLogger{log.With(l.kitLogger, ctx...)}
}

func (l *alertingLogger) Log(keyvals ...any) error {
return l.kitLogger.Log(keyvals...)
}

func (l *alertingLogger) Debug(msg string, ctx ...any) {
args := buildKeyvals(msg, ctx)
level.Debug(l.kitLogger).Log(args...)
}

func (l *alertingLogger) Info(msg string, ctx ...any) {
args := buildKeyvals(msg, ctx)
level.Info(l.kitLogger).Log(args...)
}

func (l *alertingLogger) Warn(msg string, ctx ...any) {
args := buildKeyvals(msg, ctx)
level.Warn(l.kitLogger).Log(args...)
}

func (l *alertingLogger) Error(msg string, ctx ...any) {
args := buildKeyvals(msg, ctx)
level.Error(l.kitLogger).Log(args...)
}

// buildKeyvals builds the keyvals for the log message.
// It adds "msg" and the message string as the first two elements.
func buildKeyvals(msg string, ctx []any) []any {
return append([]any{"msg", msg}, ctx...)
}
50 changes: 50 additions & 0 deletions pkg/alertmanager/log_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
// SPDX-License-Identifier: AGPL-3.0-only

package alertmanager

import (
"testing"

"github.com/stretchr/testify/require"
)

func TestBuildKeyvals(t *testing.T) {
tests := []struct {
name string
msg string
ctx []any
want []any
}{
{
name: "empty context slice",
msg: "test message",
ctx: []any{},
want: []any{"msg", "test message"},
},
{
name: "nil context slice",
msg: "test message",
ctx: nil,
want: []any{"msg", "test message"},
},
{
name: "context slice with one element",
msg: "test message",
ctx: []any{"key1"},
want: []any{"msg", "test message", "key1"},
},
{
name: "context slice with two elements",
msg: "test message",
ctx: []any{"key1", "value1"},
want: []any{"msg", "test message", "key1", "value1"},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got := buildKeyvals(test.msg, test.ctx)
require.Equal(t, test.want, got)
})
}
}

0 comments on commit 5b13c59

Please sign in to comment.