From 5d2dac077db0cd0f2df56a72f065384e2927cea0 Mon Sep 17 00:00:00 2001
From: Alan Protasio
Date: Mon, 9 Dec 2024 17:17:10 -0800
Subject: [PATCH] Create a goroutine worker pool to send data from
 distributors to ingesters. (#6406)

* Creating a worker pool to be used on distributors

Signed-off-by: alanprot

* metric + test

Signed-off-by: alanprot

* Changelog

Signed-off-by: alanprot

---------

Signed-off-by: alanprot
---
 CHANGELOG.md                                |  3 +-
 docs/configuration/config-file-reference.md |  7 ++
 pkg/alertmanager/distributor.go             |  2 +-
 pkg/alertmanager/multitenant.go             |  2 +-
 pkg/distributor/distributor.go              | 16 +++-
 pkg/ring/batch.go                           | 15 +++-
 pkg/ring/ring_test.go                       | 31 ++++++--
 pkg/util/worker_pool.go                     | 85 ++++++++++++++++++++
 pkg/util/worker_pool_test.go                | 87 +++++++++++++++++++++
 9 files changed, 234 insertions(+), 14 deletions(-)
 create mode 100644 pkg/util/worker_pool.go
 create mode 100644 pkg/util/worker_pool_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a9c4d8c67c..69fc2081ce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,7 +45,8 @@
 * [ENHANCEMENT] Distributor: Expose `cortex_label_size_bytes` native histogram metric. #6372
 * [ENHANCEMENT] Add new option `-server.grpc_server-num-stream-workers` to configure the number of worker goroutines that should be used to process incoming streams. #6386
 * [ENHANCEMENT] Distributor: Return HTTP 5XX instead of HTTP 4XX when instance limits are hit. #6358
-* [ENHANCEMENT] Ingester: Make sure unregistered ingester joining the ring after WAL replay #6277
+* [ENHANCEMENT] Distributor: Add a new `-distributor.num-push-workers` flag to use a goroutine worker pool when sending data from distributors to ingesters. #6406
+* [ENHANCEMENT] Ingester: Make sure unregistered ingester joining the ring after WAL replay #6277
 * [BUGFIX] Runtime-config: Handle absolute file paths when working directory is not / #6224
 * [BUGFIX] Ruler: Allow rule evaluation to complete during shutdown. #6326
 * [BUGFIX] Ring: update ring with new ip address when instance is lost, rejoins, but heartbeat is disabled #6271
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index f610a24a72..aa5ecd2948 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -2692,6 +2692,13 @@ ring:
   # CLI flag: -distributor.ring.instance-interface-names
   [instance_interface_names: <list of string> | default = [eth0 en0]]
 
+# EXPERIMENTAL: Number of goroutines to handle push calls from distributors to
+# ingesters. When no workers are available, a new goroutine will be spawned
+# automatically. If set to 0 (default), workers are disabled, and a new
+# goroutine will be created for each push request.
+# CLI flag: -distributor.num-push-workers
+[num_push_workers: <int> | default = 0]
+
 instance_limits:
   # Max ingestion rate (samples/sec) that this distributor will accept. This
   # limit is per-distributor, not per-tenant. Additional push requests will be
diff --git a/pkg/alertmanager/distributor.go b/pkg/alertmanager/distributor.go
index d1a7781493..68be1e2499 100644
--- a/pkg/alertmanager/distributor.go
+++ b/pkg/alertmanager/distributor.go
@@ -161,7 +161,7 @@ func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Req
 	var responses []*httpgrpc.HTTPResponse
 	var responsesMtx sync.Mutex
 	grpcHeaders := httpToHttpgrpcHeaders(r.Header)
-	err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
+	err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, nil, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
 		// Use a background context to make sure all alertmanagers get the request even if we return early.
 		localCtx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), userID), opentracing.SpanFromContext(r.Context()))
 		sp, localCtx := opentracing.StartSpanFromContext(localCtx, "Distributor.doQuorum")
diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go
index 654487d15d..97f98707aa 100644
--- a/pkg/alertmanager/multitenant.go
+++ b/pkg/alertmanager/multitenant.go
@@ -1099,7 +1099,7 @@ func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, us
 	level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key)
 
 	selfAddress := am.ringLifecycler.GetInstanceAddr()
-	err := ring.DoBatch(ctx, RingOp, am.ring, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
+	err := ring.DoBatch(ctx, RingOp, am.ring, nil, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
 		if desc.GetAddr() == selfAddress {
 			return nil
 		}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 6e7e283238..05a57a6213 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -123,6 +123,8 @@ type Distributor struct {
 	latestSeenSampleTimestampPerUser *prometheus.GaugeVec
 
 	validateMetrics *validation.ValidateMetrics
+
+	asyncExecutor util.AsyncExecutor
 }
 
 // Config contains the configuration required to
@@ -160,6 +162,11 @@ type Config struct {
 	// from quorum number of zones will be included to reduce data merged and improve performance.
 	ZoneResultsQuorumMetadata bool `yaml:"zone_results_quorum_metadata" doc:"hidden"`
 
+	// Number of goroutines to handle push calls from distributors to ingesters.
+	// If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.
+	// When no workers are available, a new goroutine will be spawned automatically.
+	NumPushWorkers int `yaml:"num_push_workers"`
+
 	// Limits for distributor
 	InstanceLimits InstanceLimits `yaml:"instance_limits"`
 
@@ -193,6 +200,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", ")))
 	f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.")
 	f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.")
+	f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of goroutines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.")
 	f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.")
 	f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.")
@@ -366,6 +374,12 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove
 		}, []string{"user"}),
 
 		validateMetrics: validation.NewValidateMetrics(reg),
+		asyncExecutor:   util.NewNoOpExecutor(),
+	}
+
+	if cfg.NumPushWorkers > 0 {
+		util_log.WarnExperimentalUse("Distributor: using goroutine worker pool")
+		d.asyncExecutor = util.NewWorkerPool("distributor", cfg.NumPushWorkers, reg)
 	}
 
 	promauto.With(reg).NewGauge(prometheus.GaugeOpts{
@@ -823,7 +837,7 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s
 		op = ring.Write
 	}
 
-	return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
+	return ring.DoBatch(ctx, op, subRing, d.asyncExecutor, keys, func(ingester ring.InstanceDesc, indexes []int) error {
 		timeseries := make([]cortexpb.PreallocTimeseries, 0, len(indexes))
 		var metadata []*cortexpb.MetricMetadata
 
diff --git a/pkg/ring/batch.go b/pkg/ring/batch.go
index 7f063c20b0..da44e1d8a8 100644
--- a/pkg/ring/batch.go
+++ b/pkg/ring/batch.go
@@ -8,9 +8,14 @@ import (
 	"go.uber.org/atomic"
 	"google.golang.org/grpc/status"
 
+	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/httpgrpcutil"
 )
 
+var (
+	noOpExecutor = util.NewNoOpExecutor()
+)
+
 type batchTracker struct {
 	rpcsPending atomic.Int32
 	rpcsFailed  atomic.Int32
@@ -66,12 +71,16 @@ func (i *itemTracker) getError() error {
 // cleanup() is always called, either on an error before starting the batches or after they all finish.
 //
 // Not implemented as a method on Ring so we can test separately.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r ReadRing, e util.AsyncExecutor, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
 	if r.InstancesCount() <= 0 {
 		cleanup()
 		return fmt.Errorf("DoBatch: InstancesCount <= 0")
 	}
 
+	if e == nil {
+		e = noOpExecutor
+	}
+
 	expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
 	itemTrackers := make([]itemTracker, len(keys))
 	instances := make(map[string]instance, r.InstancesCount())
@@ -115,11 +124,11 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
 	wg.Add(len(instances))
 	for _, i := range instances {
-		go func(i instance) {
+		e.Submit(func() {
 			err := callback(i.desc, i.indexes)
 			tracker.record(i, err)
 			wg.Done()
-		}(i)
+		})
 	}
 
 	// Perform cleanup at the end.
diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go
index 47bb91c676..a5937e2e8e 100644
--- a/pkg/ring/ring_test.go
+++ b/pkg/ring/ring_test.go
@@ -73,12 +73,29 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) {
 	}
 
 	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
 	keys := make([]uint32, numKeys)
-	// Generate a batch of N random keys, and look them up
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		generateKeys(rnd, numKeys, keys)
-		err := DoBatch(ctx, Write, &r, keys, callback, cleanup)
-		require.NoError(b, err)
+
+	tc := map[string]struct {
+		exe util.AsyncExecutor
+	}{
+		"noOpExecutor": {
+			exe: noOpExecutor,
+		},
+		"workerExecutor": {
+			exe: util.NewWorkerPool("test", 100, prometheus.NewPedanticRegistry()),
+		},
+	}
+
+	for n, c := range tc {
+		b.Run(n, func(b *testing.B) {
+			// Generate a batch of N random keys, and look them up
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				generateKeys(rnd, numKeys, keys)
+				err := DoBatch(ctx, Write, &r, c.exe, keys, callback, cleanup)
+				require.NoError(b, err)
+			}
+		})
 	}
 }
 
@@ -167,7 +184,7 @@ func TestDoBatchZeroInstances(t *testing.T) {
 		ringDesc: desc,
 		strategy: NewDefaultReplicationStrategy(),
 	}
-	require.Error(t, DoBatch(ctx, Write, &r, keys, callback, cleanup))
+	require.Error(t, DoBatch(ctx, Write, &r, nil, keys, callback, cleanup))
 }
 
 func TestAddIngester(t *testing.T) {
diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go
new file mode 100644
index 0000000000..8ebaad60e2
--- /dev/null
+++ b/pkg/util/worker_pool.go
@@ -0,0 +1,85 @@
+package util
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+// This code was based on: https://github.com/grpc/grpc-go/blob/66ba4b264d26808cb7af3c86eee66e843472915e/server.go
+
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, by spawning a new goroutine in its place, a worker can reset its
+// stack so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+type AsyncExecutor interface {
+	Submit(f func())
+	Stop()
+}
+
+type noOpExecutor struct{}
+
+func (n noOpExecutor) Stop() {}
+
+func NewNoOpExecutor() AsyncExecutor {
+	return &noOpExecutor{}
+}
+
+func (n noOpExecutor) Submit(f func()) {
+	go f()
+}
+
+type workerPoolExecutor struct {
+	serverWorkerChannel chan func()
+	closeOnce           sync.Once
+
+	fallbackTotal prometheus.Counter
+}
+
+func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) AsyncExecutor {
+	wp := &workerPoolExecutor{
+		serverWorkerChannel: make(chan func()),
+		fallbackTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "cortex",
+			Name:        "worker_pool_fallback_total",
+			Help:        "The total number of additional goroutines that needed to be created to run jobs.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
+	}
+
+	for i := 0; i < numWorkers; i++ {
+		go wp.run()
+	}
+
+	return wp
+}
+
+func (s *workerPoolExecutor) Stop() {
+	s.closeOnce.Do(func() {
+		close(s.serverWorkerChannel)
+	})
+}
+
+func (s *workerPoolExecutor) Submit(f func()) {
+	select {
+	case s.serverWorkerChannel <- f:
+	default:
+		s.fallbackTotal.Inc()
+		go f()
+	}
+}
+
+func (s *workerPoolExecutor) run() {
+	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+		f, ok := <-s.serverWorkerChannel
+		if !ok {
+			return
+		}
+		f()
+	}
+	go s.run()
+}
diff --git a/pkg/util/worker_pool_test.go b/pkg/util/worker_pool_test.go
new file mode 100644
index 0000000000..f6294f5a8a
--- /dev/null
+++ b/pkg/util/worker_pool_test.go
@@ -0,0 +1,87 @@
+package util
+
+import (
+	"bytes"
+	"sync"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewWorkerPool_CreateMultiplePoolsWithSameRegistry(t *testing.T) {
+	reg := prometheus.NewPedanticRegistry()
+	wp1 := NewWorkerPool("test1", 100, reg)
+	defer wp1.Stop()
+	wp2 := NewWorkerPool("test2", 100, reg)
+	defer wp2.Stop()
+}
+
+func TestWorkerPool_TestMetric(t *testing.T) {
+	reg := prometheus.NewPedanticRegistry()
+	workerPool := NewWorkerPool("test1", 1, reg)
+	defer workerPool.Stop()
+
+	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_worker_pool_fallback_total The total number of additional goroutines that needed to be created to run jobs.
+		# TYPE cortex_worker_pool_fallback_total counter
+		cortex_worker_pool_fallback_total{name="test1"} 0
+`), "cortex_worker_pool_fallback_total"))
+
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+
+	// Block the first job
+	workerPool.Submit(func() {
+		wg.Wait()
+	})
+
+	// Create an extra job to increment the metric
+	workerPool.Submit(func() {})
+	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+		# HELP cortex_worker_pool_fallback_total The total number of additional goroutines that needed to be created to run jobs.
+		# TYPE cortex_worker_pool_fallback_total counter
+		cortex_worker_pool_fallback_total{name="test1"} 1
+`), "cortex_worker_pool_fallback_total"))
+
+	wg.Done()
+}
+
+func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) {
+	reg := prometheus.NewPedanticRegistry()
+	numberOfWorkers := 10
+	workerPool := NewWorkerPool("test1", numberOfWorkers, reg)
+	defer workerPool.Stop()
+
+	m := sync.Mutex{}
+	blockerWg := sync.WaitGroup{}
+	blockerWg.Add(numberOfWorkers)
+
+	// Let's lock all submitted jobs
+	m.Lock()
+
+	for i := 0; i < numberOfWorkers; i++ {
+		workerPool.Submit(func() {
+			defer blockerWg.Done()
+			m.Lock()
+			m.Unlock() //nolint:staticcheck
+		})
+	}
+
+	// At this point all workers should be busy. Let's try to create a new job
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	workerPool.Submit(func() {
+		defer wg.Done()
+	})
+
+	// Make sure the last job ran to the end
+	wg.Wait()
+
+	// Let's release the jobs
+	m.Unlock()
+
+	blockerWg.Wait()
+
+}
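
For readers who want to see the scheduling behaviour introduced by util.NewWorkerPool in isolation, here is a minimal standalone sketch of the same Submit-with-fallback pattern. The names pool, newPool and submit are illustrative only and are not part of the Cortex API; the sketch assumes an unbuffered job channel like the one in worker_pool.go above, while the real implementation additionally counts fallbacks in cortex_worker_pool_fallback_total and recycles each worker goroutine after 2^16 jobs to bound stack growth.

package main

import (
	"fmt"
	"sync"
)

// pool runs jobs on a fixed set of worker goroutines; submit falls back to a
// one-off goroutine whenever every worker is busy, so callers never block.
type pool struct {
	jobs chan func()
}

func newPool(workers int) *pool {
	p := &pool{jobs: make(chan func())} // unbuffered: jobs are never queued
	for i := 0; i < workers; i++ {
		go func() {
			for f := range p.jobs {
				f()
			}
		}()
	}
	return p
}

func (p *pool) submit(f func()) {
	select {
	case p.jobs <- f: // an idle worker picked the job up
	default: // all workers busy: spawn a goroutine just for this job
		go f()
	}
}

func main() {
	p := newPool(2)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		i := i
		wg.Add(1)
		p.submit(func() {
			defer wg.Done()
			fmt.Println("job", i)
		})
	}
	wg.Wait()
	close(p.jobs) // the equivalent of Stop(): resident workers exit
}

Because the channel is unbuffered, a submitted job is either handed to an idle worker immediately or run on a brand-new goroutine; it is never queued. That matches the semantics documented for -distributor.num-push-workers: the pool caps the steady-state number of goroutines without ever delaying a push.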