Commit
Merge branch 'master' into fix-labelset-counter-race
Signed-off-by: Alan Protasio <[email protected]>
alanprot authored Dec 10, 2024
2 parents 854b99f + 5d2dac0 commit 9fe559b
Showing 9 changed files with 234 additions and 13 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -51,6 +51,8 @@
* [BUGFIX] Ring: update ring with new ip address when instance is lost, rejoins, but heartbeat is disabled. #6271
* [BUGFIX] Ingester: Fix regression on usage of cortex_ingester_queried_chunks. #6398
* [BUGFIX] Ingester: Fix possible race condition when `active series per LabelSet` is configured. #6409
* [ENHANCEMENT] Ingester: Add a new `-distributor.num-push-workers` flag to use a goroutine worker pool when sending data from distributor to ingesters. #6406
* [ENHANCEMENT] Distributor: Create a goroutine worker pool to send data from distributors to ingesters.

## 1.18.1 2024-10-14

7 changes: 7 additions & 0 deletions docs/configuration/config-file-reference.md
@@ -2692,6 +2692,13 @@ ring:
# CLI flag: -distributor.ring.instance-interface-names
[instance_interface_names: <list of string> | default = [eth0 en0]]
# EXPERIMENTAL: Number of goroutines to handle push calls from distributors to
# ingesters. When no workers are available, a new goroutine will be spawned
# automatically. If set to 0 (default), workers are disabled, and a new
# goroutine will be created for each push request.
# CLI flag: -distributor.num-push-workers
[num_push_workers: <int> | default = 0]
instance_limits:
# Max ingestion rate (samples/sec) that this distributor will accept. This
# limit is per-distributor, not per-tenant. Additional push requests will be
2 changes: 1 addition & 1 deletion pkg/alertmanager/distributor.go
@@ -161,7 +161,7 @@ func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Req
var responses []*httpgrpc.HTTPResponse
var responsesMtx sync.Mutex
grpcHeaders := httpToHttpgrpcHeaders(r.Header)
err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, nil, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error {
// Use a background context to make sure all alertmanagers get the request even if we return early.
localCtx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), userID), opentracing.SpanFromContext(r.Context()))
sp, localCtx := opentracing.StartSpanFromContext(localCtx, "Distributor.doQuorum")
2 changes: 1 addition & 1 deletion pkg/alertmanager/multitenant.go
@@ -1099,7 +1099,7 @@ func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, us
level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key)

selfAddress := am.ringLifecycler.GetInstanceAddr()
err := ring.DoBatch(ctx, RingOp, am.ring, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
err := ring.DoBatch(ctx, RingOp, am.ring, nil, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error {
if desc.GetAddr() == selfAddress {
return nil
}
16 changes: 15 additions & 1 deletion pkg/distributor/distributor.go
@@ -123,6 +123,8 @@ type Distributor struct {
latestSeenSampleTimestampPerUser *prometheus.GaugeVec

validateMetrics *validation.ValidateMetrics

asyncExecutor util.AsyncExecutor
}

// Config contains the configuration required to
@@ -160,6 +162,11 @@ type Config struct {
// from quorum number of zones will be included to reduce data merged and improve performance.
ZoneResultsQuorumMetadata bool `yaml:"zone_results_quorum_metadata" doc:"hidden"`

// Number of goroutines to handle push calls from distributors to ingesters.
// If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.
// When no workers are available, a new goroutine will be spawned automatically.
NumPushWorkers int `yaml:"num_push_workers"`

// Limits for distributor
InstanceLimits InstanceLimits `yaml:"instance_limits"`

@@ -193,6 +200,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", ")))
f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.")
f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.")
f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of goroutines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.")

f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.")
f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.")
@@ -366,6 +374,12 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove
}, []string{"user"}),

validateMetrics: validation.NewValidateMetrics(reg),
asyncExecutor: util.NewNoOpExecutor(),
}

if cfg.NumPushWorkers > 0 {
util_log.WarnExperimentalUse("Distributor: using goroutine worker pool")
d.asyncExecutor = util.NewWorkerPool("distributor", cfg.NumPushWorkers, reg)
}

promauto.With(reg).NewGauge(prometheus.GaugeOpts{
@@ -823,7 +837,7 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s
op = ring.Write
}

return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
return ring.DoBatch(ctx, op, subRing, d.asyncExecutor, keys, func(ingester ring.InstanceDesc, indexes []int) error {
timeseries := make([]cortexpb.PreallocTimeseries, 0, len(indexes))
var metadata []*cortexpb.MetricMetadata

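The executor selection in `New()` above amounts to the following sketch (the standalone `newPushExecutor` helper is hypothetical; `NewNoOpExecutor`, `NewWorkerPool`, and `NumPushWorkers` come from this change):

```go
package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util"
)

// newPushExecutor mirrors the wiring in distributor.New(): 0 workers keeps the
// previous behaviour (one goroutine per push fan-out callback), while any
// positive value enables the shared worker pool.
func newPushExecutor(numPushWorkers int, reg prometheus.Registerer) util.AsyncExecutor {
	if numPushWorkers > 0 {
		return util.NewWorkerPool("distributor", numPushWorkers, reg)
	}
	return util.NewNoOpExecutor()
}

func main() {
	e := newPushExecutor(64, prometheus.NewRegistry())
	defer e.Stop()

	var wg sync.WaitGroup
	wg.Add(1)
	e.Submit(func() {
		// In the distributor this closure is the per-instance callback passed to ring.DoBatch.
		defer wg.Done()
	})
	wg.Wait()
}
```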
15 changes: 12 additions & 3 deletions pkg/ring/batch.go
@@ -8,9 +8,14 @@ import (
"go.uber.org/atomic"
"google.golang.org/grpc/status"

"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/httpgrpcutil"
)

var (
noOpExecutor = util.NewNoOpExecutor()
)

type batchTracker struct {
rpcsPending atomic.Int32
rpcsFailed atomic.Int32
@@ -66,12 +71,16 @@ func (i *itemTracker) getError() error {
// cleanup() is always called, either on an error before starting the batches or after they all finish.
//
// Not implemented as a method on Ring so we can test separately.
func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
func DoBatch(ctx context.Context, op Operation, r ReadRing, e util.AsyncExecutor, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
if r.InstancesCount() <= 0 {
cleanup()
return fmt.Errorf("DoBatch: InstancesCount <= 0")
}

if e == nil {
e = noOpExecutor
}

expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
itemTrackers := make([]itemTracker, len(keys))
instances := make(map[string]instance, r.InstancesCount())
@@ -115,11 +124,11 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb

wg.Add(len(instances))
for _, i := range instances {
go func(i instance) {
e.Submit(func() {
err := callback(i.desc, i.indexes)
tracker.record(i, err)
wg.Done()
}(i)
})
}

// Perform cleanup at the end.
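The nil-handling and fan-out pattern added to `DoBatch` can be illustrated with a self-contained sketch (the `AsyncExecutor` interface and the nil fallback mirror the diff; `fanOut` and the instance names are illustrative stand-ins for the real ring types):

```go
package main

import (
	"fmt"
	"sync"
)

// AsyncExecutor is the minimal contract DoBatch now relies on.
type AsyncExecutor interface {
	Submit(f func())
}

// noOpExecutor reproduces the fallback used when callers pass nil: every job
// simply gets its own goroutine, which is the pre-change behaviour.
type noOpExecutor struct{}

func (noOpExecutor) Submit(f func()) { go f() }

// fanOut sketches the loop inside DoBatch: one callback per instance, all
// dispatched through the executor, with a WaitGroup standing in for the
// batchTracker bookkeeping.
func fanOut(e AsyncExecutor, instances []string, callback func(string) error) {
	if e == nil {
		e = noOpExecutor{}
	}
	var wg sync.WaitGroup
	wg.Add(len(instances))
	for _, inst := range instances {
		inst := inst
		e.Submit(func() {
			defer wg.Done()
			_ = callback(inst)
		})
	}
	wg.Wait()
}

func main() {
	fanOut(nil, []string{"ingester-0", "ingester-1"}, func(addr string) error {
		fmt.Println("push to", addr)
		return nil
	})
}
```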
31 changes: 24 additions & 7 deletions pkg/ring/ring_test.go
@@ -73,12 +73,29 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) {
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
keys := make([]uint32, numKeys)
// Generate a batch of N random keys, and look them up
b.ResetTimer()
for i := 0; i < b.N; i++ {
generateKeys(rnd, numKeys, keys)
err := DoBatch(ctx, Write, &r, keys, callback, cleanup)
require.NoError(b, err)

tc := map[string]struct {
exe util.AsyncExecutor
}{
"noOpExecutor": {
exe: noOpExecutor,
},
"workerExecutor": {
exe: util.NewWorkerPool("test", 100, prometheus.NewPedanticRegistry()),
},
}

for n, c := range tc {
b.Run(n, func(b *testing.B) {
// Generate a batch of N random keys, and look them up
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
generateKeys(rnd, numKeys, keys)
err := DoBatch(ctx, Write, &r, c.exe, keys, callback, cleanup)
require.NoError(b, err)
}
})
}
}

@@ -167,7 +184,7 @@ func TestDoBatchZeroInstances(t *testing.T) {
ringDesc: desc,
strategy: NewDefaultReplicationStrategy(),
}
require.Error(t, DoBatch(ctx, Write, &r, keys, callback, cleanup))
require.Error(t, DoBatch(ctx, Write, &r, nil, keys, callback, cleanup))
}

func TestAddIngester(t *testing.T) {
85 changes: 85 additions & 0 deletions pkg/util/worker_pool.go
@@ -0,0 +1,85 @@
package util

import (
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

// This code was based on: https://github.com/grpc/grpc-go/blob/66ba4b264d26808cb7af3c86eee66e843472915e/server.go

// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16

type AsyncExecutor interface {
Submit(f func())
Stop()
}

type noOpExecutor struct{}

func (n noOpExecutor) Stop() {}

func NewNoOpExecutor() AsyncExecutor {
return &noOpExecutor{}
}

func (n noOpExecutor) Submit(f func()) {
go f()
}

type workerPoolExecutor struct {
serverWorkerChannel chan func()
closeOnce sync.Once

fallbackTotal prometheus.Counter
}

func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) AsyncExecutor {
wp := &workerPoolExecutor{
serverWorkerChannel: make(chan func()),
fallbackTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "cortex",
Name: "worker_pool_fallback_total",
Help: "The total number of additional goroutines that needed to be created to run jobs.",
ConstLabels: prometheus.Labels{"name": name},
}),
}

for i := 0; i < numWorkers; i++ {
go wp.run()
}

return wp
}

func (s *workerPoolExecutor) Stop() {
s.closeOnce.Do(func() {
close(s.serverWorkerChannel)
})
}

func (s *workerPoolExecutor) Submit(f func()) {
select {
case s.serverWorkerChannel <- f:
default:
s.fallbackTotal.Inc()
go f()
}
}

func (s *workerPoolExecutor) run() {
for completed := 0; completed < serverWorkerResetThreshold; completed++ {
f, ok := <-s.serverWorkerChannel
if !ok {
return
}
f()
}
go s.run()
}
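
A minimal usage sketch of the pool above (only `NewWorkerPool`, `Submit`, and `Stop` come from this file; the registry and job bodies are illustrative). `Submit` never blocks: if all workers are busy, the job runs on a freshly spawned goroutine and `cortex_worker_pool_fallback_total` is incremented.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util"
)

func main() {
	reg := prometheus.NewRegistry()
	pool := util.NewWorkerPool("example", 4, reg) // 4 long-lived worker goroutines
	defer pool.Stop()

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		i := i
		wg.Add(1)
		pool.Submit(func() {
			defer wg.Done()
			fmt.Println("job", i)
		})
	}
	wg.Wait()
}
```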
87 changes: 87 additions & 0 deletions pkg/util/worker_pool_test.go
@@ -0,0 +1,87 @@
package util

import (
"bytes"
"sync"
"testing"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
)

func TestNewWorkerPool_CreateMultiplesPoolsWithSameRegistry(t *testing.T) {
reg := prometheus.NewPedanticRegistry()
wp1 := NewWorkerPool("test1", 100, reg)
defer wp1.Stop()
wp2 := NewWorkerPool("test2", 100, reg)
defer wp2.Stop()
}

func TestWorkerPool_TestMetric(t *testing.T) {
reg := prometheus.NewPedanticRegistry()
workerPool := NewWorkerPool("test1", 1, reg)
defer workerPool.Stop()

require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
# HELP cortex_worker_pool_fallback_total The total number of additional goroutines that needed to be created to run jobs.
# TYPE cortex_worker_pool_fallback_total counter
cortex_worker_pool_fallback_total{name="test1"} 0
`), "cortex_worker_pool_fallback_total"))

wg := &sync.WaitGroup{}
wg.Add(1)

// Occupy the only worker with a job that blocks until we release it below
workerPool.Submit(func() {
wg.Wait()
})

// Submit an extra job to trigger the fallback and increment the metric
workerPool.Submit(func() {})
require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
# HELP cortex_worker_pool_fallback_total The total number of additional goroutines that needed to be created to run jobs.
# TYPE cortex_worker_pool_fallback_total counter
cortex_worker_pool_fallback_total{name="test1"} 1
`), "cortex_worker_pool_fallback_total"))

wg.Done()
}

func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) {
reg := prometheus.NewPedanticRegistry()
numberOfWorkers := 10
workerPool := NewWorkerPool("test1", numberOfWorkers, reg)
defer workerPool.Stop()

m := sync.Mutex{}
blockerWg := sync.WaitGroup{}
blockerWg.Add(numberOfWorkers)

// Hold the lock so that all submitted jobs block
m.Lock()

for i := 0; i < numberOfWorkers; i++ {
workerPool.Submit(func() {
defer blockerWg.Done()
m.Lock()
m.Unlock() //nolint:staticcheck
})
}

// At this point all workers should be busy. Let's try to submit a new job
wg := sync.WaitGroup{}
wg.Add(1)
workerPool.Submit(func() {
defer wg.Done()
})

// Make sure the last job ran to the end
wg.Wait()

// Let's release the blocked jobs
m.Unlock()

blockerWg.Wait()

}
