Skip to content

Commit

Permalink
add benchmarks
Browse files Browse the repository at this point in the history
Signed-off-by: Ben Ye <[email protected]>
  • Loading branch information
yeya24 committed Nov 2, 2023
1 parent 69ee5d9 commit 5335cbd
Showing 1 changed file with 299 additions and 4 deletions.
303 changes: 299 additions & 4 deletions pkg/storage/tsdb/inmemory_index_cache_test.go
Original file line number Diff line number Diff line change
import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/efficientgo/core/testutil"
	"github.com/go-kit/log"
	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/stretchr/testify/require"
	storecache "github.com/thanos-io/thanos/pkg/store/cache"
	"github.com/thanos-io/thanos/pkg/tenancy"
)

func TestInMemoryIndexCache_UpdateItem(t *testing.T) {
Expand Down Expand Up @@ -139,3 +141,296 @@ func TestInMemoryIndexCacheSetOverflow(t *testing.T) {
cache.StoreSeries(id, 2, []byte(sb.String()), tenancy.DefaultTenant)
testutil.Equals(t, float64(1), prom_testutil.ToFloat64(counter))
}

//func BenchmarkInMemoryIndexCacheStore(b *testing.B) {
// logger := log.NewNopLogger()
// reg := prometheus.NewRegistry()
// cfg := InMemoryIndexCacheConfig{
// MaxSizeBytes: uint64(storecache.DefaultInMemoryIndexCacheConfig.MaxSize),
// }
// cache, err := newInMemoryIndexCache(cfg, logger, reg)
// require.NoError(b, err)
//
// blockID := ulid.MustNew(ulid.Now(), nil)
// // 1KB is a common size for series
// data := make([]byte, 1024)
// r := rand.New(rand.NewSource(time.Now().Unix()))
// r.Read(data)
// b.ReportAllocs()
// b.ResetTimer()
// for i := 0; i < b.N; i++ {
// cache.StoreSeries(blockID, storage.SeriesRef(i), data, tenancy.DefaultTenant)
// }
//}

// BenchmarkInMemoryIndexCacheStore measures single-goroutine StoreSeries
// throughput of the fastcache-backed index cache against the upstream Thanos
// in-memory index cache, for a typical 1KB series item and a 10MB large item.
func BenchmarkInMemoryIndexCacheStore(b *testing.B) {
	logger := log.NewNopLogger()
	cfg := InMemoryIndexCacheConfig{
		MaxSizeBytes: uint64(storecache.DefaultInMemoryIndexCacheConfig.MaxSize),
	}

	blockID := ulid.MustNew(ulid.Now(), nil)
	rng := rand.New(rand.NewSource(time.Now().Unix()))
	// 1KB is a common size for series.
	seriesData := make([]byte, 1024)
	rng.Read(seriesData)
	// 10MB might happen for large postings.
	postingData := make([]byte, 10*1024*1024)
	rng.Read(postingData)

	// storeLoop drives b.N store calls with sequential series refs against
	// the supplied store function, resetting the timer first so cache
	// construction is excluded from the measurement.
	storeLoop := func(b *testing.B, data []byte, store func(storage.SeriesRef, []byte)) {
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			store(storage.SeriesRef(i), data)
		}
	}

	b.Run("FastCache", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		storeLoop(b, seriesData, func(ref storage.SeriesRef, d []byte) {
			cache.StoreSeries(blockID, ref, d, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCache", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		storeLoop(b, seriesData, func(ref storage.SeriesRef, d []byte) {
			cache.StoreSeries(blockID, ref, d, tenancy.DefaultTenant)
		})
	})

	b.Run("FastCacheLargeItem", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		storeLoop(b, postingData, func(ref storage.SeriesRef, d []byte) {
			cache.StoreSeries(blockID, ref, d, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCacheLargeItem", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		storeLoop(b, postingData, func(ref storage.SeriesRef, d []byte) {
			cache.StoreSeries(blockID, ref, d, tenancy.DefaultTenant)
		})
	})
}

// BenchmarkInMemoryIndexCacheStoreConcurrent measures StoreSeries throughput
// with 500 concurrent writer goroutines, comparing the fastcache-backed index
// cache against the upstream Thanos in-memory index cache for a typical 1KB
// item and a 10MB large item.
func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) {
	logger := log.NewNopLogger()
	cfg := InMemoryIndexCacheConfig{
		MaxSizeBytes: uint64(storecache.DefaultInMemoryIndexCacheConfig.MaxSize),
	}

	blockID := ulid.MustNew(ulid.Now(), nil)
	r := rand.New(rand.NewSource(time.Now().Unix()))
	// 1KB is a common size for series.
	seriesData := make([]byte, 1024)
	r.Read(seriesData)
	// 10MB might happen for large postings.
	postingData := make([]byte, 10*1024*1024)
	r.Read(postingData)

	// storeConcurrently fans b.N store calls out to 500 worker goroutines and
	// waits for all of them before returning, so no in-flight stores escape
	// the timed region and no goroutines leak into later sub-benchmarks.
	// Note: the previous version re-asserted the (already checked) cache
	// constructor error from inside the workers; testing.TB.FailNow must not
	// be called from a non-benchmark goroutine, so that assertion is dropped.
	storeConcurrently := func(b *testing.B, store func(storage.SeriesRef)) {
		ch := make(chan int)
		var wg sync.WaitGroup
		for i := 0; i < 500; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := range ch {
					store(storage.SeriesRef(j))
				}
			}()
		}
		// Reset only after the workers are spawned so goroutine startup cost
		// is not attributed to the store path.
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ch <- i
		}
		close(ch)
		wg.Wait()
	}

	b.Run("FastCache", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		storeConcurrently(b, func(ref storage.SeriesRef) {
			cache.StoreSeries(blockID, ref, seriesData, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCache", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		storeConcurrently(b, func(ref storage.SeriesRef) {
			cache.StoreSeries(blockID, ref, seriesData, tenancy.DefaultTenant)
		})
	})

	b.Run("FastCacheLargeItem", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		storeConcurrently(b, func(ref storage.SeriesRef) {
			cache.StoreSeries(blockID, ref, postingData, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCacheLargeItem", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		storeConcurrently(b, func(ref storage.SeriesRef) {
			cache.StoreSeries(blockID, ref, postingData, tenancy.DefaultTenant)
		})
	})
}

// BenchmarkInMemoryIndexCacheFetch measures single-goroutine FetchMultiSeries
// throughput over 10000 pre-stored 1KB series, comparing the fastcache-backed
// index cache against the upstream Thanos in-memory index cache.
func BenchmarkInMemoryIndexCacheFetch(b *testing.B) {
	logger := log.NewNopLogger()
	cfg := InMemoryIndexCacheConfig{
		MaxSizeBytes: uint64(storecache.DefaultInMemoryIndexCacheConfig.MaxSize),
	}

	blockID := ulid.MustNew(ulid.Now(), nil)
	rng := rand.New(rand.NewSource(time.Now().Unix()))
	// 1KB is a common size for series.
	seriesData := make([]byte, 1024)
	rng.Read(seriesData)
	ctx := context.Background()
	items := 10000
	ids := make([]storage.SeriesRef, items)
	for i := range ids {
		ids[i] = storage.SeriesRef(i)
	}

	// fetchLoop drives b.N FetchMultiSeries calls for the whole id set,
	// resetting the timer first so the pre-population done by the caller is
	// excluded from the measurement.
	fetchLoop := func(b *testing.B, fetch func()) {
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			fetch()
		}
	}

	b.Run("FastCache", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		for _, id := range ids {
			cache.StoreSeries(blockID, id, seriesData, tenancy.DefaultTenant)
		}
		fetchLoop(b, func() {
			cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCache", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		for _, id := range ids {
			cache.StoreSeries(blockID, id, seriesData, tenancy.DefaultTenant)
		}
		fetchLoop(b, func() {
			cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant)
		})
	})
}

// BenchmarkInMemoryIndexCacheFetchConcurrent measures FetchMultiSeries
// throughput with 500 concurrent reader goroutines over 10000 pre-stored 1KB
// series, comparing the fastcache-backed index cache against the upstream
// Thanos in-memory index cache.
func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) {
	logger := log.NewNopLogger()
	cfg := InMemoryIndexCacheConfig{
		MaxSizeBytes: uint64(storecache.DefaultInMemoryIndexCacheConfig.MaxSize),
	}

	blockID := ulid.MustNew(ulid.Now(), nil)
	r := rand.New(rand.NewSource(time.Now().Unix()))
	// 1KB is a common size for series.
	seriesData := make([]byte, 1024)
	r.Read(seriesData)
	ctx := context.Background()
	items := 10000
	ids := make([]storage.SeriesRef, items)
	for i := 0; i < items; i++ {
		ids[i] = storage.SeriesRef(i)
	}

	// fetchConcurrently fans b.N fetch calls out to 500 worker goroutines and
	// waits for all of them before returning, so no in-flight fetches escape
	// the timed region and no goroutines leak into later sub-benchmarks.
	fetchConcurrently := func(b *testing.B, fetch func()) {
		ch := make(chan int)
		var wg sync.WaitGroup
		for i := 0; i < 500; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for range ch {
					fetch()
				}
			}()
		}
		// Reset only after the workers are spawned so goroutine startup cost
		// is not attributed to the fetch path.
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ch <- i
		}
		close(ch)
		wg.Wait()
	}

	b.Run("FastCache", func(b *testing.B) {
		cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry())
		require.NoError(b, err)
		for i := 0; i < items; i++ {
			cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant)
		}
		fetchConcurrently(b, func() {
			cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant)
		})
	})

	b.Run("ThanosCache", func(b *testing.B) {
		cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig)
		require.NoError(b, err)
		for i := 0; i < items; i++ {
			cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant)
		}
		fetchConcurrently(b, func() {
			cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant)
		})
	})
}

0 comments on commit 5335cbd

Please sign in to comment.