diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml new file mode 100644 index 0000000..fe8f524 --- /dev/null +++ b/.github/workflows/main.yaml @@ -0,0 +1,75 @@ +name: CI + +on: + push: + branches: + - 'master' + pull_request: + +jobs: + mod: + name: Mod + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Read Go version + run: echo "GO_VERSION=$(cat ./.go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - uses: actions/cache@v3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-gomod-${{ hashFiles('**/go.sum') }} + - run: go mod download + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Read Go version + run: echo "GO_VERSION=$(cat ./.go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: latest + args: "--timeout 3m0s" + test: + name: Test + runs-on: ubuntu-latest + needs: [mod] + env: + GOCACHE: "/tmp/go/cache" + steps: + - uses: actions/checkout@v3 + - name: Read Go version + run: echo "GO_VERSION=$(cat ./.go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - uses: actions/cache@v3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-gomod-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-gomod- + - uses: actions/cache@v3 + with: + path: /tmp/go/cache + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- + - name: Run tests + run: |- + # Run race tests (excludes race_test.go) + go test ./... -race -shuffle=on + # Generate coverage file from non-race tests + go test ./... -coverprofile=coverage.txt -shuffle=on + - name: Upload coverage data + uses: codecov/codecov-action@v2 + with: + files: ./coverage.txt diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2016ef8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +/.idea +/.vscode + +/sc diff --git a/.go-version b/.go-version new file mode 100644 index 0000000..7440683 --- /dev/null +++ b/.go-version @@ -0,0 +1 @@ +1.18.0 \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..5866004 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 motoki317 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..b347286 --- /dev/null +++ b/README.md @@ -0,0 +1,31 @@ +# sc + +[![GitHub release](https://img.shields.io/github/release/motoki317/sc.svg)](https://github.com/motoki317/sc/releases/) +![CI main](https://github.com/motoki317/sc/actions/workflows/main.yaml/badge.svg) +[![codecov](https://codecov.io/gh/motoki317/sc/branch/main/graph/badge.svg)](https://codecov.io/gh/motoki317/sc) +[![Go Reference](https://pkg.go.dev/badge/github.com/motoki317/sc.svg)](https://pkg.go.dev/github.com/motoki317/sc) + +sc is a simple in-memory caching library for Go, with easily configurable backends. + +## Notable Features + +- Simple to use; the only methods are Get(), GetFresh(), and Forget(). + - There is no Set() method - this is an intentional design choice to keep usage simple. +- Supports Go 1.18 generics - both keys and values are generic. + - No `interface{}` even in internal implementations. +- Supported cache backends + - Built-in map (default) - lightweight, but does not evict items. + - LRU (`WithLRUBackend(cap)` option) - automatically evicts the least recently used items once capacity is exceeded. +- Prevents the [cache stampede](https://en.wikipedia.org/wiki/Cache_stampede) problem by coalescing requests to the same key. +- All methods are safe to call from multiple goroutines. +- Allows graceful cache replacement (if `freshFor` < `ttl`) - only one goroutine is launched in the background to re-fetch the value. +- Allows strict request coalescing (`EnableStrictCoalescing()` option) - ensures that all returned values are fresh (a niche use-case). + +## Usage + +See [reference](https://pkg.go.dev/github.com/motoki317/sc) and the usage sketch below. + +## Borrowed Ideas + +- [go-chi/stampede: Function and HTTP request coalescer](https://github.com/go-chi/stampede) +- [singleflight package - golang.org/x/sync/singleflight - pkg.go.dev](https://pkg.go.dev/golang.org/x/sync/singleflight) diff --git a/backend.go b/backend.go new file mode 100644 index 0000000..aac4b49 --- /dev/null +++ b/backend.go @@ -0,0 +1,61 @@ +package sc + +import ( + "sync" + + "github.com/dboslee/lru" +) + +// backend represents a cache backend. +// Backend implementations are expected to be goroutine-safe. 
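A minimal usage sketch of the API introduced in this change (illustrative only; the key names and durations are assumptions, and error handling is shortened):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/motoki317/sc"
)

func main() {
	// replaceFn is called on cache misses; it must have the form func(ctx, K) (V, error).
	replaceFn := func(ctx context.Context, name string) (string, error) {
		return "hello, " + name, nil
	}

	// Values are fresh for 1 minute and may be served stale for up to 2 minutes.
	cache, err := sc.New[string, string](replaceFn, 1*time.Minute, 2*time.Minute)
	if err != nil {
		panic(err)
	}

	v, _ := cache.Get(context.Background(), "world") // first call triggers replaceFn
	fmt.Println(v)                                   // hello, world
	v, _ = cache.Get(context.Background(), "world") // second call is served from the cache
	fmt.Println(v)
}
```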
+type backend[K comparable, V any] interface { + Get(key K) (v V, ok bool) + Set(key K, v V) + Delete(key K) +} + +// Interface guard +var ( + _ backend[string, string] = &mapBackend[string, string]{} + _ backend[string, string] = lruBackend[string, string]{} +) + +type mapBackend[K comparable, V any] struct { + mu sync.RWMutex + m map[K]V +} + +func (m *mapBackend[K, V]) Get(key K) (v V, ok bool) { + m.mu.RLock() + v, ok = m.m[key] + m.mu.RUnlock() + return +} + +func (m *mapBackend[K, V]) Set(key K, v V) { + m.mu.Lock() + m.m[key] = v + m.mu.Unlock() +} + +func (m *mapBackend[K, V]) Delete(key K) { + m.mu.Lock() + delete(m.m, key) + m.mu.Unlock() +} + +type lruBackend[K comparable, V any] struct { + *lru.SyncCache[K, V] +} + +func (l lruBackend[K, V]) Get(key K) (v V, ok bool) { + return l.SyncCache.Get(key) +} + +func (l lruBackend[K, V]) Set(key K, v V) { + l.SyncCache.Set(key, v) +} + +func (l lruBackend[K, V]) Delete(key K) { + l.SyncCache.Delete(key) +} diff --git a/benchmark_test.go b/benchmark_test.go new file mode 100644 index 0000000..9965095 --- /dev/null +++ b/benchmark_test.go @@ -0,0 +1,71 @@ +package sc + +import ( + "context" + "testing" + "time" +) + +func BenchmarkCache_Map(b *testing.B) { + replaceFn := func(ctx context.Context, key string) (string, error) { + return "value", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, WithMapBackend()) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cache.Get(ctx, "key") + } +} + +func BenchmarkCache_MapStrict(b *testing.B) { + replaceFn := func(ctx context.Context, key string) (string, error) { + return "value", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, WithMapBackend(), EnableStrictCoalescing()) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cache.Get(ctx, "key") + } +} + +func BenchmarkCache_LRU(b *testing.B) { + replaceFn := func(ctx context.Context, key string) (string, error) { + return "value", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, WithLRUBackend(10)) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cache.Get(ctx, "key") + } +} + +func BenchmarkCache_LRUStrict(b *testing.B) { + replaceFn := func(ctx context.Context, key string) (string, error) { + return "value", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, WithLRUBackend(10), EnableStrictCoalescing()) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cache.Get(ctx, "key") + } +} diff --git a/cache.go b/cache.go new file mode 100644 index 0000000..806a561 --- /dev/null +++ b/cache.go @@ -0,0 +1,156 @@ +package sc + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/dboslee/lru" +) + +type replaceFunc[K comparable, V any] func(ctx context.Context, key K) (V, error) + +// New creates a new cache instance. +// You can specify ttl longer than freshFor to achieve 'graceful cache replacement', where a stale item is served via Get +// while a single goroutine is launched in the background to retrieve a fresh item. 
+func New[K comparable, V any](replaceFn replaceFunc[K, V], freshFor, ttl time.Duration, options ...CacheOption) (*Cache[K, V], error) { + if replaceFn == nil { + return nil, errors.New("replaceFn cannot be nil") + } + if freshFor < 0 || ttl < 0 { + return nil, errors.New("freshFor and ttl need to be non-negative") + } + if freshFor > ttl { + return nil, errors.New("freshFor cannot be longer than ttl") + } + + config := defaultConfig() + for _, option := range options { + option(&config) + } + + var b backend[K, value[V]] + switch config.backend { + case cacheBackendMap: + b = &mapBackend[K, value[V]]{m: make(map[K]value[V], config.capacity)} + case cacheBackendLRU: + if config.capacity == 0 { + return nil, errors.New("capacity needs to be greater than 0 for LRU cache") + } + b = lruBackend[K, value[V]]{lru.NewSync[K, value[V]](lru.WithCapacity(config.capacity))} + default: + return nil, errors.New("unknown cache backend") + } + + return &Cache[K, V]{ + values: b, + calls: make(map[K]*call[V]), + fn: replaceFn, + freshFor: freshFor, + ttl: ttl, + strictCoalescing: config.enableStrictCoalescing, + }, nil +} + +// Cache represents a single cache instance. +// All methods are safe to call from multiple goroutines. +// All cache implementations prevent the 'cache stampede' problem by coalescing multiple requests to the same key. +// +// Notice that Cache doesn't have a Set(key K, value V) method - this is intentional. Users are expected to delegate +// the cache replacement logic to Cache by simply calling Get or GetFresh. +type Cache[K comparable, V any] struct { + values backend[K, value[V]] + calls map[K]*call[V] + cm sync.Mutex // cm protects calls + fn replaceFunc[K, V] + freshFor, ttl time.Duration + strictCoalescing bool +} + +// Get retrieves an item from the cache. +// Returns the value and a nil error if found. +// May return a stale item (older than freshFor, but younger than ttl) while a single goroutine is launched +// in the background to update the cache. +// Returns a zero-value and a non-nil error if replaceFn returns an error. +func (c *Cache[K, V]) Get(ctx context.Context, key K) (V, error) { + return c.get(ctx, key, false) +} + +// GetFresh is similar to Get, but if a stale item is found, it waits to retrieve a fresh item instead. +func (c *Cache[K, V]) GetFresh(ctx context.Context, key K) (V, error) { + return c.get(ctx, key, true) +} + +// Forget instructs the cache to forget about the key. +// The corresponding item will be deleted, ongoing cache replacement results (if any) will not be added to the cache, +// and any future Get and GetFresh calls will immediately retrieve a new item. 
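A sketch of how the Get, GetFresh, and Forget semantics documented above combine, under assumed durations (imports and real error handling elided; not part of the diff):

```go
func demoGracefulReplacement(ctx context.Context) error {
	replaceFn := func(ctx context.Context, key string) (string, error) {
		return "value for " + key, nil // stands in for an expensive DB or HTTP call
	}
	// freshFor=1m, ttl=5m: between 1 and 5 minutes after retrieval, Get returns the
	// stale value immediately and refreshes it in a single background goroutine,
	// while GetFresh instead waits for replaceFn to return a fresh value.
	cache, err := sc.New[string, string](replaceFn, 1*time.Minute, 5*time.Minute)
	if err != nil {
		return err
	}
	if _, err := cache.Get(ctx, "key"); err != nil { // first call: replaceFn runs synchronously
		return err
	}
	if _, err := cache.GetFresh(ctx, "key"); err != nil { // waits whenever the item is stale
		return err
	}
	cache.Forget("key") // drop the entry; the next Get calls replaceFn again
	return nil
}
```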
+func (c *Cache[K, V]) Forget(key K) { + c.cm.Lock() + if ca, ok := c.calls[key]; ok { + ca.forgotten = true + } + delete(c.calls, key) + c.values.Delete(key) + c.cm.Unlock() +} + +func (c *Cache[K, V]) get(ctx context.Context, key K, needFresh bool) (V, error) { + t0 := time.Now() +retry: + val, ok := c.values.Get(key) + + // value exists and is fresh - just return + if ok && val.isFresh(t0, c.freshFor) { + return val.v, nil + } + + // value exists and is stale, and we're OK with serving it stale while updating in the background + if ok && !needFresh && !val.isExpired(t0, c.ttl) { + c.cm.Lock() + cl, ok := c.calls[key] + if !ok { + cl = &call[V]{} + cl.wg.Add(1) + c.calls[key] = cl + go c.set(ctx, cl, key, c.fn) + } + c.cm.Unlock() + return val.v, nil // serve stale contents + } + + // value doesn't exist or is expired, or is stale, and we need it fresh - sync update + c.cm.Lock() + cl, ok := c.calls[key] + if ok { + c.cm.Unlock() + cl.wg.Wait() + if c.strictCoalescing { + goto retry // compare with the time replaceFn was executed to make sure we are always serving a fresh value + } + return cl.val, cl.err + } + + cl = &call[V]{} + cl.wg.Add(1) + c.calls[key] = cl + c.cm.Unlock() + + c.set(ctx, cl, key, c.fn) + return cl.val, cl.err +} + +func (c *Cache[K, V]) set(ctx context.Context, cl *call[V], key K, fn func(ctx context.Context, key K) (V, error)) { + t := time.Now() + cl.val, cl.err = fn(ctx, key) + + c.cm.Lock() // careful with the lock order + if !cl.forgotten { + if cl.err == nil { + c.values.Set(key, value[V]{v: cl.val, t: t}) + } + delete(c.calls, key) // this deletion needs to be inside 'if !cl.forgotten' block, because there may be a new ongoing call + } + c.cm.Unlock() + cl.wg.Done() +} diff --git a/cache_test.go b/cache_test.go new file mode 100644 index 0000000..980688c --- /dev/null +++ b/cache_test.go @@ -0,0 +1,422 @@ +package sc + +import ( + "context" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + t.Parallel() + + fn := func(ctx context.Context, s string) (string, error) { return "", nil } + + t.Run("defaults to map", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, &mapBackend[string, value[string]]{}, c.values) + assert.False(t, c.strictCoalescing) + }) + + t.Run("invalid backend", func(t *testing.T) { + t.Parallel() + + // A test case just to increase coverage to 100% + // Normal users should not be able to reach the "unknown cache backend" path + _, err := New[string, string](fn, 0, 0, func(c *cacheConfig) { c.backend = -1 }) + assert.Error(t, err) + }) + + t.Run("invalid replaceFn", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](nil, 0, 0) + assert.Error(t, err) + }) + + t.Run("invalid freshFor", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](fn, -1, 0) + assert.Error(t, err) + }) + + t.Run("invalid ttl", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](fn, 0, -1) + assert.Error(t, err) + }) + + t.Run("invalid freshFor and ttl configuration", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](fn, 2*time.Minute, 1*time.Minute) + assert.Error(t, err) + }) + + t.Run("map cache", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithMapBackend()) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, 
&mapBackend[string, value[string]]{}, c.values) + assert.False(t, c.strictCoalescing) + }) + + t.Run("map cache with capacity", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithMapBackend(), WithCapacity(10)) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, &mapBackend[string, value[string]]{}, c.values) + assert.False(t, c.strictCoalescing) + }) + + t.Run("strict map cache", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithMapBackend(), EnableStrictCoalescing()) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, &mapBackend[string, value[string]]{}, c.values) + assert.True(t, c.strictCoalescing) + }) + + t.Run("strict map cache with capacity", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithMapBackend(), EnableStrictCoalescing(), WithCapacity(10)) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, &mapBackend[string, value[string]]{}, c.values) + assert.True(t, c.strictCoalescing) + }) + + t.Run("LRU needs capacity set", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](fn, 0, 0, WithLRUBackend(10), WithCapacity(0)) + assert.Error(t, err) + }) + + t.Run("strict LRU needs capacity set", func(t *testing.T) { + t.Parallel() + + _, err := New[string, string](fn, 0, 0, WithLRUBackend(10), EnableStrictCoalescing(), WithCapacity(0)) + assert.Error(t, err) + }) + + t.Run("LRU cache", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithLRUBackend(10)) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, lruBackend[string, value[string]]{}, c.values) + assert.False(t, c.strictCoalescing) + }) + + t.Run("strict LRU cache", func(t *testing.T) { + t.Parallel() + + c, err := New[string, string](fn, 0, 0, WithLRUBackend(10), EnableStrictCoalescing()) + assert.NoError(t, err) + assert.IsType(t, &Cache[string, string]{}, c) + assert.IsType(t, lruBackend[string, value[string]]{}, c.values) + assert.True(t, c.strictCoalescing) + }) +} + +func TestCache_Get(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + t.Log("replaceFn triggered") + assert.Equal(t, "k1", key) + atomic.AddInt64(&cnt, 1) + time.Sleep(500 * time.Millisecond) + return "result1", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, c.cacheOpts...) 
+ assert.NoError(t, err) + + t0 := time.Now() + // repeat test multiple times + for x := 0; x < 5; x++ { + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + // watch for number of goroutines to make sure only one goroutine is launched to trigger replaceFn + // t.Logf("numGoroutines = %d", runtime.NumGoroutine()) + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + + // ensure single call + assert.EqualValues(t, 1, cnt) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + } + }) + } +} + +func TestCache_GetFresh(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run("normal "+c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + t.Log("cache.Get(t1, ...)") + assert.Equal(t, "k1", key) + + // some expensive op.. + time.Sleep(500 * time.Millisecond) + atomic.AddInt64(&cnt, 1) + + return "result1", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, c.cacheOpts...) + assert.NoError(t, err) + + t0 := time.Now() + // repeat test multiple times + for x := 0; x < 5; x++ { + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.GetFresh(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + + // ensure single call + assert.EqualValues(t, 1, cnt) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + } + }) + t.Run("sync fetch "+c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + assert.Equal(t, "k1", key) + atomic.AddInt64(&cnt, 1) + time.Sleep(500 * time.Millisecond) + return "result1", nil + } + cache, err := New[string, string](replaceFn, 250*time.Millisecond, 1*time.Second, c.cacheOpts...) 
+ assert.NoError(t, err) + + t0 := time.Now() + // t=0ms, 1st call group + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.GetFresh(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + assert.EqualValues(t, 1, cnt) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + + // t=500ms, 2nd call group -> has stale values, but needs to fetch fresh values + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.GetFresh(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + assert.EqualValues(t, 2, cnt) + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} + +func TestCache_Forget(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + assert.Equal(t, "k1", key) + atomic.AddInt64(&cnt, 1) + time.Sleep(500 * time.Millisecond) + return "result1", nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, c.cacheOpts...) + assert.NoError(t, err) + + t0 := time.Now() + var wg sync.WaitGroup + // t=0ms, 1st call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", v) + }() + time.Sleep(500 * time.Millisecond) + // t=500ms, Forget, then 2nd call + cache.Forget("k1") + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", v) + }() + wg.Wait() + // t=1000ms, assert replaceFn was triggered twice + assert.EqualValues(t, 2, cnt) + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} + +func TestCache_MultipleValues(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + atomic.AddInt64(&cnt, 1) + time.Sleep(500 * time.Millisecond) + return "result-" + key, nil + } + cache, err := New[string, string](replaceFn, 1*time.Second, 1*time.Second, c.cacheOpts...) 
+ assert.NoError(t, err) + + t0 := time.Now() + var wg sync.WaitGroup + // t=0ms, 1st group call + for i := 0; i < 50; i++ { + k := "k" + strconv.Itoa(i%5) + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), k) + assert.NoError(t, err) + assert.Equal(t, "result-"+k, v) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + } + wg.Wait() + // assert replaceFn was triggered exactly 5 times + assert.EqualValues(t, 5, cnt) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + + time.Sleep(1 * time.Second) + // t=1500ms, 2nd group call + for i := 0; i < 50; i++ { + k := "k" + strconv.Itoa(i%6) + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), k) + assert.NoError(t, err) + assert.Equal(t, "result-"+k, v) + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + } + wg.Wait() + // assert replaceFn was triggered exactly 11 times + assert.EqualValues(t, 11, cnt) + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} diff --git a/call.go b/call.go new file mode 100644 index 0000000..3228535 --- /dev/null +++ b/call.go @@ -0,0 +1,19 @@ +package sc + +import ( + "sync" +) + +// call is an in-flight or completed cache replacement call. +type call[V any] struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val V + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool +} diff --git a/config.go b/config.go new file mode 100644 index 0000000..16e2b70 --- /dev/null +++ b/config.go @@ -0,0 +1,66 @@ +package sc + +type CacheOption func(c *cacheConfig) + +type cacheConfig struct { + enableStrictCoalescing bool + backend cacheBackendType + capacity int +} + +type cacheBackendType int + +const ( + cacheBackendMap cacheBackendType = iota + cacheBackendLRU +) + +func defaultConfig() cacheConfig { + return cacheConfig{ + enableStrictCoalescing: false, + backend: cacheBackendMap, + capacity: 0, + } +} + +// WithCapacity sets the cache's capacity. +func WithCapacity(capacity int) CacheOption { + return func(c *cacheConfig) { + c.capacity = capacity + } +} + +// WithMapBackend specifies to use the built-in map for storing cache items (the default). +// Note that the default map backend will not evict old cache items. If your key's cardinality is high, consider using +// other backends such as LRU. +func WithMapBackend() CacheOption { + return func(c *cacheConfig) { + c.backend = cacheBackendMap + } +} + +// WithLRUBackend specifies to use LRU for storing cache items. +// Capacity needs to be greater than 0. +func WithLRUBackend(capacity int) CacheOption { + return func(c *cacheConfig) { + c.backend = cacheBackendLRU + c.capacity = capacity + } +} + +// EnableStrictCoalescing enables strict coalescing check with a slight overhead; the check prevents requests +// coming later in time to be coalesced with already stale response initiated by requests earlier in time. +// This is similar to the behavior of Cache.Forget, but different in that this does not start a new request until +// the current one finishes or Cache.Forget is called. 
+// +// This is a generalization of so-called 'zero-time-cache', where the original zero-time-cache behavior is +// achievable with zero freshFor/ttl values. cf: https://qiita.com/methane/items/27ccaee5b989fb5fca72 +// +// This is only useful if the freshFor/ttl value is very short (as in the range of a few hundred milliseconds) or +// the request takes a very long time to finish, and you need fresh values for each response. +// Most users should not need this behavior. +func EnableStrictCoalescing() CacheOption { + return func(c *cacheConfig) { + c.enableStrictCoalescing = true + } +} diff --git a/doc.go b/doc.go new file mode 100644 index 0000000..0a9f95b --- /dev/null +++ b/doc.go @@ -0,0 +1,2 @@ +// Package sc provides a simple, idiomatic in-memory caching system. +package sc diff --git a/example_test.go b/example_test.go new file mode 100644 index 0000000..d1df23b --- /dev/null +++ b/example_test.go @@ -0,0 +1,49 @@ +package sc_test + +import ( + "context" + "fmt" + "time" + + "github.com/motoki317/sc" +) + +type Person struct { + Name string + Age int +} + +func (p Person) String() string { + return fmt.Sprintf("%s: %d", p.Name, p.Age) +} + +func retrievePerson(_ context.Context, name string) (*Person, error) { + // Query the database or something... + return &Person{ + Name: name, + Age: 25, + }, nil +} + +func Example() { + // Production code should not ignore errors + cache, _ := sc.New[string, *Person](retrievePerson, 1*time.Minute, 2*time.Minute, sc.WithLRUBackend(500)) + + // Query the values - the cache will automatically trigger 'retrievePerson' for each key. + a, _ := cache.Get(context.Background(), "Alice") + b, _ := cache.Get(context.Background(), "Bob") + fmt.Println(a) // Use the values... 
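As a sketch of the 'zero-time-cache' configuration that the EnableStrictCoalescing doc comment above describes (an illustration reusing this example's retrievePerson; not part of the diff):

```go
// With freshFor and ttl both zero, nothing is ever served from the cache itself;
// concurrent callers still share a single in-flight retrievePerson call, but only
// when that call started after they did, so each caller receives a value fetched
// no earlier than the moment it called Get.
dedup, err := sc.New[string, *Person](retrievePerson, 0, 0, sc.EnableStrictCoalescing())
if err != nil {
	// handle the error
}
p, _ := dedup.Get(context.Background(), "Carol")
_ = p
```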
+ fmt.Println(b) + + // Output: + // Alice: 25 + // Bob: 25 + // Alice: 25 + // Bob: 25 +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..7182b80 --- /dev/null +++ b/go.mod @@ -0,0 +1,14 @@ +module github.com/motoki317/sc + +go 1.18 + +require ( + github.com/dboslee/lru v0.0.2-0.20220315224841-724ebd8433e2 + github.com/stretchr/testify v1.7.1 +) + +require ( + github.com/davecgh/go-spew v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..a2ee685 --- /dev/null +++ b/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dboslee/lru v0.0.2-0.20220315224841-724ebd8433e2 h1:EyebzUzb2UbGg5ofoFzdGl+KADFtxt8ErKAFK7KyZmM= +github.com/dboslee/lru v0.0.2-0.20220315224841-724ebd8433e2/go.mod h1:vDIFJHUqr1vdYKAdG9x3r+zFWP0i9uJqQWpB6nSuHxM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/race_test.go b/race_test.go new file mode 100644 index 0000000..3b55b41 --- /dev/null +++ b/race_test.go @@ -0,0 +1,352 @@ +//go:build !race + +package sc + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCache_BackGroundFetch(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + assert.Equal(t, "k1", key) + atomic.AddInt64(&cnt, 1) + time.Sleep(500 * time.Millisecond) + return "result1", nil + } + cache, err := New[string, string](replaceFn, 250*time.Millisecond, 1*time.Second, c.cacheOpts...) 
+ assert.NoError(t, err) + + t0 := time.Now() + // t=0ms, 1st call group + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + assert.EqualValues(t, 1, cnt) + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + + // t=500ms, 2nd call group -> returns stale values, one goroutine is launched in the background to trigger replaceFn + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + val, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "result1", val) + }() + } + wg.Wait() + assert.EqualValues(t, 2, cnt) // NOTE: causes race condition on cnt + // assert t=500ms + assert.InDelta(t, 500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} + +func TestCache_NoStrictCoalescing(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "map cache", cacheOpts: []CacheOption{WithMapBackend()}}, + {name: "LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10)}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + atomic.AddInt64(&cnt, 1) + assert.Equal(t, "k1", key) + time.Sleep(1 * time.Second) + return "value1", nil + } + cache, err := New[string, string](replaceFn, 500*time.Millisecond, 500*time.Millisecond, c.cacheOpts...) + assert.NoError(t, err) + + t0 := time.Now() + var wg sync.WaitGroup + // t=0ms, 1st call -> triggers replaceFn + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("1st call return") + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(250 * time.Millisecond) + // t=250ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) // NOTE: causes race condition on cnt + // t=250ms, 2nd call -> should not trigger replaceFn, to be coalesced with the 1st call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("2nd call return") + // assert t=250ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + // t=750ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) + // t=750ms, 3rd call -> returns stale value, to be coalesced with the 1st and 2nd call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("3rd call return") + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + wg.Wait() + // assert t=1250ms + assert.InDelta(t, 1250*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + // t=1250ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) + // t=1250ms, 4th call -> should trigger replaceFn + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("4th call 
return") + // assert t=2250ms + assert.InDelta(t, 2250*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(1250 * time.Millisecond) + wg.Wait() + // t=2500ms, all calls should have finished + assert.EqualValues(t, 2, cnt) + // assert t=2500ms + assert.InDelta(t, 2500*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} + +func TestCache_StrictCoalescing(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "strict LRU cache", cacheOpts: []CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + atomic.AddInt64(&cnt, 1) + assert.Equal(t, "k1", key) + time.Sleep(1 * time.Second) + return "value1", nil + } + cache, err := New[string, string](replaceFn, 500*time.Millisecond, 500*time.Millisecond, c.cacheOpts...) + assert.NoError(t, err) + + t0 := time.Now() + var wg sync.WaitGroup + // t=0ms, 1st call -> triggers replaceFn + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("1st call return") + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(250 * time.Millisecond) + // t=250ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) // NOTE: causes race condition on cnt + // t=250ms, 2nd call -> should not trigger replaceFn, to be coalesced with the 1st call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("2nd call return") + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + // t=750ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) + // t=750ms, 3rd call -> should trigger replaceFn after the first call returns + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("3rd call return") + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + // t=1250ms, assert replaceFn was called twice + assert.EqualValues(t, 2, cnt) + // t=1250ms, 4th call -> should be coalesced with the 3rd call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("4th call return") + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(1 * time.Second) + wg.Wait() + // t=2250ms, all calls should have finished + assert.EqualValues(t, 2, cnt) + // assert t=2250ms + assert.InDelta(t, 2250*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} + +func TestCache_ZeroTimeCache(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + cacheOpts []CacheOption + }{ + {name: "strict map cache", cacheOpts: []CacheOption{WithMapBackend(), EnableStrictCoalescing()}}, + {name: "strict LRU cache", cacheOpts: 
[]CacheOption{WithLRUBackend(10), EnableStrictCoalescing()}}, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var cnt int64 + replaceFn := func(ctx context.Context, key string) (string, error) { + atomic.AddInt64(&cnt, 1) + assert.Equal(t, "k1", key) + time.Sleep(1 * time.Second) + return "value1", nil + } + cache, err := New[string, string](replaceFn, 0, 0, c.cacheOpts...) + assert.NoError(t, err) + + t0 := time.Now() + var wg sync.WaitGroup + // t=0ms, 1st call -> triggers replaceFn + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("1st call return") + // assert t=1000ms + assert.InDelta(t, 1000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(250 * time.Millisecond) + // t=250ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) // NOTE: causes race condition on cnt + // t=250ms, 2nd call -> should NOT be coalesced with the 1st call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("2nd call return") + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + // t=750ms, assert replaceFn was called only once + assert.EqualValues(t, 1, cnt) + // t=750ms, 3rd call -> should be coalesced with the 2nd call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("3rd call return") + // assert t=2000ms + assert.InDelta(t, 2000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(500 * time.Millisecond) + // t=1250ms, assert replaceFn was called twice + assert.EqualValues(t, 2, cnt) + // t=1250ms, 4th call -> should NOT be coalesced with the 3rd call + wg.Add(1) + go func() { + defer wg.Done() + v, err := cache.Get(context.Background(), "k1") + assert.NoError(t, err) + assert.Equal(t, "value1", v) + t.Log("4th call return") + // assert t=3000ms + assert.InDelta(t, 3000*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }() + time.Sleep(2 * time.Second) + wg.Wait() + // t=3250ms, all calls should have finished + assert.EqualValues(t, 3, cnt) + // assert t=3250ms + assert.InDelta(t, 3250*time.Millisecond, time.Since(t0), float64(100*time.Millisecond)) + }) + } +} diff --git a/value.go b/value.go new file mode 100644 index 0000000..94879e2 --- /dev/null +++ b/value.go @@ -0,0 +1,18 @@ +package sc + +import ( + "time" +) + +type value[V any] struct { + v V + t time.Time // the time the value was acquired +} + +func (v *value[V]) isFresh(now time.Time, freshFor time.Duration) bool { + return now.Before(v.t.Add(freshFor)) +} + +func (v *value[V]) isExpired(now time.Time, ttl time.Duration) bool { + return now.After(v.t.Add(ttl)) +} diff --git a/value_test.go b/value_test.go new file mode 100644 index 0000000..9f18c08 --- /dev/null +++ b/value_test.go @@ -0,0 +1,66 @@ +package sc + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_value_isFresh(t *testing.T) { + now := time.Date(2000, 1, 1, 12, 0, 0, 0, time.UTC) + type args struct { + now time.Time + freshFor time.Duration + } + tests := []struct { + name string + t time.Time + args args + want bool + }{ + {"not fresh", now.Add(-10 * time.Minute), 
args{now, 5 * time.Minute}, false}, + {"fresh", now.Add(-3 * time.Minute), args{now, 5 * time.Minute}, true}, + {"fresh (future)", now.Add(3 * time.Minute), args{now, 5 * time.Minute}, true}, + {"fresh (distant future)", now.Add(30 * time.Minute), args{now, 5 * time.Minute}, true}, + {"fresh (now)", now, args{now, 5 * time.Minute}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &value[string]{ + v: "", + t: tt.t, + } + assert.Equalf(t, tt.want, v.isFresh(tt.args.now, tt.args.freshFor), "isFresh(%v, %v)", tt.args.now, tt.args.freshFor) + }) + } +} + +func Test_value_isExpired(t *testing.T) { + now := time.Date(2000, 1, 1, 12, 0, 0, 0, time.UTC) + type args struct { + now time.Time + ttl time.Duration + } + tests := []struct { + name string + t time.Time + args args + want bool + }{ + {"expired", now.Add(-10 * time.Minute), args{now, 5 * time.Minute}, true}, + {"not expired", now.Add(-3 * time.Minute), args{now, 5 * time.Minute}, false}, + {"not expired (future)", now.Add(3 * time.Minute), args{now, 5 * time.Minute}, false}, + {"not expired (distant future)", now.Add(30 * time.Minute), args{now, 5 * time.Minute}, false}, + {"not expired (now)", now, args{now, 5 * time.Minute}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &value[string]{ + v: "", + t: tt.t, + } + assert.Equalf(t, tt.want, v.isExpired(tt.args.now, tt.args.ttl), "isExpired(%v, %v)", tt.args.now, tt.args.ttl) + }) + } +}
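One boundary detail implied by value.go, written as a small in-package sketch in the same style as the tests above (illustrative only; not part of the diff):

```go
func Test_value_boundaries(t *testing.T) {
	// At exactly t+freshFor a value is no longer fresh, and at exactly t+ttl it is
	// not yet expired - so at that instant Get would still serve it as stale.
	freshFor, ttl := 1*time.Minute, 2*time.Minute
	v := &value[string]{v: "x", t: time.Now()}
	assert.False(t, v.isFresh(v.t.Add(freshFor), freshFor))
	assert.False(t, v.isExpired(v.t.Add(ttl), ttl))
}
```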