From 517fc971ecfc98da96f8dd523acce98077da75b6 Mon Sep 17 00:00:00 2001 From: msaf1980 Date: Mon, 27 Mar 2023 17:31:19 +0500 Subject: [PATCH 1/5] update vendored dependencies --- go.mod | 3 +- go.sum | 8 +- .../github.com/msaf1980/go-syncutils/LICENSE | 21 + .../msaf1980/go-syncutils/atomic/bool.go | 53 ++ .../msaf1980/go-syncutils/atomic/bool_ext.go | 14 + .../msaf1980/go-syncutils/atomic/duration.go | 75 +++ .../msaf1980/go-syncutils/atomic/error.go | 77 +++ .../msaf1980/go-syncutils/atomic/error_ext.go | 37 ++ .../msaf1980/go-syncutils/atomic/float32.go | 103 ++++ .../msaf1980/go-syncutils/atomic/float64.go | 103 ++++ .../msaf1980/go-syncutils/atomic/int32.go | 86 +++ .../msaf1980/go-syncutils/atomic/int64.go | 86 +++ .../msaf1980/go-syncutils/atomic/nocopy.go | 14 + .../go-syncutils/atomic/pointer_go118.go | 11 + .../atomic/pointer_go118_pre119.go | 46 ++ .../go-syncutils/atomic/pointer_go119.go | 40 ++ .../msaf1980/go-syncutils/atomic/string.go | 72 +++ .../go-syncutils/atomic/string_ext.go | 54 ++ .../msaf1980/go-syncutils/atomic/time.go | 68 +++ .../msaf1980/go-syncutils/atomic/time_ext.go | 36 ++ .../msaf1980/go-syncutils/atomic/uint32.go | 86 +++ .../msaf1980/go-syncutils/atomic/uint64.go | 86 +++ .../msaf1980/go-syncutils/atomic/uintptr.go | 86 +++ .../go-syncutils/atomic/unsafe_pointer.go | 58 +++ .../msaf1980/go-syncutils/atomic/value.go | 29 ++ .../msaf1980/go-syncutils/lock/chanmutex.go | 67 +++ .../msaf1980/go-syncutils/lock/condchan.go | 165 ++++++ .../msaf1980/go-syncutils/lock/mutex.go | 165 ++++++ .../msaf1980/go-syncutils/lock/nocopy.go | 14 + .../msaf1980/go-syncutils/lock/pmutex.go | 491 ++++++++++++++++++ .../msaf1980/go-syncutils/lock/rwmutex.go | 293 +++++++++++ .../stretchr/testify/assert/assertions.go | 78 ++- .../github.com/stretchr/testify/mock/mock.go | 12 +- vendor/modules.txt | 6 +- 34 files changed, 2591 insertions(+), 52 deletions(-) create mode 100644 vendor/github.com/msaf1980/go-syncutils/LICENSE create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/bool.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/bool_ext.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/duration.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/error.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/error_ext.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/float32.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/float64.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/int32.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/int64.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/nocopy.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118_pre119.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go119.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/string.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/string_ext.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/time.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/time_ext.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/uint32.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/uint64.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/uintptr.go create mode 100644
vendor/github.com/msaf1980/go-syncutils/atomic/unsafe_pointer.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/atomic/value.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/condchan.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/mutex.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go create mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go diff --git a/go.mod b/go.mod index 23866f770..88fdd2a43 100644 --- a/go.mod +++ b/go.mod @@ -17,13 +17,14 @@ require ( github.com/msaf1980/go-expirecache v0.0.2 github.com/msaf1980/go-metrics v0.0.14 github.com/msaf1980/go-stringutils v0.1.4 + github.com/msaf1980/go-syncutils v0.0.3 github.com/msaf1980/go-timeutils v0.0.3 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.1 github.com/prometheus/client_model v0.3.0 github.com/prometheus/prometheus v0.40.2 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 go.uber.org/zap v1.21.0 ) diff --git a/go.sum b/go.sum index c60f3b0c8..acaea36ef 100644 --- a/go.sum +++ b/go.sum @@ -587,6 +587,10 @@ github.com/msaf1980/go-metrics v0.0.14/go.mod h1:8VcR8MdyvIJpcVLOVFKbhb27+60tXy0 github.com/msaf1980/go-stringutils v0.1.2/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= github.com/msaf1980/go-stringutils v0.1.4 h1:UwsIT0hplHVucqbknk3CoNqKkmIuSHhsbBldXxyld5U= github.com/msaf1980/go-stringutils v0.1.4/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= +github.com/msaf1980/go-syncutils v0.0.2 h1:F7lTtojuZUHFH9Cs6yRz4SRnvmttSV2qD6nEvseCFVg= +github.com/msaf1980/go-syncutils v0.0.2/go.mod h1:zoZwQNkDATcfKq5lQPK6dmJT7Z01COxw/vd8bcJyC9w= +github.com/msaf1980/go-syncutils v0.0.3 h1:bd6+yTSB8/CmpG7M6j1gq5sJMyPqecjJcBf19s2Y6u4= +github.com/msaf1980/go-syncutils v0.0.3/go.mod h1:zoZwQNkDATcfKq5lQPK6dmJT7Z01COxw/vd8bcJyC9w= github.com/msaf1980/go-timeutils v0.0.3 h1:c0NIpJBcU6KoMeMCPdnbGFcaP4sm7VCwoW1cdgsmUkU= github.com/msaf1980/go-timeutils v0.0.3/go.mod h1:r252j2O/ZLuwNMp/rlSYhbQdxg6glZ3MzgvskE/ItGY= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -715,8 +719,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg= diff --git a/vendor/github.com/msaf1980/go-syncutils/LICENSE b/vendor/github.com/msaf1980/go-syncutils/LICENSE new file mode 100644 index 000000000..4822803ac --- /dev/null +++ 
b/vendor/github.com/msaf1980/go-syncutils/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Michail Safronov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/bool.go b/vendor/github.com/msaf1980/go-syncutils/atomic/bool.go new file mode 100644 index 000000000..49f70cc1e --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/bool.go @@ -0,0 +1,53 @@ +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ noCopy + + v uint32 +} + +// NewBool creates a new Bool. +func NewBool(val bool) *Bool { + return &Bool{v: boolToUint32(val)} +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(atomic.LoadUint32(&x.v)) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(val bool) { + atomic.StoreUint32(&x.v, boolToUint32(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for bool values. +func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return atomic.CompareAndSwapUint32(&x.v, boolToUint32(old), boolToUint32(new)) +} + +// Swap atomically stores the given bool and returns the old value. +func (x *Bool) Swap(val bool) (old bool) { + return truthy(atomic.SwapUint32(&x.v, boolToUint32(val))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() (old bool) { + for { + old := b.Load() + if b.CompareAndSwap(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/bool_ext.go b/vendor/github.com/msaf1980/go-syncutils/atomic/bool_ext.go new file mode 100644 index 000000000..cfc485186 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/bool_ext.go @@ -0,0 +1,14 @@ +package atomic + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToUint32(b bool) uint32 { + if b { + return 1 + } + return 0 +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/duration.go b/vendor/github.com/msaf1980/go-syncutils/atomic/duration.go new file mode 100644 index 000000000..78000f0db --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/duration.go @@ -0,0 +1,75 @@ +// @generated Code generated by gen-atomicwrapper. 
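The Bool wrapper added above stores the flag as a uint32, so Load, Store, Swap, and CompareAndSwap each map to a single sync/atomic call, while Toggle retries with CompareAndSwap until it wins. A minimal usage sketch, assuming only the vendored import path introduced by this patch (the main function is illustrative, not part of the change):

    package main

    import (
        "fmt"

        "github.com/msaf1980/go-syncutils/atomic"
    )

    func main() {
        ready := atomic.NewBool(false)

        // Toggle returns the previous value, so the first call reports false.
        fmt.Println(ready.Toggle()) // false
        fmt.Println(ready.Load())   // true

        // CompareAndSwap succeeds only when the current value equals old.
        fmt.Println(ready.CompareAndSwap(true, false)) // true
        fmt.Println(ready.CompareAndSwap(true, false)) // false
    }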
+ +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ noCopy + + v int64 +} + +// NewDuration creates a new Duration. +func NewDuration(val time.Duration) *Duration { + return &Duration{v: val.Nanoseconds()} +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(atomic.LoadInt64(&x.v)) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(val time.Duration) { + atomic.StoreInt64(&x.v, val.Nanoseconds()) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return atomic.CompareAndSwapInt64(&x.v, old.Nanoseconds(), new.Nanoseconds()) +} + +// Swap atomically stores the given time.Duration and returns the old value. +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(atomic.SwapInt64(&x.v, val.Nanoseconds())) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (x *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(atomic.AddInt64(&x.v, delta.Nanoseconds())) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (x *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(atomic.AddInt64(&x.v, -delta.Nanoseconds())) +} + +// String encodes the wrapped value as a string. +func (x *Duration) String() string { + return x.Load().String() +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/error.go b/vendor/github.com/msaf1980/go-syncutils/atomic/error.go new file mode 100644 index 000000000..4a3ef3329 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/error.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ noCopy + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(val error) *Error { + x := &Error{} + if val != _zeroError { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(val error) { + x.v.Store(packError(val)) +} + +// StoreIfNil atomically stores the passed error (if no previous error). +func (x *Error) StoreIfNil(val error) (swapped bool) { + return x.CompareAndSwap(nil, val) +} + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false +} + +// Swap atomically stores the given error and returns the old +// value. +func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/error_ext.go b/vendor/github.com/msaf1980/go-syncutils/atomic/error_ext.go new file mode 100644 index 000000000..a46f98656 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/error_ext.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/float32.go b/vendor/github.com/msaf1980/go-syncutils/atomic/float32.go new file mode 100644 index 000000000..9f49ae902 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/float32.go @@ -0,0 +1,103 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" + "sync/atomic" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ noCopy + + v uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + if val == _zeroFloat32 { + return &Float32{} + } else { + return &Float32{v: math.Float32bits(val)} + } +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(atomic.LoadUint32(&x.v)) +} + +// Store atomically stores the passed float32. +func (x *Float32) Store(val float32) { + atomic.StoreUint32(&x.v, math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(atomic.SwapUint32(&x.v, math.Float32bits(val))) +} + +// Add atomically adds to the wrapped float32 and returns the new value. 
+func (x *Float32) Add(delta float32) float32 { + for { + old := x.Load() + new := old + delta + if x.CompareAndSwap(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float32 and returns the new value. +func (x *Float32) Sub(delta float32) float32 { + return x.Add(-delta) +} + +// CompareAndSwap is an atomic compare-and-swap for float32 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's built-in operators, +// but CompareAndSwap allows a stored NaN to compare equal to a passed-in NaN. +// This prevents typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever. +func (x *Float32) CompareAndSwap(old, new float32) (swapped bool) { + return atomic.CompareAndSwapUint32(&x.v, math.Float32bits(old), math.Float32bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float32) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/float64.go b/vendor/github.com/msaf1980/go-syncutils/atomic/float64.go new file mode 100644 index 000000000..23fe853e0 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/float64.go @@ -0,0 +1,103 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" + "sync/atomic" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ noCopy + + v uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(val float64) *Float64 { + if val == _zeroFloat64 { + return &Float64{} + } else { + return &Float64{v: math.Float64bits(val)} + } +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&x.v)) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(val float64) { + atomic.StoreUint64(&x.v, math.Float64bits(val)) +}
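Beyond Load and Store, the Swap, Add, and Sub helpers that follow are built on CompareAndSwap retry loops, since hardware offers no atomic float add. A small usage sketch, assuming the vendored import path:

    package main

    import (
        "fmt"

        "github.com/msaf1980/go-syncutils/atomic"
    )

    func main() {
        total := atomic.NewFloat64(1.5)

        // Each call retries Load + CompareAndSwap until no writer interferes.
        total.Add(2.25)
        total.Sub(0.75)

        fmt.Println(total.Load()) // 3
    }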
+ +// Swap atomically stores the given float64 and returns the old value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(atomic.SwapUint64(&x.v, math.Float64bits(val))) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (x *Float64) Add(delta float64) float64 { + for { + old := x.Load() + new := old + delta + if x.CompareAndSwap(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (x *Float64) Sub(delta float64) float64 { + return x.Add(-delta) +} + +// CompareAndSwap is an atomic compare-and-swap for float64 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's built-in operators, +// but CompareAndSwap allows a stored NaN to compare equal to a passed-in NaN. +// This prevents typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever. +func (x *Float64) CompareAndSwap(old, new float64) (swapped bool) { + return atomic.CompareAndSwapUint64(&x.v, math.Float64bits(old), math.Float64bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v; use bitSize 64 so no precision is lost. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/int32.go b/vendor/github.com/msaf1980/go-syncutils/atomic/int32.go new file mode 100644 index 000000000..375798d2e --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/int32.go @@ -0,0 +1,86 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ noCopy + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(val int32) *Int32 { + return &Int32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/int64.go b/vendor/github.com/msaf1980/go-syncutils/atomic/int64.go new file mode 100644 index 000000000..c9954bb5d --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/int64.go @@ -0,0 +1,86 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ noCopy + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(val int64) *Int64 { + return &Int64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/nocopy.go b/vendor/github.com/msaf1980/go-syncutils/atomic/nocopy.go new file mode 100644 index 000000000..f1814ce90 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/nocopy.go @@ -0,0 +1,14 @@ +package atomic + +// noCopy may be added to structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +// +// Note that it must not be embedded, due to the Lock and Unlock methods. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118.go b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118.go new file mode 100644 index 000000000..c5e606e91 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +package atomic + +import "fmt" + +// String returns a human readable representation of a Pointer's underlying value. +func (p *Pointer[T]) String() string { + return fmt.Sprint(p.Load()) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118_pre119.go b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118_pre119.go new file mode 100644 index 000000000..09d212ef7 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go118_pre119.go @@ -0,0 +1,46 @@ +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + // Mention *T in a field to disallow conversion between Pointer types. + // See go.dev/issue/56603 for more details. + // Use *T, not T, to avoid spurious recursive type definition errors. + _ [0]*T + + _ noCopy + + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go119.go b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go119.go new file mode 100644 index 000000000..5de00bea1 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/pointer_go119.go @@ -0,0 +1,40 @@ +// Copyright (c) 2022 Uber Technologies, Inc. 
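Both Pointer[T] implementations — the pre-Go-1.19 one above built on UnsafePointer, and the Go 1.19+ one that follows wrapping sync/atomic.Pointer — expose the same API for atomically publishing whole structs. A sketch assuming Go 1.18 or newer and the vendored import path (the config type is illustrative only):

    package main

    import (
        "fmt"

        "github.com/msaf1980/go-syncutils/atomic"
    )

    type config struct{ limit int }

    func main() {
        p := atomic.NewPointer(&config{limit: 10})

        // Writers publish a fresh struct; readers always see a complete one.
        old := p.Swap(&config{limit: 20})

        fmt.Println(old.limit, p.Load().limit) // 10 20
    }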
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.19 +// +build go1.19 + +package atomic + +import "sync/atomic" + +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + atomic.Pointer[T] +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.Store(v) + } + return &p +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/string.go b/vendor/github.com/msaf1980/go-syncutils/atomic/string.go new file mode 100644 index 000000000..02b2aaa8f --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/string.go @@ -0,0 +1,72 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ noCopy + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(val string) *String { + x := &String{} + if val != _zeroString { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + return unpackString(x.v.Load()) +} + +// Store atomically stores the passed string. +func (x *String) Store(val string) { + x.v.Store(packString(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for string values. 
+func (x *String) CompareAndSwap(old, new string) (swapped bool) { + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packString(new)) + } + + return false +} + +// Swap atomically stores the given string and returns the old +// value. +func (x *String) Swap(val string) (old string) { + return unpackString(x.v.Swap(packString(val))) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/string_ext.go b/vendor/github.com/msaf1980/go-syncutils/atomic/string_ext.go new file mode 100644 index 000000000..019109c86 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/string_ext.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/time.go b/vendor/github.com/msaf1980/go-syncutils/atomic/time.go new file mode 100644 index 000000000..b65364d93 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/time.go @@ -0,0 +1,68 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
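Since String implements MarshalText and UnmarshalText above, it can drop into encoding/json and other text-based encoders without custom glue. A usage sketch, assuming the vendored import path:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/msaf1980/go-syncutils/atomic"
    )

    func main() {
        s := atomic.NewString("primary")

        // MarshalText lets the wrapper serialize like a plain string.
        out, _ := json.Marshal(s) // MarshalText never fails here
        fmt.Println(string(out))  // "primary"

        _ = s.UnmarshalText([]byte("fallback"))
        fmt.Println(s.Load()) // fallback
    }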
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. +type Time struct { + _ noCopy + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (x *Time) CompareAndSwap(old, new time.Time) (swapped bool) { + if old == _zeroTime { + return x.v.CompareAndSwap(nil, new) + } + return x.v.CompareAndSwap(old, new) +} + +// Swap atomically swaps the wrapped time.Time and returns the old value. +func (x *Time) Swap(val time.Time) (old time.Time) { + return unpackTime(x.v.Swap(val)) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/time_ext.go b/vendor/github.com/msaf1980/go-syncutils/atomic/time_ext.go new file mode 100644 index 000000000..1e3dc978a --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/uint32.go b/vendor/github.com/msaf1980/go-syncutils/atomic/uint32.go new file mode 100644 index 000000000..12f9363ef --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/uint32.go @@ -0,0 +1,86 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ noCopy + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) +} + +// String encodes the wrapped value as a string. 
+func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/uint64.go b/vendor/github.com/msaf1980/go-syncutils/atomic/uint64.go new file mode 100644 index 000000000..83d4a6061 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/uint64.go @@ -0,0 +1,86 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ noCopy + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/uintptr.go b/vendor/github.com/msaf1980/go-syncutils/atomic/uintptr.go new file mode 100644 index 000000000..3a35b21b0 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/uintptr.go @@ -0,0 +1,86 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
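The unsigned Sub implementations above (uint32, uint64, and uintptr here) rely on two's-complement arithmetic: adding ^(delta - 1), which is -delta reinterpreted as an unsigned value, subtracts delta. A tiny check of the identity, independent of the vendored package:

    package main

    import "fmt"

    func main() {
        v := uint32(10)
        delta := uint32(3)

        // Adding ^(delta - 1) wraps around to v - delta in two's complement.
        fmt.Println(v + ^(delta - 1)) // 7
    }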
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ noCopy + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/unsafe_pointer.go b/vendor/github.com/msaf1980/go-syncutils/atomic/unsafe_pointer.go new file mode 100644 index 000000000..06b19cae9 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021-2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ noCopy + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/github.com/msaf1980/go-syncutils/atomic/value.go b/vendor/github.com/msaf1980/go-syncutils/atomic/value.go new file mode 100644 index 000000000..88fb4d778 --- /dev/null +++ b/vendor/github.com/msaf1980/go-syncutils/atomic/value.go @@ -0,0 +1,29 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+	atomic.Value
+}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go
new file mode 100644
index 000000000..70758052f
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go
@@ -0,0 +1,67 @@
+package lock
+
+import (
+	"context"
+	"time"
+)
+
+// ChanMutex is the struct implementing Mutex by channel.
+type ChanMutex struct {
+	lockChan chan struct{}
+}
+
+// NewChanMutex returns a new ChanMutex.
+func NewChanMutex() *ChanMutex {
+	return &ChanMutex{
+		lockChan: make(chan struct{}, 1),
+	}
+}
+
+// Lock acquires the lock.
+// If it is currently held by others, Lock will wait until it has a chance to acquire it.
+func (m *ChanMutex) Lock() {
+	m.lockChan <- struct{}{}
+}
+
+// Unlock releases the lock.
+func (m *ChanMutex) Unlock() {
+	<-m.lockChan
+}
+
+// TryLock attempts to acquire the lock without blocking.
+// It returns false if the lock is currently held by another goroutine.
+func (m *ChanMutex) TryLock() bool {
+	select {
+	case m.lockChan <- struct{}{}:
+		return true
+	default:
+		return false
+	}
+}
+
+// LockWithContext attempts to acquire the lock, blocking until resources
+// are available or ctx is done (timeout or cancellation).
+func (m *ChanMutex) LockWithContext(ctx context.Context) bool {
+	select {
+	case m.lockChan <- struct{}{}:
+		return true
+	case <-ctx.Done():
+		// timeout or cancellation
+		return false
+	}
+}
+
+// LockWithTimeout attempts to acquire the lock within the given duration.
+// It returns false if the lock could not be acquired in time.
+func (m *ChanMutex) LockWithTimeout(duration time.Duration) bool {
+
+	t := time.After(duration)
+
+	select {
+	case m.lockChan <- struct{}{}:
+		return true
+	case <-t:
+		// timeout
+		return false
+	}
+}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go b/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go
new file mode 100644
index 000000000..a2d972c2c
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go
@@ -0,0 +1,165 @@
+package lock
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// CondChan implements a condition variable, a rendezvous point for goroutines waiting for or announcing the occurrence
+// of an event.
+//
+// A CondChan must not be copied after first use.
+type CondChan struct {
+	_ noCopy
+
+	ch chan struct{}
+	L  sync.Mutex
+}
+
+func (cc *CondChan) waitCh() <-chan struct{} {
+
+	if cc.ch == nil {
+		cc.ch = make(chan struct{})
+	}
+	ch := cc.ch
+
+	return ch
+
+}
+
+// Wait atomically unlocks cc.L and suspends execution of the calling goroutine.
+// After later resuming execution, Wait locks cc.L before returning.
+// It is required for the caller to hold cc.L during the call.
+func (cc *CondChan) Wait() {
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	<-ch
+
+	cc.L.Lock()
+}
+
+// WaitU atomically unlocks cc.L and suspends execution of the calling goroutine.
+// It is required for the caller to hold cc.L during the call.
+// After execution, cc.L is left unlocked.
+func (cc *CondChan) WaitU() {
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	<-ch
+
+}
+
+// WaitWithContext attempts to wait with context.
+// It is required for the caller to hold cc.L during the call.
+// It returns false if ctx is done first; in that case cc.L is left unlocked.
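+//
+// A rough usage sketch (a hedged illustration, not part of the vendored
+// source; ready and onCancel are hypothetical helpers, and the predicate
+// loop mirrors sync.Cond usage):
+//
+//	cc.L.Lock()
+//	for !ready() {
+//		if !cc.WaitWithContext(ctx) {
+//			onCancel() // note: cc.L is NOT held here
+//			return
+//		}
+//	}
+//	// woken, and cc.L is held again
+//	cc.L.Unlock()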
+func (cc *CondChan) WaitWithContext(ctx context.Context) (ok bool) {
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	select {
+	case <-ch:
+		cc.L.Lock()
+		ok = true
+	case <-ctx.Done():
+		// timeout or cancellation
+	}
+
+	return
+
+}
+
+// WaitUWithContext attempts to wait with context.
+// It is required for the caller to hold cc.L during the call.
+// After execution, cc.L is left unlocked (whether woken or cancelled).
+func (cc *CondChan) WaitUWithContext(ctx context.Context) (ok bool) {
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	select {
+	case <-ch:
+		ok = true
+	case <-ctx.Done():
+		// timeout or cancellation
+	}
+
+	return
+
+}
+
+// WaitWithTimeout attempts to wait with timeout.
+// When woken before the timeout, it locks cc.L before returning true.
+// On timeout it returns false and cc.L is left unlocked.
+func (cc *CondChan) WaitWithTimeout(duration time.Duration) (ok bool) {
+
+	t := time.After(duration)
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	select {
+	case <-ch:
+		cc.L.Lock()
+		ok = true
+	case <-t:
+		// timeout
+	}
+
+	return
+
+}
+
+// WaitUWithTimeout attempts to wait with timeout.
+// After execution, cc.L is left unlocked (whether woken or timed out).
+func (cc *CondChan) WaitUWithTimeout(duration time.Duration) (ok bool) {
+
+	t := time.After(duration)
+
+	ch := cc.waitCh()
+
+	cc.L.Unlock()
+
+	select {
+	case <-ch:
+		ok = true
+	case <-t:
+		// timeout
+	}
+
+	return
+
+}
+
+// Signal wakes one goroutine waiting on cc, if there is any.
+// It is required for the caller to hold cc.L during the call.
+func (cc *CondChan) Signal() {
+
+	if cc.ch == nil {
+		return
+	}
+	select {
+	case cc.ch <- struct{}{}:
+	default:
+	}
+
+}
+
+// Broadcast wakes all goroutines waiting on cc.
+// It is required for the caller to hold cc.L during the call.
+func (cc *CondChan) Broadcast() {
+
+	if cc.ch == nil {
+		return
+	}
+	close(cc.ch)
+	cc.ch = make(chan struct{})
+
+}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go
new file mode 100644
index 000000000..0e257f758
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go
@@ -0,0 +1,165 @@
+package lock
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const tmLocked int32 = 1 // lock
+
+// Mutex - Try Mutex (supports TryLock, context and timeout locking)
+type Mutex struct {
+	state int32
+	mx    sync.Mutex
+	ch    chan struct{}
+}
+
+func (m *Mutex) chGet() chan struct{} {
+
+	m.mx.Lock()
+	if m.ch == nil {
+		m.ch = make(chan struct{}, 1)
+	}
+	r := m.ch
+	m.mx.Unlock()
+	return r
+
+}
+
+func (m *Mutex) tryChGet() (chan struct{}, bool) {
+
+	if !m.mx.TryLock() {
+		return nil, false
+	}
+	if m.ch == nil {
+		m.ch = make(chan struct{}, 1)
+	}
+	r := m.ch
+	m.mx.Unlock()
+	return r, true
+
+}
+
+func (m *Mutex) chClose() {
+	// closing is only needed when parallel waiters exist;
+	// to make this faster, a listener counter could be added so that
+	// closing is skipped when the chan has no listeners
+
+	var o chan struct{}
+	m.mx.Lock()
+	if m.ch != nil {
+		o = m.ch
+		m.ch = nil
+	}
+	m.mx.Unlock()
+
+	if o != nil {
+		close(o)
+	}
+
+}
+
+// Lock - locks mutex
+func (m *Mutex) Lock() {
+	if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+
+		return
+	}
+
+	// Slow way
+	m.lockS()
+}
+
+// TryLock - tries to lock the mutex without blocking
+func (m *Mutex) TryLock() bool {
+	return atomic.CompareAndSwapInt32(&m.state, 0, -1)
+}
+
+// Unlock - unlocks mutex
+func (m *Mutex) Unlock() {
+	if atomic.CompareAndSwapInt32(&m.state, -1, 0) {
+		m.chClose()
+		return
+	}
+
+	panic("Mutex: Unlock fail")
+}
+
+// LockWithContext - tries to lock the mutex, giving up when ctx is done
+func (m *Mutex) LockWithContext(ctx context.Context) bool {
+	if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+		return true
+	}
+
+	// Slow way
+	return m.lockST(ctx)
+}
+
+// LockWithTimeout - tries to lock the mutex within the given duration
+func (m *Mutex) LockWithTimeout(d time.Duration) bool {
+	if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+		return true
+	}
+
+	// Slow way
+	return m.lockSD(d)
+}
+
+func (m *Mutex) lockS() {
+	ch := m.chGet()
+	for {
+		if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+
+			return
+		}
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		}
+	}
+
+}
+
+func (m *Mutex) lockST(ctx context.Context) bool {
+	ch := m.chGet()
+	for {
+		if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+
+			return true
+		}
+
+		if ctx == nil {
+			return false
+		}
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		case <-ctx.Done():
+			return false
+		}
+
+	}
+}
+
+func (m *Mutex) lockSD(d time.Duration) bool {
+	// context.WithTimeout(context.Background(), d) would also work here,
+	// but a plain timer channel avoids the extra context machinery
+	t := time.After(d)
+	ch := m.chGet()
+	for {
+		if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+
+			return true
+		}
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		case <-t:
+			return false
+		}
+
+	}
+}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go b/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go
new file mode 100644
index 000000000..88f6fc3f2
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go
@@ -0,0 +1,14 @@
+package lock
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock()   {}
+func (*noCopy) Unlock() {}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go
new file mode 100644
index 000000000..5e424b12f
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go
@@ -0,0 +1,491 @@
+package lock
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// PMutex - Read Write Try Mutex with lock priority changes (Promote and Reduce).
+// F methods (like LockF and TryLockF) lock the mutex so that, if it is already locked,
+// the caller is placed first in the lock queue.
+// Promote - upgrades the mutex from RLock to Lock.
+// Reduce - downgrades the mutex from Lock to RLock.
+type PMutex struct {
+	state int32
+	mx    sync.Mutex
+	ch    chan struct{}
+}
+
+func (m *PMutex) chGet() chan struct{} {
+
+	m.mx.Lock()
+	if m.ch == nil {
+		m.ch = make(chan struct{}, 1)
+	}
+	r := m.ch
+	m.mx.Unlock()
+	return r
+
+}
+
+// chClose - unlocks other waiting goroutines; the caller must hold mx
+func (m *PMutex) chClose() {
+	// closing is only needed when parallel waiters exist;
+	// to make this faster, a listener counter could be added so that
+	// closing is skipped when the chan has no listeners
+
+	var o chan struct{}
+
+	if m.ch != nil {
+		o = m.ch
+		m.ch = nil
+	}
+	if o != nil {
+		close(o)
+	}
+
+}
+
+// Lock - locks mutex
+func (m *PMutex) Lock() {
+
+	m.mx.Lock()
+
+	if m.state == 0 {
+		m.state = -1
+		m.mx.Unlock()
+		return
+	}
+	m.mx.Unlock()
+	// Slow way
+	m.lockS()
+}
+
+// TryLock - tries to lock the mutex without blocking
+func (m *PMutex) TryLock() (ok bool) {
+
+	m.mx.Lock()
+
+	if m.state == 0 {
+		m.state = -1
+		ok = true
+	}
+
+	m.mx.Unlock()
+
+	return
+}
+
+// Unlock - unlocks mutex
+func (m *PMutex) Unlock() {
+
+	m.mx.Lock()
+
+	if m.state == -1 {
+		m.state = 0
+		m.chClose()
+	} else {
+		panic(fmt.Sprintf("PMutex: Unlock fail (%v)", m.state))
+	}
+	m.mx.Unlock()
+}
+
+// Reduce - downgrades the mutex from Lock to RLock.
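+// It panics if the mutex is not currently held in the Lock state.
+//
+// A rough usage sketch (a hedged illustration, not part of the vendored
+// source; buildValue and publish are hypothetical helpers):
+//
+//	m.Lock()
+//	v := buildValue() // exclusive section
+//	m.Reduce()        // downgrade: other readers may now enter
+//	publish(v)        // shared section
+//	m.RUnlock()       // release the read lock left behind by Reduce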
+func (m *PMutex) Reduce() { + + m.mx.Lock() + + if m.state == -1 { + m.state = 1 + m.chClose() + } else { + panic(fmt.Sprintf("PMutex: Reduce fail (%v)", m.state)) + } + m.mx.Unlock() +} + +// LockWithContext - try locks mutex with context +func (m *PMutex) LockWithContext(ctx context.Context) bool { + + m.mx.Lock() + + if m.state == 0 { + m.state = -1 + m.mx.Unlock() + return true + } + m.mx.Unlock() + + // Slow way + return m.lockST(ctx) +} + +// LockWithTimeout - try locks mutex with time duration +func (m *PMutex) LockWithTimeout(d time.Duration) bool { + m.mx.Lock() + + if m.state == 0 { + m.state = -1 + m.mx.Unlock() + return true + } + m.mx.Unlock() + + // Slow way + return m.lockSD(d) +} + +// RLock - read locks mutex +func (m *PMutex) RLock() { + m.mx.Lock() + + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return + } + m.mx.Unlock() + + // Slow way + m.rlockS() +} + +// TryRLock - read locks mutex +func (m *PMutex) TryRLock() (ok bool) { + m.mx.Lock() + + if m.state >= 0 { + m.state++ + ok = true + } + m.mx.Unlock() + + return +} + +// RUnlock - unlocks mutex +func (m *PMutex) RUnlock() { + + m.mx.Lock() + + if m.state > 0 { + m.state-- + if m.state <= 1 { + m.chClose() + } + } else { + panic(fmt.Sprintf("PMutex: RUnlock fail (%v)", m.state)) + } + + m.mx.Unlock() +} + +// RLockWithContext - try read locks mutex with context +func (m *PMutex) RLockWithContext(ctx context.Context) bool { + m.mx.Lock() + + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return true + } + m.mx.Unlock() + + // Slow way + return m.rlockST(ctx) +} + +// RLockWithTimeout - try read locks mutex with time duration +func (m *PMutex) RLockWithTimeout(d time.Duration) bool { + m.mx.Lock() + + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return true + } + m.mx.Unlock() + + // Slow way + return m.rlockSD(d) +} + +func (m *PMutex) lockS() { + + ch := m.chGet() + for { + + m.mx.Lock() + if m.state == 0 { + m.state = -1 + m.mx.Unlock() + return + } + m.mx.Unlock() + + select { + case <-ch: + ch = m.chGet() + } + } +} + +func (m *PMutex) lockST(ctx context.Context) bool { + + ch := m.chGet() + for { + + m.mx.Lock() + if m.state == 0 { + m.state = -1 + m.mx.Unlock() + return true + } + m.mx.Unlock() + + if ctx == nil { + return false + } + + select { + case <-ch: + ch = m.chGet() + case <-ctx.Done(): + return false + } + } +} + +func (m *PMutex) lockSD(d time.Duration) bool { + // may be use context.WithTimeout(context.Background(), d) however NO it's not fun + t := time.After(d) + + ch := m.chGet() + for { + + m.mx.Lock() + if m.state == 0 { + m.state = -1 + m.mx.Unlock() + return true + } + m.mx.Unlock() + + select { + case <-ch: + ch = m.chGet() + case <-t: + return false + } + + } +} + +func (m *PMutex) rlockS() { + + ch := m.chGet() + for { + + m.mx.Lock() + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return + } + m.mx.Unlock() + + select { + case <-ch: + ch = m.chGet() + } + + } +} + +func (m *PMutex) rlockST(ctx context.Context) bool { + + ch := m.chGet() + for { + + m.mx.Lock() + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return true + } + m.mx.Unlock() + + if ctx == nil { + return false + } + + select { + case <-ch: + ch = m.chGet() + case <-ctx.Done(): + return false + } + + } +} + +func (m *PMutex) rlockSD(d time.Duration) bool { + + t := time.After(d) + + ch := m.chGet() + for { + m.mx.Lock() + if m.state >= 0 { + m.state++ + m.mx.Unlock() + return true + } + m.mx.Unlock() + + select { + case <-ch: + ch = m.chGet() + case <-t: + return false + } + + } +} + +// Promote - lock mutex from 
RLock to Lock
+// !!! use carefully - can deadlock if Promote is called from two goroutines
+func (m *PMutex) Promote() {
+	m.mx.Lock()
+
+	if m.state == 1 {
+		m.state = -1
+		m.mx.Unlock()
+		return
+	}
+	m.mx.Unlock()
+
+	// Slow way
+	m.promoteS()
+}
+
+// TryPromote - tries to upgrade the mutex from RLock to Lock without blocking
+func (m *PMutex) TryPromote() (ok bool) {
+	m.mx.Lock()
+
+	if m.state == 1 {
+		m.state = -1
+		ok = true
+	}
+	m.mx.Unlock()
+
+	return
+}
+
+// PromoteWithContext - tries to upgrade the mutex from RLock to Lock until ctx is done
+// !!! If it returns false, the mutex is UNLOCKED; if true, the mutex is held as Lock
+func (m *PMutex) PromoteWithContext(ctx context.Context) bool {
+	m.mx.Lock()
+
+	if m.state == 1 {
+		m.state = -1
+		m.mx.Unlock()
+		return true
+	}
+	m.mx.Unlock()
+
+	// Slow way
+	return m.promoteST(ctx)
+}
+
+// PromoteWithTimeout - tries to upgrade the mutex from RLock to Lock within the given duration
+// !!! If it returns false, the mutex is UNLOCKED; if true, the mutex is held as Lock
+func (m *PMutex) PromoteWithTimeout(d time.Duration) bool {
+	m.mx.Lock()
+
+	if m.state == 1 {
+		m.state = -1
+		m.mx.Unlock()
+		return true
+	}
+	m.mx.Unlock()
+
+	// Slow way
+	return m.promoteSD(d)
+}
+
+func (m *PMutex) promoteS() {
+
+	ch := m.chGet()
+	for {
+		m.mx.Lock()
+		if m.state == 1 {
+			m.state = -1
+			m.mx.Unlock()
+			return
+		}
+		m.mx.Unlock()
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		}
+	}
+
+}
+
+func (m *PMutex) promoteST(ctx context.Context) bool {
+
+	ch := m.chGet()
+	for {
+
+		m.mx.Lock()
+		if m.state == 1 {
+			m.state = -1
+			m.mx.Unlock()
+			return true
+		}
+		m.mx.Unlock()
+
+		if ctx == nil {
+			return false
+		}
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		case <-ctx.Done():
+			m.RUnlock()
+			return false
+		}
+
+	}
+
+}
+
+func (m *PMutex) promoteSD(d time.Duration) bool {
+
+	t := time.After(d)
+
+	ch := m.chGet()
+	for {
+
+		m.mx.Lock()
+		if m.state == 1 {
+			m.state = -1
+			m.mx.Unlock()
+			return true
+
+		}
+		m.mx.Unlock()
+
+		select {
+		case <-ch:
+			ch = m.chGet()
+		case <-t:
+			m.RUnlock()
+			return false
+		}
+
+	}
+}
diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go
new file mode 100644
index 000000000..7f0c89ed1
--- /dev/null
+++ b/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go
@@ -0,0 +1,293 @@
+package lock
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// RWMutex - Read Write and Try Mutex
+type RWMutex struct {
+	state int32
+	mx    sync.Mutex
+	ch    chan struct{}
+}
+
+func (m *RWMutex) chGet() chan struct{} {
+	m.mx.Lock()
+	if m.ch == nil {
+		m.ch = make(chan struct{}, 1)
+	}
+	r := m.ch
+	m.mx.Unlock()
+	return r
+}
+
+func (m *RWMutex) tryChGet() (chan struct{}, bool) {
+
+	if !m.mx.TryLock() {
+		return nil, false
+	}
+	if m.ch == nil {
+		m.ch = make(chan struct{}, 1)
+	}
+	r := m.ch
+	m.mx.Unlock()
+
+	return r, true
+
+}
+
+func (m *RWMutex) chClose() {
+	// closing is only needed when parallel waiters exist;
+	// to make this faster, a listener counter could be added so that
+	// closing is skipped when the chan has no listeners
+
+	var o chan struct{}
+	m.mx.Lock()
+	if m.ch != nil {
+		o = m.ch
+		m.ch = nil
+	}
+	m.mx.Unlock()
+
+	if o != nil {
+		close(o)
+	}
+
+}
+
+// Lock - locks mutex
+func (m *RWMutex) Lock() {
+	if atomic.CompareAndSwapInt32(&m.state, 0, -1) {
+
+		return
+	}
+
+	// Slow way
+	m.lockS()
+}
+
+// TryLock - tries to lock the mutex without blocking
+func (m *RWMutex) TryLock() bool {
+	return atomic.CompareAndSwapInt32(&m.state, 0, -1)
+}
+
+// Unlock - unlocks mutex
+func (m *RWMutex) Unlock() {
+	if atomic.CompareAndSwapInt32(&m.state, -1, 0)
{ + m.chClose() + return + } + + panic("RWMutex: Unlock fail") +} + +// LockWithContext - try locks mutex with context +func (m *RWMutex) LockWithContext(ctx context.Context) bool { + if atomic.CompareAndSwapInt32(&m.state, 0, -1) { + return true + } + + // Slow way + return m.lockST(ctx) +} + +// LockD - try locks mutex with time duration +func (m *RWMutex) LockWithTimeout(d time.Duration) bool { + if atomic.CompareAndSwapInt32(&m.state, 0, -1) { + return true + } + + // Slow way + return m.lockSD(d) +} + +// RLock - read locks mutex +func (m *RWMutex) RLock() { + k := atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return + } + + // Slow way + m.rlockS() +} + +// TryRLock - try read locks mutex +func (m *RWMutex) TryRLock() bool { + k := atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return true + } else if k == -1 { + return false + } + + // Slow way + if m.mx.TryLock() { + k := atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + m.mx.Unlock() + return true + } else if k == -1 { + m.mx.Unlock() + return false + } + } + + return false +} + +// RUnlock - unlocks mutex +func (m *RWMutex) RUnlock() { + i := atomic.AddInt32(&m.state, -1) + if i > 0 { + return + } else if i == 0 { + m.chClose() + return + } + + panic("RWMutex: RUnlock fail") +} + +// RLockWithContext - try read locks mutex with context +func (m *RWMutex) RLockWithContext(ctx context.Context) bool { + k := atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return true + } + + // Slow way + return m.rlockST(ctx) +} + +// RLockWithDuration - try read locks mutex with time duration +func (m *RWMutex) RLockWithTimeout(d time.Duration) bool { + k := atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return true + } + + // Slow way + return m.rlockSD(d) +} + +func (m *RWMutex) lockS() { + ch := m.chGet() + for { + if atomic.CompareAndSwapInt32(&m.state, 0, -1) { + + return + } + + select { + case <-ch: + ch = m.chGet() + } + } + +} + +func (m *RWMutex) lockST(ctx context.Context) bool { + ch := m.chGet() + for { + if atomic.CompareAndSwapInt32(&m.state, 0, -1) { + + return true + } + + if ctx == nil { + return false + } + + select { + case <-ch: + ch = m.chGet() + case <-ctx.Done(): + return false + } + + } +} + +func (m *RWMutex) lockSD(d time.Duration) bool { + // may be use context.WithTimeout(context.Background(), d) however NO it's not fun + t := time.After(d) + ch := m.chGet() + for { + if atomic.CompareAndSwapInt32(&m.state, 0, -1) { + + return true + } + + select { + case <-ch: + ch = m.chGet() + case <-t: + return false + } + + } +} + +func (m *RWMutex) rlockS() { + + ch := m.chGet() + var k int32 + for { + k = atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return + } + + select { + case <-ch: + ch = m.chGet() + } + + } + +} + +func (m *RWMutex) rlockST(ctx context.Context) bool { + ch := m.chGet() + var k int32 + for { + k = atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return true + } + + if ctx == nil { + return false + } + + select { + case <-ch: + ch = m.chGet() + case <-ctx.Done(): + return false + } + + } +} + +func (m *RWMutex) rlockSD(d time.Duration) bool { + ch := m.chGet() + t := time.After(d) + var k int32 + for { + k = atomic.LoadInt32(&m.state) + if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { + return true 
+ } + + select { + case <-ch: + ch = m.chGet() + case <-t: + return false + } + } +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b18..2924cf3a1 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) 
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) } } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f0af8246c..e6ff8dfeb 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -218,16 +218,22 @@ func (c *Call) Unset() *Call { foundMatchingCall := false - for i, call := range c.Parent.ExpectedCalls { + // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones + var index int // write index + for _, call := range c.Parent.ExpectedCalls { if call.Method == c.Method { _, diffCount := call.Arguments.Diff(c.Arguments) if diffCount == 0 { foundMatchingCall = true - // Remove from ExpectedCalls - c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) 
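+				// (a hedged aside: keeping a write index and truncating once at
+				// the end is the standard in-place slice filter idiom; it avoids
+				// re-slicing with append on every removal inside the loop)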
+ // Remove from ExpectedCalls - just skip it + continue } } + c.Parent.ExpectedCalls[index] = call + index++ } + // trim slice up to last copied index + c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] if !foundMatchingCall { unlockOnce.Do(c.unlock) diff --git a/vendor/modules.txt b/vendor/modules.txt index 4406b7b9e..c1f7e632c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -218,6 +218,10 @@ github.com/msaf1980/go-metrics/graphite # github.com/msaf1980/go-stringutils v0.1.4 ## explicit; go 1.16 github.com/msaf1980/go-stringutils +# github.com/msaf1980/go-syncutils v0.0.3 +## explicit; go 1.18 +github.com/msaf1980/go-syncutils/atomic +github.com/msaf1980/go-syncutils/lock # github.com/msaf1980/go-timeutils v0.0.3 ## explicit; go 1.19 github.com/msaf1980/go-timeutils/duration @@ -331,7 +335,7 @@ github.com/shurcooL/httpfs/union # github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.8.1 +# github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/mock From 86893c76064d8c38a60be465abeedd323f96d536 Mon Sep 17 00:00:00 2001 From: Michail Safronov Date: Tue, 8 Aug 2023 02:00:25 +0500 Subject: [PATCH 2/5] feat(limiter): adaptive limiter (based on load average) --- .github/workflows/tests-sd.yml | 37 ++ config/config.go | 199 +++++-- config/config_test.go | 357 ++++++++++++- deploy/doc/config.md | 8 +- doc/config.md | 36 +- doc/config.md.orig | 399 ++++++++++++++ go.mod | 2 +- go.sum | 2 - graphite-clickhouse.go | 37 +- limiter/alimiter.go | 162 ++++++ limiter/alimiter_test.go | 188 +++++++ limiter/wlimiter.go | 17 +- load_avg/load_avg.go | 33 ++ load_avg/load_avg_default.go | 20 + load_avg/load_avg_linux.go | 41 ++ load_avg/load_avg_test.go | 50 ++ sd/nginx/nginx.go | 344 ++++++++++++ sd/nginx/nginx_test.go | 289 +++++++++++ sd/register.go | 97 ++++ sd/utils/utils.go | 99 ++++ tests/consul.sh | 8 + tests/limitera/carbon-clickhouse.conf.tpl | 45 ++ ...e-clickhouse-internal-aggr-cached.conf.tpl | 50 ++ tests/limitera/test.toml | 59 +++ tests/limitermax/carbon-clickhouse.conf.tpl | 45 ++ ...e-clickhouse-internal-aggr-cached.conf.tpl | 47 ++ tests/limitermax/test.toml | 59 +++ tests/limiterw/carbon-clickhouse.conf.tpl | 45 ++ ...e-clickhouse-internal-aggr-cached.conf.tpl | 47 ++ tests/limiterw/test.toml | 59 +++ tests/limiterwn/carbon-clickhouse.conf.tpl | 45 ++ ...e-clickhouse-internal-aggr-cached.conf.tpl | 50 ++ tests/limiterwn/test.toml | 59 +++ .../msaf1980/go-syncutils/lock/chanmutex.go | 67 --- .../msaf1980/go-syncutils/lock/condchan.go | 165 ------ .../msaf1980/go-syncutils/lock/mutex.go | 165 ------ .../msaf1980/go-syncutils/lock/nocopy.go | 14 - .../msaf1980/go-syncutils/lock/pmutex.go | 491 ------------------ .../msaf1980/go-syncutils/lock/rwmutex.go | 293 ----------- vendor/modules.txt | 9 - 40 files changed, 2946 insertions(+), 1293 deletions(-) create mode 100644 .github/workflows/tests-sd.yml create mode 100644 doc/config.md.orig create mode 100644 limiter/alimiter.go create mode 100644 limiter/alimiter_test.go create mode 100644 load_avg/load_avg.go create mode 100644 load_avg/load_avg_default.go create mode 100644 load_avg/load_avg_linux.go create mode 100644 load_avg/load_avg_test.go create mode 100644 sd/nginx/nginx.go create mode 100644 sd/nginx/nginx_test.go create mode 100644 sd/register.go create mode 100644 sd/utils/utils.go create mode 100755 tests/consul.sh create mode 100644 tests/limitera/carbon-clickhouse.conf.tpl create mode 100644 
tests/limitera/graphite-clickhouse-internal-aggr-cached.conf.tpl create mode 100644 tests/limitera/test.toml create mode 100644 tests/limitermax/carbon-clickhouse.conf.tpl create mode 100644 tests/limitermax/graphite-clickhouse-internal-aggr-cached.conf.tpl create mode 100644 tests/limitermax/test.toml create mode 100644 tests/limiterw/carbon-clickhouse.conf.tpl create mode 100644 tests/limiterw/graphite-clickhouse-internal-aggr-cached.conf.tpl create mode 100644 tests/limiterw/test.toml create mode 100644 tests/limiterwn/carbon-clickhouse.conf.tpl create mode 100644 tests/limiterwn/graphite-clickhouse-internal-aggr-cached.conf.tpl create mode 100644 tests/limiterwn/test.toml delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/condchan.go delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/mutex.go delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go delete mode 100644 vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go diff --git a/.github/workflows/tests-sd.yml b/.github/workflows/tests-sd.yml new file mode 100644 index 000000000..ee98a71e1 --- /dev/null +++ b/.github/workflows/tests-sd.yml @@ -0,0 +1,37 @@ +name: Tests register in SD + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + + tests: + env: + CGO_ENABLED: 0 + name: Test register in SD + runs-on: ubuntu-latest + strategy: + matrix: + go: + - ^1 + steps: + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + + - name: Check out code + uses: actions/checkout@v2 + + - name: Start consul + run: | + ./tests/consul.sh 1.15.2 > /tmp/consul.log & + sleep 30 + shell: bash + + - name: Test + run: go test ./sd/nginx -tags=test_sd -v diff --git a/config/config.go b/config/config.go index 7c13764fa..1742e3177 100644 --- a/config/config.go +++ b/config/config.go @@ -31,6 +31,51 @@ import ( "github.com/lomik/graphite-clickhouse/metrics" ) +type SDType uint8 + +const ( + SDNone SDType = iota + SDNginx // https://github.com/weibocom/nginx-upsync-module +) + +var sdTypeStrings map[SDType]string = map[SDType]string{SDNone: "", SDNginx: "nginx"} + +func (a *SDType) Set(value string) error { + switch value { + case "nginx": + *a = SDNginx + case "", "0": + *a = SDNone + default: + return fmt.Errorf("invalid sd type %q", value) + } + return nil +} + +func (a *SDType) UnmarshalText(data []byte) error { + return a.Set(string(data)) +} + +func (a *SDType) MarshalText() ([]byte, error) { + return []byte(a.String()), nil +} + +func (a *SDType) UnmarshalJSON(data []byte) error { + return a.Set(string(data)) +} + +func (a *SDType) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} + +func (a *SDType) String() string { + return sdTypeStrings[*a] +} + +func (a *SDType) Type() string { + return "service_discovery_type" +} + // Cache config type CacheConfig struct { Type string `toml:"type" json:"type" comment:"cache type"` @@ -57,7 +102,14 @@ type Common struct { Blacklist []*regexp.Regexp `toml:"-" json:"-"` // compiled TargetBlacklist MemoryReturnInterval time.Duration `toml:"memory-return-interval" json:"memory-return-interval" comment:"daemon will return the freed memory to the OS when it>0"` HeadersToLog []string `toml:"headers-to-log" json:"headers-to-log" comment:"additional request headers to log"` - FindCacheConfig CacheConfig `toml:"find-cache" json:"find-cache" 
comment:"find/tags cache config"` + + BaseWeight int `toml:"base_weight" json:"base_weight" comment:"service discovery base weight (on idle)"` + SDType SDType `toml:"service-discovery-type" json:"service-discovery-type" comment:"service discovery type"` + SD string `toml:"service-discovery" json:"service-discovery" comment:"service discovery address (consul)"` + SDNamespace string `toml:"service-discovery-ns" json:"service-discovery-ns" comment:"service discovery namespace (graphite by default)"` + SDDc []string `toml:"service-discovery-ds" json:"service-discovery-ds" comment:"service discovery datacenters (first - is primary, in other register as backup)"` + + FindCacheConfig CacheConfig `toml:"find-cache" json:"find-cache" comment:"find/tags cache config"` FindCache cache.BytesCache `toml:"-" json:"-"` } @@ -96,8 +148,9 @@ var IndexReverse = map[string]uint8{ var IndexReverseNames = []string{"auto", "direct", "reversed"} type UserLimits struct { - MaxQueries int `toml:"max-queries" json:"max-queries" comment:"Max queries to fetch data"` - MaxConcurrent int `toml:"max-concurrent" json:"max-concurrent" comment:"Maximum concurrent queries to fetch data"` + MaxQueries int `toml:"max-queries" json:"max-queries" comment:"Max queries to fetch data"` + ConcurrentQueries int `toml:"concurrent-queries" json:"concurrent-queries" comment:"Concurrent queries to fetch data"` + AdaptiveQueries int `toml:"adaptive-queries" json:"adaptive-queries" comment:"Adaptive queries (based on load average) for increase/decrease concurrent queries"` Limiter limiter.ServerLimiter `toml:"-" json:"-"` } @@ -107,8 +160,9 @@ type QueryParam struct { URL string `toml:"url" json:"url" comment:"url for queries with durations greater or equal than"` DataTimeout time.Duration `toml:"data-timeout" json:"data-timeout" comment:"total timeout to fetch data"` - MaxQueries int `toml:"max-queries" json:"max-queries" comment:"Max queries to fetch data"` - MaxConcurrent int `toml:"max-concurrent" json:"max-concurrent" comment:"Maximum concurrent queries to fetch data"` + MaxQueries int `toml:"max-queries" json:"max-queries" comment:"Max queries to fetch data"` + ConcurrentQueries int `toml:"concurrent-queries" json:"concurrent-queries" comment:"Concurrent queries to fetch data"` + AdaptiveQueries int `toml:"adaptive-queries" json:"adaptive-queries" comment:"Adaptive queries (based on load average) for increase/decrease concurrent queries"` Limiter limiter.ServerLimiter `toml:"-" json:"-"` } @@ -139,38 +193,46 @@ func binarySearchQueryParamLe(a []QueryParam, duration time.Duration, start, end // ClickHouse config type ClickHouse struct { - URL string `toml:"url" json:"url" comment:"default url, see https://clickhouse.tech/docs/en/interfaces/http. 
Can be overwritten with query-params"`
-	DataTimeout          time.Duration         `toml:"data-timeout" json:"data-timeout" comment:"default total timeout to fetch data, can be overwritten with query-params"`
-	RenderMaxQueries     int                   `toml:"render-max-queries" json:"render-max-queries" comment:"Max queries to render queries"`
-	RenderMaxConcurrent  int                   `toml:"render-max-concurrent" json:"render-max-concurrent" comment:"Maximum concurrent queries to render queries"`
-	QueryParams          []QueryParam          `toml:"query-params" json:"query-params" comment:"customized query params (url, data timeout, limiters) for durations greater or equal"`
-	FindMaxQueries       int                   `toml:"find-max-queries" json:"find-max-queries" comment:"Max queries for find queries"`
-	FindMaxConcurrent    int                   `toml:"find-max-concurrent" json:"find-max-concurrent" comment:"Maximum concurrent queries for find queries"`
+	URL         string        `toml:"url" json:"url" comment:"default url, see https://clickhouse.tech/docs/en/interfaces/http. Can be overwritten with query-params"`
+	DataTimeout time.Duration `toml:"data-timeout" json:"data-timeout" comment:"default total timeout to fetch data, can be overwritten with query-params"`
+	QueryParams []QueryParam  `toml:"query-params" json:"query-params" comment:"customized query params (url, data timeout, limiters) for durations greater or equal"`
+
+	RenderMaxQueries        int `toml:"render-max-queries" json:"render-max-queries" comment:"Max queries to render queries"`
+	RenderConcurrentQueries int `toml:"render-concurrent-queries" json:"render-concurrent-queries" comment:"Concurrent queries to render queries"`
+	RenderAdaptiveQueries   int `toml:"render-adaptive-queries" json:"render-adaptive-queries" comment:"Render adaptive queries (based on load average) for increase/decrease concurrent queries"`
+
+	FindMaxQueries        int `toml:"find-max-queries" json:"find-max-queries" comment:"Max queries for find queries"`
+	FindConcurrentQueries int `toml:"find-concurrent-queries" json:"find-concurrent-queries" comment:"Find concurrent queries for find queries"`
+	FindAdaptiveQueries   int `toml:"find-adaptive-queries" json:"find-adaptive-queries" comment:"Find adaptive queries (based on load average) for increase/decrease concurrent queries"`
 	FindLimiter limiter.ServerLimiter `toml:"-" json:"-"`
-	TagsMaxQueries        int            `toml:"tags-max-queries" json:"tags-max-queries" comment:"Max queries for tags queries"`
-	TagsMaxConcurrent     int            `toml:"tags-max-concurrent" json:"tags-max-concurrent" comment:"Maximum concurrent queries for tags queries"`
-	TagsMinInQuery        int            `toml:"tags-min-in-query" json:"tags-min-in-query" comment:"Minimum tags in seriesByTag query"`
-	TagsMinInAutocomplete int            `toml:"tags-min-in-autocomplete" json:"tags-min-in-autocomplete" comment:"Minimum tags in autocomplete query"`
+
+	TagsMaxQueries        int `toml:"tags-max-queries" json:"tags-max-queries" comment:"Max queries for tags queries"`
+	TagsConcurrentQueries int `toml:"tags-concurrent-queries" json:"tags-concurrent-queries" comment:"Concurrent queries for tags queries"`
+	TagsAdaptiveQueries   int `toml:"tags-adaptive-queries" json:"tags-adaptive-queries" comment:"Tags adaptive queries (based on load average) for increase/decrease concurrent queries"`
 	TagsLimiter limiter.ServerLimiter `toml:"-" json:"-"`
-	UserLimits map[string]UserLimits `toml:"user-limits" json:"user-limits" comment:"customized query limiter for some users" commented:"true"`
-	DateFormat string `toml:"date-format" json:"date-format" comment:"Date format (default, utc, both)"`
-	IndexTable string `toml:"index-table"
json:"index-table" comment:"see doc/index-table.md"` - IndexUseDaily bool `toml:"index-use-daily" json:"index-use-daily"` - IndexReverse string `toml:"index-reverse" json:"index-reverse" comment:"see doc/config.md"` - IndexReverses IndexReverses `toml:"index-reverses" json:"index-reverses" comment:"see doc/config.md" commented:"true"` - IndexTimeout time.Duration `toml:"index-timeout" json:"index-timeout" comment:"total timeout to fetch series list from index"` - TaggedTable string `toml:"tagged-table" json:"tagged-table" comment:"'tagged' table from carbon-clickhouse, required for seriesByTag"` - TaggedAutocompleDays int `toml:"tagged-autocomplete-days" json:"tagged-autocomplete-days" comment:"or how long the daemon will query tags during autocomplete"` - TaggedUseDaily bool `toml:"tagged-use-daily" json:"tagged-use-daily" comment:"whether to use date filter when searching for the metrics in the tagged-table"` - TaggedCosts map[string]*Costs `toml:"tagged-costs" json:"tagged-costs" comment:"costs for tags (for tune which tag will be used as primary), by default is 0, increase for costly (with poor selectivity) tags" commented:"true"` - TreeTable string `toml:"tree-table" json:"tree-table" comment:"old index table, DEPRECATED, see description in doc/config.md" commented:"true"` - ReverseTreeTable string `toml:"reverse-tree-table" json:"reverse-tree-table" commented:"true"` - DateTreeTable string `toml:"date-tree-table" json:"date-tree-table" commented:"true"` - DateTreeTableVersion int `toml:"date-tree-table-version" json:"date-tree-table-version" commented:"true"` - TreeTimeout time.Duration `toml:"tree-timeout" json:"tree-timeout" commented:"true"` - TagTable string `toml:"tag-table" json:"tag-table" comment:"is not recommended to use, https://github.com/lomik/graphite-clickhouse/wiki/TagsRU" commented:"true"` - ExtraPrefix string `toml:"extra-prefix" json:"extra-prefix" comment:"add extra prefix (directory in graphite) for all metrics, w/o trailing dot"` - ConnectTimeout time.Duration `toml:"connect-timeout" json:"connect-timeout" comment:"TCP connection timeout"` + + TagsMinInQuery int `toml:"tags-min-in-query" json:"tags-min-in-query" comment:"Minimum tags in seriesByTag query"` + TagsMinInAutocomplete int `toml:"tags-min-in-autocomplete" json:"tags-min-in-autocomplete" comment:"Minimum tags in autocomplete query"` + + UserLimits map[string]UserLimits `toml:"user-limits" json:"user-limits" comment:"customized query limiter for some users" commented:"true"` + DateFormat string `toml:"date-format" json:"date-format" comment:"Date format (default, utc, both)"` + IndexTable string `toml:"index-table" json:"index-table" comment:"see doc/index-table.md"` + IndexUseDaily bool `toml:"index-use-daily" json:"index-use-daily"` + IndexReverse string `toml:"index-reverse" json:"index-reverse" comment:"see doc/config.md"` + IndexReverses IndexReverses `toml:"index-reverses" json:"index-reverses" comment:"see doc/config.md" commented:"true"` + IndexTimeout time.Duration `toml:"index-timeout" json:"index-timeout" comment:"total timeout to fetch series list from index"` + TaggedTable string `toml:"tagged-table" json:"tagged-table" comment:"'tagged' table from carbon-clickhouse, required for seriesByTag"` + TaggedAutocompleDays int `toml:"tagged-autocomplete-days" json:"tagged-autocomplete-days" comment:"or how long the daemon will query tags during autocomplete"` + TaggedUseDaily bool `toml:"tagged-use-daily" json:"tagged-use-daily" comment:"whether to use date filter when searching for the metrics in 
the tagged-table"` + TaggedCosts map[string]*Costs `toml:"tagged-costs" json:"tagged-costs" comment:"costs for tags (for tune which tag will be used as primary), by default is 0, increase for costly (with poor selectivity) tags" commented:"true"` + TreeTable string `toml:"tree-table" json:"tree-table" comment:"old index table, DEPRECATED, see description in doc/config.md" commented:"true"` + ReverseTreeTable string `toml:"reverse-tree-table" json:"reverse-tree-table" commented:"true"` + DateTreeTable string `toml:"date-tree-table" json:"date-tree-table" commented:"true"` + DateTreeTableVersion int `toml:"date-tree-table-version" json:"date-tree-table-version" commented:"true"` + TreeTimeout time.Duration `toml:"tree-timeout" json:"tree-timeout" commented:"true"` + TagTable string `toml:"tag-table" json:"tag-table" comment:"is not recommended to use, https://github.com/lomik/graphite-clickhouse/wiki/TagsRU" commented:"true"` + ExtraPrefix string `toml:"extra-prefix" json:"extra-prefix" comment:"add extra prefix (directory in graphite) for all metrics, w/o trailing dot"` + ConnectTimeout time.Duration `toml:"connect-timeout" json:"connect-timeout" comment:"TCP connection timeout"` // TODO: remove in v0.14 DataTableLegacy string `toml:"data-table" json:"data-table" comment:"will be removed in 0.14" commented:"true"` // TODO: remove in v0.14 @@ -465,8 +527,8 @@ func Unmarshal(body []byte) (cfg *Config, warns []zap.Field, err error) { cfg.Logging = make([]zapwriter.Config, 0) } - if cfg.ClickHouse.RenderMaxConcurrent > cfg.ClickHouse.RenderMaxQueries && cfg.ClickHouse.RenderMaxQueries > 0 { - cfg.ClickHouse.RenderMaxConcurrent = 0 + if cfg.ClickHouse.RenderConcurrentQueries > cfg.ClickHouse.RenderMaxQueries && cfg.ClickHouse.RenderMaxQueries > 0 { + cfg.ClickHouse.RenderConcurrentQueries = 0 } chURL, err := clickhouseURLValidate(cfg.ClickHouse.URL) if err != nil { @@ -486,8 +548,8 @@ func Unmarshal(body []byte) (cfg *Config, warns []zap.Field, err error) { warns = append(warns, zap.Strings("tls-config", warnings)) } for i := range cfg.ClickHouse.QueryParams { - if cfg.ClickHouse.QueryParams[i].MaxConcurrent > cfg.ClickHouse.QueryParams[i].MaxQueries && cfg.ClickHouse.QueryParams[i].MaxQueries > 0 { - cfg.ClickHouse.QueryParams[i].MaxConcurrent = 0 + if cfg.ClickHouse.QueryParams[i].ConcurrentQueries > cfg.ClickHouse.QueryParams[i].MaxQueries && cfg.ClickHouse.QueryParams[i].MaxQueries > 0 { + cfg.ClickHouse.QueryParams[i].ConcurrentQueries = 0 } if cfg.ClickHouse.QueryParams[i].Duration == 0 { @@ -508,7 +570,8 @@ func Unmarshal(body []byte) (cfg *Config, warns []zap.Field, err error) { cfg.ClickHouse.QueryParams = append( []QueryParam{{ URL: cfg.ClickHouse.URL, DataTimeout: cfg.ClickHouse.DataTimeout, - MaxQueries: cfg.ClickHouse.RenderMaxQueries, MaxConcurrent: cfg.ClickHouse.RenderMaxConcurrent, + MaxQueries: cfg.ClickHouse.RenderMaxQueries, ConcurrentQueries: cfg.ClickHouse.RenderConcurrentQueries, + AdaptiveQueries: cfg.ClickHouse.RenderAdaptiveQueries, }}, cfg.ClickHouse.QueryParams..., ) @@ -611,31 +674,71 @@ func Unmarshal(body []byte) (cfg *Config, warns []zap.Field, err error) { } } - if cfg.ClickHouse.FindMaxConcurrent > cfg.ClickHouse.FindMaxQueries && cfg.ClickHouse.FindMaxQueries > 0 { - cfg.ClickHouse.FindMaxConcurrent = 0 + if cfg.ClickHouse.FindConcurrentQueries > cfg.ClickHouse.FindMaxQueries && cfg.ClickHouse.FindMaxQueries > 0 { + cfg.ClickHouse.FindConcurrentQueries = 0 } - if cfg.ClickHouse.TagsMaxConcurrent > cfg.ClickHouse.TagsMaxQueries && cfg.ClickHouse.TagsMaxQueries > 
0 {
-		cfg.ClickHouse.TagsMaxConcurrent = 0
+	if cfg.ClickHouse.TagsConcurrentQueries > cfg.ClickHouse.TagsMaxQueries && cfg.ClickHouse.TagsMaxQueries > 0 {
+		cfg.ClickHouse.TagsConcurrentQueries = 0
 	}
 
 	metricsEnabled := cfg.setupGraphiteMetrics()
 
-	cfg.ClickHouse.FindLimiter = limiter.NewWLimiter(cfg.ClickHouse.FindMaxQueries, cfg.ClickHouse.FindMaxConcurrent, metricsEnabled, "find", "all")
+	cfg.ClickHouse.FindLimiter = limiter.NewALimiter(
+		cfg.ClickHouse.FindMaxQueries, cfg.ClickHouse.FindConcurrentQueries, cfg.ClickHouse.FindAdaptiveQueries,
+		metricsEnabled, "find", "all",
+	)
 
-	cfg.ClickHouse.TagsLimiter = limiter.NewWLimiter(cfg.ClickHouse.TagsMaxQueries, cfg.ClickHouse.TagsMaxConcurrent, metricsEnabled, "tags", "all")
+	cfg.ClickHouse.TagsLimiter = limiter.NewALimiter(
+		cfg.ClickHouse.TagsMaxQueries, cfg.ClickHouse.TagsConcurrentQueries, cfg.ClickHouse.TagsAdaptiveQueries,
+		metricsEnabled, "tags", "all",
+	)
 
 	for i := range cfg.ClickHouse.QueryParams {
-		cfg.ClickHouse.QueryParams[i].Limiter = limiter.NewWLimiter(cfg.ClickHouse.QueryParams[i].MaxQueries, cfg.ClickHouse.QueryParams[i].MaxConcurrent, metricsEnabled, "render", duration.String(cfg.ClickHouse.QueryParams[i].Duration))
+		cfg.ClickHouse.QueryParams[i].Limiter = limiter.NewALimiter(
+			cfg.ClickHouse.QueryParams[i].MaxQueries, cfg.ClickHouse.QueryParams[i].ConcurrentQueries,
+			cfg.ClickHouse.QueryParams[i].AdaptiveQueries,
+			metricsEnabled, "render", duration.String(cfg.ClickHouse.QueryParams[i].Duration),
+		)
 	}
 
 	for u, q := range cfg.ClickHouse.UserLimits {
-		q.Limiter = limiter.NewWLimiter(q.MaxQueries, q.MaxConcurrent, metricsEnabled, u, "all")
+		q.Limiter = limiter.NewALimiter(
+			q.MaxQueries, q.ConcurrentQueries, q.AdaptiveQueries, metricsEnabled, u, "all",
+		)
 		cfg.ClickHouse.UserLimits[u] = q
 	}
 
 	return cfg, warns, nil
 }
 
+// NeedLoadAvgColect checks if load average collection is needed
+func (c *Config) NeedLoadAvgColect() bool {
+	if c.Common.SD != "" {
+		if c.Common.BaseWeight <= 0 {
+			c.Common.BaseWeight = 100
+		}
+		if c.Common.SDNamespace == "" {
+			c.Common.SDNamespace = "graphite"
+		}
+		return true
+	}
+	if c.ClickHouse.RenderAdaptiveQueries > 0 {
+		return true
+	}
+	if c.ClickHouse.FindAdaptiveQueries > 0 {
+		return true
+	}
+	if c.ClickHouse.TagsAdaptiveQueries > 0 {
+		return true
+	}
+	for _, u := range c.ClickHouse.UserLimits {
+		if u.AdaptiveQueries > 0 {
+			return true
+		}
+	}
+	return false
+}
+
 // ProcessDataTables checks if legacy `data`-table config is used, compiles regexps for `target-match-any` and `target-match-all`
 // parameters, sets the rollup configuration and proper context.
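
// An illustrative wiring sketch for the NeedLoadAvgColect helper above, not
// taken from the patch itself (collectLoadAvg is a hypothetical name; the
// actual daemon wiring lives in graphite-clickhouse.go, outside this hunk):
// background load-average collection is only worth starting when service
// discovery or at least one adaptive limit is configured.
//
//	if cfg.NeedLoadAvgColect() {
//		go collectLoadAvg() // feeds the adaptive limiters and the SD weight
//	}
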
func (c *Config) ProcessDataTables() (err error) { diff --git a/config/config_test.go b/config/config_test.go index 128a14eaa..3466d532b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -472,11 +472,11 @@ tree-timeout = "5s" connect-timeout = "2s" render-max-queries = 1000 -render-max-concurrent = 10 +render-concurrent-queries = 10 find-max-queries = 200 -find-max-concurrent = 8 +find-concurrent-queries = 8 tags-max-queries = 50 -tags-max-concurrent = 4 +tags-concurrent-queries = 4 query-params = [ { @@ -488,7 +488,7 @@ query-params = [ user-limits = { "alert" = { max-queries = 200, - max-concurrent = 10 + concurrent-queries = 10 } } @@ -614,11 +614,11 @@ sample-thereafter = 12 DataTimeout: 64000000000, QueryParams: []QueryParam{ { - Duration: 0, - URL: "http://somehost:8123", - DataTimeout: 64000000000, - MaxQueries: 1000, - MaxConcurrent: 10, + Duration: 0, + URL: "http://somehost:8123", + DataTimeout: 64000000000, + MaxQueries: 1000, + ConcurrentQueries: 10, }, { Duration: 72 * time.Hour, @@ -627,16 +627,16 @@ sample-thereafter = 12 Limiter: limiter.NoopLimiter{}, }, }, - RenderMaxQueries: 1000, - RenderMaxConcurrent: 10, - FindMaxQueries: 200, - FindMaxConcurrent: 8, - TagsMaxQueries: 50, - TagsMaxConcurrent: 4, + RenderMaxQueries: 1000, + RenderConcurrentQueries: 10, + FindMaxQueries: 200, + FindConcurrentQueries: 8, + TagsMaxQueries: 50, + TagsConcurrentQueries: 4, UserLimits: map[string]UserLimits{ "alert": { - MaxQueries: 200, - MaxConcurrent: 10, + MaxQueries: 200, + ConcurrentQueries: 10, }, }, IndexTable: "graphite_index", @@ -662,18 +662,333 @@ sample-thereafter = 12 r, _ = regexp.Compile("^reg$") expected.ClickHouse.IndexReverses[1] = &IndexReverseRule{"", "", "^reg$", r, "reversed"} for i := range config.ClickHouse.QueryParams { - if _, ok := config.ClickHouse.QueryParams[i].Limiter.(*limiter.WLimiter); ok && config.ClickHouse.QueryParams[i].MaxQueries > 0 && config.ClickHouse.QueryParams[i].MaxConcurrent > 0 { + if _, ok := config.ClickHouse.QueryParams[i].Limiter.(*limiter.WLimiter); ok && config.ClickHouse.QueryParams[i].MaxQueries > 0 && config.ClickHouse.QueryParams[i].ConcurrentQueries > 0 { config.ClickHouse.QueryParams[i].Limiter = nil } } - if _, ok := config.ClickHouse.FindLimiter.(*limiter.WLimiter); ok && config.ClickHouse.FindMaxQueries > 0 && config.ClickHouse.FindMaxConcurrent > 0 { + if _, ok := config.ClickHouse.FindLimiter.(*limiter.WLimiter); ok && config.ClickHouse.FindMaxQueries > 0 && config.ClickHouse.FindConcurrentQueries > 0 { config.ClickHouse.FindLimiter = nil } - if _, ok := config.ClickHouse.TagsLimiter.(*limiter.WLimiter); ok && config.ClickHouse.TagsMaxQueries > 0 && config.ClickHouse.TagsMaxConcurrent > 0 { + if _, ok := config.ClickHouse.TagsLimiter.(*limiter.WLimiter); ok && config.ClickHouse.TagsMaxQueries > 0 && config.ClickHouse.TagsConcurrentQueries > 0 { config.ClickHouse.TagsLimiter = nil } for u, q := range config.ClickHouse.UserLimits { - if _, ok := q.Limiter.(*limiter.WLimiter); ok && q.MaxQueries > 0 && q.MaxConcurrent > 0 { + if _, ok := q.Limiter.(*limiter.WLimiter); ok && q.MaxQueries > 0 && q.ConcurrentQueries > 0 { + q.Limiter = nil + config.ClickHouse.UserLimits[u] = q + } + } + + assert.Equal(t, expected.ClickHouse, config.ClickHouse) + + // Tags + expected.Tags = Tags{"filename", "2012-12-12", "AND case", "input", "output"} + assert.Equal(t, expected.Tags, config.Tags) + + // Carbonlink + expected.Carbonlink = Carbonlink{"server:3333", 5, 2, 250000000, 350000000, 800000000} + assert.Equal(t, 
expected.Carbonlink, config.Carbonlink) + + // Prometheus + expected.Prometheus = Prometheus{":9092", "https://server:3456/uri", nil, "Prometheus Time Series", 5 * time.Minute} + u, _ := url.Parse(expected.Prometheus.ExternalURLRaw) + expected.Prometheus.ExternalURL = u + assert.Equal(t, expected.Prometheus, config.Prometheus) + + // Debug + expected.Debug = Debug{"tests_tmp", os.FileMode(0755), os.FileMode(0640)} + assert.Equal(t, expected.Debug, config.Debug) + assert.DirExists(t, "tests_tmp") + + // Logger + expected.Logging = make([]zapwriter.Config, 2) + expected.Logging[0] = zapwriter.Config{ + Logger: "debugger", + File: "stdout", + Level: "debug", + Encoding: "console", + EncodingTime: "iso8601", + EncodingDuration: "string", + SampleTick: "5ms", + SampleInitial: 1, + SampleThereafter: 2, + } + expected.Logging[1] = zapwriter.Config{ + Logger: "logger", + File: "tests_tmp/logger.txt", + Level: "info", + Encoding: "json", + EncodingTime: "epoch", + EncodingDuration: "seconds", + SampleTick: "50ms", + SampleInitial: 10, + SampleThereafter: 12, + } + assert.Equal(t, expected.Logging, config.Logging) + + metrics.FindRequestMetric = nil + metrics.TagsRequestMetric = nil + metrics.RenderRequestMetric = nil + metrics.UnregisterAll() +} + +func TestReadConfigGraphiteWithALimiter(t *testing.T) { + body := []byte( + `[common] +listen = "[::1]:9090" +pprof-listen = "127.0.0.1:9091" +max-cpu = 15 +max-metrics-in-find-answer = 13 +max-metrics-per-target = 16 +target-blacklist = ['^blacklisted'] +memory-return-interval = "12s150ms" + +[metrics] +metric-endpoint = "127.0.0.1:2003" +metric-interval = "10s" +metric-prefix = "graphite" +ranges = { "1h" = "1h", "3d" = "72h", "7d" = "168h", "30d" = "720h", "90d" = "2160h" } + +[clickhouse] +url = "http://somehost:8123" +index-table = "graphite_index" +index-use-daily = false +index-reverse = "direct" +index-reverses = [ + {suffix = "suf", prefix = "pref", reverse = "direct"}, + {regex = "^reg$", reverse = "reversed"}, +] +tagged-table = "graphite_tags" +tagged-autocomplete-days = 5 +tagged-use-daily = false +tree-table = "tree" +reverse-tree-table = "reversed_tree" +date-tree-table = "data_tree" +date-tree-table-version = 2 +tag-table = "tag_table" +extra-prefix = "tum.pu-dum" +data-table = "data" +rollup-conf = "none" +max-data-points = 8000 +internal-aggregation = true +data-timeout = "64s" +index-timeout = "4s" +tree-timeout = "5s" +connect-timeout = "2s" + +render-max-queries = 1000 +render-concurrent-queries = 10 +render-adaptive-queries = 4 +find-max-queries = 200 +find-concurrent-queries = 8 +tags-max-queries = 50 +tags-concurrent-queries = 4 +tags-adaptive-queries = 3 + +query-params = [ + { + duration = "72h", + url = "http://localhost:8123/?max_rows_to_read=20000", + concurrent-queries = 4, + adaptive-queries = 6 + } +] + +user-limits = { + "alert" = { + max-queries = 200, + concurrent-queries = 10, + adaptive-queries = 5 + } +} + +# DataTable is tested in TestProcessDataTables +# [[data-table]] +# table = "another_data" +# rollup-conf = "auto" +# rollup-conf-table = "another_table" + +[tags] +rules = "filename" +date = "2012-12-12" +extra-where = "AND case" +input-file = "input" +output-file = "output" + +[carbonlink] +server = "server:3333" +threads-per-request = 5 +connect-timeout = "250ms" +query-timeout = "350ms" +total-timeout = "800ms" + +[prometheus] +listen = ":9092" +external-url = "https://server:3456/uri" +page-title = "Prometheus Time Series" +lookback-delta = "5m" + +[debug] +directory = "tests_tmp" +directory-perm = 0o755 
+external-data-perm = 0o640 + +[[logging]] +logger = "debugger" +file = "stdout" +level = "debug" +encoding = "console" +encoding-time = "iso8601" +encoding-duration = "string" +sample-tick = "5ms" +sample-initial = 1 +sample-thereafter = 2 + +[[logging]] +logger = "logger" +file = "tests_tmp/logger.txt" +level = "info" +encoding = "json" +encoding-time = "epoch" +encoding-duration = "seconds" +sample-tick = "50ms" +sample-initial = 10 +sample-thereafter = 12 +`, + ) + config, _, err := Unmarshal(body) + expected := New() + require.NoError(t, err) + assert.NotNil(t, metrics.Graphite) + metrics.Graphite = nil + + // Common + expected.Common = Common{ + Listen: "[::1]:9090", + PprofListen: "127.0.0.1:9091", + MaxCPU: 15, + MaxMetricsInFindAnswer: 13, + MaxMetricsPerTarget: 16, + TargetBlacklist: []string{"^blacklisted"}, + Blacklist: make([]*regexp.Regexp, 1), + MemoryReturnInterval: 12150000000, + FindCacheConfig: CacheConfig{ + Type: "null", + DefaultTimeoutSec: 0, + ShortTimeoutSec: 0, + }, + } + expected.Metrics = metrics.Config{ + MetricEndpoint: "127.0.0.1:2003", + MetricInterval: 10 * time.Second, + MetricTimeout: time.Second, + MetricPrefix: "graphite", + BucketsWidth: []int64{200, 500, 1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 60000}, + BucketsLabels: []string{ + "_to_200ms", + "_to_500ms", + "_to_1000ms", + "_to_2000ms", + "_to_3000ms", + "_to_5000ms", + "_to_7000ms", + "_to_10000ms", + "_to_15000ms", + "_to_20000ms", + "_to_25000ms", + "_to_30000ms", + "_to_40000ms", + "_to_50000ms", + "_to_60000ms", + "_to_inf", + }, + // until-from = { "1h" = "1h", "3d" = "72h", "7d" = "168h", "30d" = "720h", "90d" = "2160h" } + Ranges: map[string]time.Duration{ + "1h": time.Hour, + "3d": 72 * time.Hour, + "7d": 168 * time.Hour, + "30d": 720 * time.Hour, + "90d": 2160 * time.Hour, + }, + RangeNames: []string{"1h", "3d", "7d", "30d", "90d", "history"}, + RangeS: []int64{3600, 259200, 604800, 2592000, 7776000, math.MaxInt64}, + } + r, _ := regexp.Compile(expected.Common.TargetBlacklist[0]) + expected.Common.Blacklist[0] = r + assert.Equal(t, expected.Common, config.Common) + assert.Equal(t, expected.Metrics, config.Metrics) + + // ClickHouse + expected.ClickHouse = ClickHouse{ + URL: "http://somehost:8123", + DataTimeout: 64000000000, + QueryParams: []QueryParam{ + { + Duration: 0, + URL: "http://somehost:8123", + DataTimeout: 64000000000, + MaxQueries: 1000, + ConcurrentQueries: 10, + AdaptiveQueries: 4, + }, + { + Duration: 72 * time.Hour, + URL: "http://localhost:8123/?max_rows_to_read=20000", + DataTimeout: 64000000000, + ConcurrentQueries: 4, + AdaptiveQueries: 6, + }, + }, + RenderMaxQueries: 1000, + RenderConcurrentQueries: 10, + RenderAdaptiveQueries: 4, + FindMaxQueries: 200, + FindConcurrentQueries: 8, + TagsMaxQueries: 50, + TagsConcurrentQueries: 4, + TagsAdaptiveQueries: 3, + UserLimits: map[string]UserLimits{ + "alert": { + MaxQueries: 200, + ConcurrentQueries: 10, + AdaptiveQueries: 5, + }, + }, + IndexTable: "graphite_index", + IndexReverse: "direct", + IndexReverses: make(IndexReverses, 2), + IndexTimeout: 4000000000, + TaggedTable: "graphite_tags", + TaggedAutocompleDays: 5, + TreeTable: "tree", + ReverseTreeTable: "reversed_tree", + DateTreeTable: "data_tree", + DateTreeTableVersion: 2, + TreeTimeout: 5000000000, + TagTable: "tag_table", + ExtraPrefix: "tum.pu-dum", + ConnectTimeout: 2000000000, + DataTableLegacy: "data", + RollupConfLegacy: "none", + MaxDataPoints: 8000, + InternalAggregation: true, + } + expected.ClickHouse.IndexReverses[0] 
= &IndexReverseRule{"suf", "pref", "", nil, "direct"}
+	r, _ = regexp.Compile("^reg$")
+	expected.ClickHouse.IndexReverses[1] = &IndexReverseRule{"", "", "^reg$", r, "reversed"}
+	for i := range config.ClickHouse.QueryParams {
+		if _, ok := config.ClickHouse.QueryParams[i].Limiter.(*limiter.ALimiter); ok {
+			config.ClickHouse.QueryParams[i].Limiter = nil
+		}
+	}
+	if _, ok := config.ClickHouse.FindLimiter.(*limiter.WLimiter); ok {
+		config.ClickHouse.FindLimiter = nil
+	}
+	if _, ok := config.ClickHouse.TagsLimiter.(*limiter.ALimiter); ok {
+		config.ClickHouse.TagsLimiter = nil
+	}
+	for u, q := range config.ClickHouse.UserLimits {
+		if _, ok := q.Limiter.(*limiter.ALimiter); ok {
 			q.Limiter = nil
 			config.ClickHouse.UserLimits[u] = q
 		}
 	}
diff --git a/deploy/doc/config.md b/deploy/doc/config.md
index 2811d2118..8c0f6a744 100644
--- a/deploy/doc/config.md
+++ b/deploy/doc/config.md
@@ -71,15 +71,17 @@ query-params = [
 ### Query limiter for prevent database overloading (limit concurrent/maximum incomming requests)
 For prevent database overloading incomming requests (render/find/autocomplete) can be limited.
-If executing max-concurrent requests, next request will be wait for free slot until index-timeout reached
 If wait max-queries requests, for new request error returned immediately.
-
+If concurrent-queries requests are already executing, the next request will wait for a free slot until index-timeout is reached.
+adaptive-queries prevents overload with a load-average check when graphite-clickhouse runs on the same host as clickhouse.
+The real limit will be concurrent-queries + adaptive-queries * (1 / normalized_load_avg - 1).
+If normalized_load_avg > 0.9, the limit will be just concurrent-queries.
 ```
 url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=102400000&max_result_bytes=12800000&max_threads=2"
 render-max-queries = 500
 render-max-concurrent = 10
 find-max-queries = 100
-find-max-concurrent = 10
+find-concurrent-queries = 10
 tags-max-queries = 100
 tags-max-concurrent = 10
diff --git a/doc/config.md b/doc/config.md
index 46afcef12..372559a27 100644
--- a/doc/config.md
+++ b/doc/config.md
@@ -74,15 +74,17 @@ query-params = [
 ### Query limiter for prevent database overloading (limit concurrent/maximum incomming requests)
 For prevent database overloading incomming requests (render/find/autocomplete) can be limited.
-If executing max-concurrent requests, next request will be wait for free slot until index-timeout reached
 If wait max-queries requests, for new request error returned immediately.
-
+If concurrent-queries requests are already executing, the next request will wait for a free slot until index-timeout is reached.
+adaptive-queries prevents overload with a load-average check when graphite-clickhouse runs on the same host as clickhouse.
+The real limit will be concurrent-queries + adaptive-queries * (1 / normalized_load_avg - 1).
+If normalized_load_avg > 0.9, the limit will be just concurrent-queries.
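+
+As an illustration, a minimal Go sketch of the documented formula (a hypothetical helper, not the daemon's actual limiter code; the guard for a near-zero load average is an assumption):
+
+```go
+package main
+
+import "fmt"
+
+// effectiveLimit follows the formula above: the base concurrency plus an
+// adaptive share that shrinks as the normalized load average grows.
+func effectiveLimit(concurrentQueries, adaptiveQueries int, normalizedLoadAvg float64) int {
+	if normalizedLoadAvg > 0.9 {
+		return concurrentQueries // heavy load: only the base limit
+	}
+	if normalizedLoadAvg < 0.1 {
+		normalizedLoadAvg = 0.1 // assumption: guard against division by zero on idle
+	}
+	return concurrentQueries + int(float64(adaptiveQueries)*(1/normalizedLoadAvg-1))
+}
+
+func main() {
+	fmt.Println(effectiveLimit(10, 4, 0.5))  // 14
+	fmt.Println(effectiveLimit(10, 4, 0.95)) // 10
+}
+```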
 ```
 url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=102400000&max_result_bytes=12800000&max_threads=2"
 render-max-queries = 500
 render-max-concurrent = 10
 find-max-queries = 100
-find-max-concurrent = 10
+find-concurrent-queries = 10
 tags-max-queries = 100
 tags-max-concurrent = 10
@@ -190,6 +192,16 @@ Only one tag used as filter for index field Tag1, see graphite_tagged table [str
   memory-return-interval = "0s"
   # additional request headers to log
   headers-to-log = []
+  # service discovery base weight (on idle)
+  base_weight = 0
+  # service discovery type
+  service-discovery-type = 0
+  # service discovery address (consul)
+  service-discovery = ""
+  # service discovery namespace (graphite by default)
+  service-discovery-ns = ""
+  # service discovery datacenters (the first is primary, the others register as backup)
+  service-discovery-ds = []
 
   # find/tags cache config
   [common.find-cache]
@@ -241,16 +253,22 @@ Only one tag used as filter for index field Tag1, see graphite_tagged table [str
   data-timeout = "1m0s"
   # Max queries to render queiries
   render-max-queries = 0
-  # Maximum concurrent queries to render queiries
-  render-max-concurrent = 0
+  # Concurrent queries to render queries
+  render-concurrent-queries = 0
+  # Render adaptive queries (based on load average) to increase/decrease concurrent queries
+  render-adaptive-queries = 0
   # Max queries for find queries
   find-max-queries = 0
-  # Maximum concurrent queries for find queries
-  find-max-concurrent = 0
+  # Concurrent queries for find queries
+  find-concurrent-queries = 0
+  # Find adaptive queries (based on load average) to increase/decrease concurrent queries
+  find-adaptive-queries = 0
   # Max queries for tags queries
   tags-max-queries = 0
-  # Maximum concurrent queries for tags queries
-  tags-max-concurrent = 0
+  # Concurrent queries for tags queries
+  tags-concurrent-queries = 0
+  # Tags adaptive queries (based on load average) to increase/decrease concurrent queries
+  tags-adaptive-queries = 0
   # Minimum tags in seriesByTag query
   tags-min-in-query = 0
   # Minimum tags in autocomplete query
diff --git a/doc/config.md.orig b/doc/config.md.orig
new file mode 100644
index 000000000..29a054ffc
--- /dev/null
+++ b/doc/config.md.orig
@@ -0,0 +1,399 @@
+[//]: # (This file is built out of deploy/doc/config.md, please do not edit it manually)
+[//]: # (To rebuild it run `make config`)
+
+# Configuration
+
+## Common `[common]`
+
+### Finder cache
+
+Specify what storage to use for finder cache. This cache stores finder results (metrics find/tags autocomplete/render).
+
+Supported cache types:
+ - `mem` - will use integrated in-memory cache. Not distributed. Fast.
+ - `memcache` - will use specified memcache servers. Could be shared. Slow.
+ - `null` - disable cache
+
+Extra options:
+ - `size_mb` - specify max size of cache, in MiB
+ - `defaultTimeoutSec` - specify default cache ttl.
+ - `shortTimeoutSec` - cache ttl for short duration intervals of render queries (duration <= shortDuration && now-until <= 61) (if 0, disable this cache) + - `findTimeoutSec` - cache ttl for finder/tags autocompleter queries (if 0, disable this cache) + - `shortDuration` - maximum duration for render queries, which use shortTimeoutSec duration + +### Example +```yaml +[common.find-cache] +type = "memcache" +size_mb = 0 +memcachedServers = [ "127.0.0.1:1234", "127.0.0.2:1235" ] +defaultTimeoutSec = 10800 +shortTimeoutSec = 300 +findTimeoutSec = 600 +``` + +## ClickHouse `[clickhouse]` + +### URL `url` +Detailed explanation of ClickHouse HTTP interface is given in [documentation](https://clickhouse.tech/docs/en/interfaces/http). It's recommended to create a dedicated read-only user for graphite-clickhouse. + +Example: `url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1"` + +Some useful parameters: + +- [log_queries=1](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-log-queries): all queries will be logged in the `system.query_log` table. Useful for debug. +- [readonly=2](https://clickhouse.tech/docs/en/operations/settings/permissions-for-queries/#settings_readonly): do not change data on the server +- [max_rows_to_read=200000000](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#max-rows-to-read): useful if you want to prevent too broad requests +- [cancel_http_readonly_queries_on_client_close=1](https://clickhouse.tech/docs/en/operations/settings/settings/#cancel-http-readonly-queries-on-client-close): cancel DB query when request is canceled. + +All these and more settings can be set in clickhouse-server configuration as user's profile settings. + +Useless settings: + +- `max_query_size`: at the moment [external data](https://clickhouse.tech/docs/en/engines/table-engines/special/external-data/) is used, the query length is relatively small and always less than the default [262144](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-max_query_size) +- `max_ast_elements`: the same +- `max_execution_time`: with `cancel_http_readonly_queries_on_client_close=1` and `data-timeout = "1m"` it's already covered. + +### Query multi parameters (for overwrite default url and data-timeout) + +For queries with duration (until - from) >= 72 hours, use custom url and data-timeout + +``` +url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=102400000&max_result_bytes=12800000&max_threads=2" +data-timeout = "30s" + +query-params = [ + { + duration = "72h", + url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=1024000000&max_result_bytes=128000000&max_threads=1", + data-timeout = "60s" + } +] +``` + +### Query limiter for prevent database overloading (limit concurrent/maximum incomming requests) + +For prevent database overloading incomming requests (render/find/autocomplete) can be limited. +If executing max-concurrent requests, next request will be wait for free slot until index-timeout reached +If wait max-queries requests, for new request error returned immediately. 
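+
+A minimal Go sketch of these semantics (an illustration under assumptions, not the actual limiter package code):
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+)
+
+var errMaxQueries = errors.New("max-queries limit reached")
+
+// limiterSketch: queued bounds waiting+running requests (max-queries,
+// fails fast); running bounds execution (max-concurrent, waits on ctx).
+type limiterSketch struct {
+	queued  chan struct{}
+	running chan struct{}
+}
+
+func newLimiterSketch(maxQueries, maxConcurrent int) *limiterSketch {
+	return &limiterSketch{
+		queued:  make(chan struct{}, maxQueries),
+		running: make(chan struct{}, maxConcurrent),
+	}
+}
+
+func (l *limiterSketch) Enter(ctx context.Context) error {
+	select {
+	case l.queued <- struct{}{}:
+	default:
+		return errMaxQueries // max-queries already waiting: error immediately
+	}
+	select {
+	case l.running <- struct{}{}: // wait for a free execution slot ...
+		return nil
+	case <-ctx.Done(): // ... until the timeout (e.g. index-timeout) is reached
+		<-l.queued
+		return ctx.Err()
+	}
+}
+
+func (l *limiterSketch) Leave() { <-l.running; <-l.queued }
+
+func main() {
+	l := newLimiterSketch(100, 10)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	if err := l.Enter(ctx); err == nil {
+		defer l.Leave()
+		fmt.Println("admitted")
+	}
+}
+```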
+ +``` +url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=102400000&max_result_bytes=12800000&max_threads=2" +render-max-queries = 500 +render-max-concurrent = 10 +find-max-queries = 100 +find-max-concurrent = 10 +tags-max-queries = 100 +tags-max-concurrent = 10 + +query-params = [ + { + duration = "72h", + url = "http://graphite:qwerty@localhost:8123/?readonly=2&log_queries=1&max_rows_to_read=1024000000&max_result_bytes=128000000&max_threads=1", + data-timeout = "60s" + max-queries = 100, + max-concurrent = 4 + } +] + +user-limits = { + "alerting" = { + max-queries = 100, + max-concurrent = 5 + } +} + +``` + +### Index table +See [index table](./index-table.md) documentation for details. + +### Index reversed queries tuning +By default the daemon decides to make a direct or reversed request to the [index table](./index-table.md) based on a first and last glob node in the metric. It choose the most long path to reduce readings. Additional examples can be found in [tests](../finder/index_test.go). + +You can overwrite automatic behavior with `index-reverse`. Valid values are `"auto", direct, "reversed"` + +If you need fine tuning for different paths, you can use `[[clickhouse.index-reverses]]` to set behavior per metrics' `prefix`, `suffix` or `regexp`. + +### Tags table +By default, tags are stored in the tagged-table on the daily basis. If a metric set doesn't change much, that leads to situation when the same data stored multiple times. +To prevent uncontrolled growth and reduce the amount of data stored in the tagged-table, the `tagged-use-daily` parameter could be set to `false` and table definition could be changed to something like: +``` +CREATE TABLE graphite_tagged ( + Date Date, + Tag1 String, + Path String, + Tags Array(String), + Version UInt32 +) ENGINE = ReplacingMergeTree(Date) +ORDER BY (Tag1, Path); +``` + +For restrict costly seriesByTag (may be like `seriesByTag('name=~test.*.*.rabbitmq_overview.connections')` or `seriesByTag('name=test.*.*.rabbitmq_overview.connections')`) use tags-min-in-query parameter. +For restrict costly autocomplete queries use tags-min-in-autocomplete parameter. + +set for require at minimum 1 eq argument (without wildcards) +`tags-min-in-query=1` + + +`ReplacingMergeTree(Date)` prevent broken tags autocomplete with default `ReplacingMergeTree(Version)`, when write to the past. + +### ClickHouse aggregation +For detailed description of `max-data-points` and `internal-aggregation` see [aggregation documentation](./aggregation.md). + +## Data tables `[[data-table]]` + +### Rollup +The rollup configuration is used for a proper metrics pre-aggregation. It contains two rules types: + +- retention for point per time range +- aggregation function for a values + +Historically, the way to define the config was `rollup-conf = "/path/to/the/conf/with/graphite_rollup.xml"`. The format is the same as [graphite_rollup](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/graphitemergetree/#rollup-configuration) scheme for ClickHouse server. + +For a quite long time it's recommended to use `rollup-conf = "auto"` to get the configuration from remote ClickHouse server. It will update itself on each `rollup-auto-interval` (1 minute by default) or once on startup if set to "0s". + +If you don't use a `GraphiteMergeTree` family engine, you can still use `rollup-conf = "auto"` by setting `rollup-auto-table="graphiteMergeTreeTable"` and get the proper config. 
In this case `graphiteMergeTreeTable` is a dummy table associated with proper [graphite_rollup](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/graphitemergetree/#rollup-configuration). The cases when you may need it: + +- ReplacingMergeTree engine +- Distributed engine +- Materialized view + +It's possible as well to set `rollup-conf = "none"`. Then values from `rollup-default-precision` and `rollup-default-function` will be used. + +#### Additional rollup tuning for reversed data tables +When `reverse = true` is set for data-table, there are two possibles cases for [graphite_rollup](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/graphitemergetree/#rollup-configuration): + +- Original regexps are used, like `^level_one.level_two.suffix$` +- Reversed regexps are used, like `^suffix.level_two.level_one$` + +Depends on it for having a proper retention and aggregation you must additionally set `rollup-use-reverted = true` for the first case and `rollup-use-reverted = false` for the second. + +#### Additional tuning tagged find for seriesByTag and autocomplete +Only one tag used as filter for index field Tag1, see graphite_tagged table [structure](https://github.com/lomik/ +```toml +[common] + # general listener + listen = ":9090" + # listener to serve /debug/pprof requests. '-pprof' argument overrides it + pprof-listen = "" + max-cpu = 1 + # limit number of results from find query, 0=unlimited + max-metrics-in-find-answer = 0 + # limit numbers of queried metrics per target in /render requests, 0 or negative = unlimited + max-metrics-per-target = 15000 + # daemon returns empty response if query matches any of regular expressions + # target-blacklist = [] + # daemon will return the freed memory to the OS when it>0 + memory-return-interval = "0s" + # additional request headers to log + headers-to-log = [] + + # find/tags cache config + [common.find-cache] + # cache type + type = "null" + # cache size + size-mb = 0 + # memcached servers + memcached-servers = [] + # default cache ttl + default-timeout = 0 + # short-time cache ttl + short-timeout = 0 + # finder/tags autocompleter cache ttl + find-timeout = 0 + # maximum diration, used with short_timeout + short-duration = "0s" + # offset beetween now and until for select short cache timeout + short-offset = 0 + +[metrics] + # graphite relay address + metric-endpoint = "" + # statsd server address + statsd-endpoint = "" + # graphite metrics send interval + metric-interval = "0s" + # graphite metrics send timeout + metric-timeout = "0s" + # graphite metrics prefix + metric-prefix = "" + # Request historgram buckets widths + request-buckets = [] + # Request historgram buckets labels + request-labels = [] + + # Additional separate stats for until-from ranges + [metrics.ranges] + + # Additional separate stats for until-from find ranges + [metrics.find-ranges] + # Extended metrics + extended-stat = false + +[clickhouse] + # default url, see https://clickhouse.tech/docs/en/interfaces/http. 
Can be overwritten with query-params + url = "http://localhost:8123?cancel_http_readonly_queries_on_client_close=1" + # default total timeout to fetch data, can be overwritten with query-params + data-timeout = "1m0s" + # Max queries to render queiries + render-max-queries = 0 + # Maximum concurrent queries to render queiries + render-max-concurrent = 0 + # Max queries for find queries + find-max-queries = 0 + # Maximum concurrent queries for find queries + find-max-concurrent = 0 + # Max queries for tags queries + tags-max-queries = 0 + # Maximum concurrent queries for tags queries + tags-max-concurrent = 0 + # Minimum tags in seriesByTag query + tags-min-in-query = 0 + # Minimum tags in autocomplete query + tags-min-in-autocomplete = 0 + + # customized query limiter for some users + # [clickhouse.user-limits] + # Date format (default, utc, both) + date-format = "" + # see doc/index-table.md + index-table = "graphite_index" + index-use-daily = true + # see doc/config.md + index-reverse = "auto" + + # [[clickhouse.index-reverses]] + # rule is used when the target suffix is matched + # suffix = "suffix" + # same as index-reverse + # reverse = "auto" + + # [[clickhouse.index-reverses]] + # rule is used when the target prefix is matched + # prefix = "prefix" + # same as index-reverse + # reverse = "direct" + + # [[clickhouse.index-reverses]] + # rule is used when the target regex is matched + # regex = "regex" + # same as index-reverse + # reverse = "reversed" + # total timeout to fetch series list from index + index-timeout = "1m0s" + # 'tagged' table from carbon-clickhouse, required for seriesByTag + tagged-table = "graphite_tagged" + # or how long the daemon will query tags during autocomplete + tagged-autocomplete-days = 7 + # whether to use date filter when searching for the metrics in the tagged-table + tagged-use-daily = true + + # costs for tags (for tune which tag will be used as primary), by default is 0, increase for costly (with poor selectivity) tags + # [clickhouse.tagged-costs] + # old index table, DEPRECATED, see description in doc/config.md + # tree-table = "" + # reverse-tree-table = "" + # date-tree-table = "" + # date-tree-table-version = 0 + # tree-timeout = "0s" + # is not recommended to use, https://github.com/lomik/graphite-clickhouse/wiki/TagsRU + # tag-table = "" + # add extra prefix (directory in graphite) for all metrics, w/o trailing dot + extra-prefix = "" + # TCP connection timeout + connect-timeout = "1s" + # will be removed in 0.14 + # data-table = "" + # rollup-conf = "auto" + # max points per metric when internal-aggregation=true + max-data-points = 1048576 + # ClickHouse-side aggregation, see doc/aggregation.md + internal-aggregation = true + +[[data-table]] + # data table from carbon-clickhouse + table = "graphite_data" + # if it stores direct or reversed metrics + reverse = false + # maximum age stored in the table + max-age = "0s" + # minimum age stored in the table + min-age = "0s" + # maximum until-from interval allowed for the table + max-interval = "0s" + # minimum until-from interval allowed for the table + min-interval = "0s" + # table allowed only if any metrics in target matches regexp + target-match-any = "" + # table allowed only if all metrics in target matches regexp + target-match-all = "" + # custom rollup.xml file for table, 'auto' and 'none' are allowed as well + rollup-conf = "auto" + # custom table for 'rollup-conf=auto', useful for Distributed or MatView + rollup-auto-table = "" + # rollup update interval for 'rollup-conf=auto' + 
rollup-auto-interval = "1m0s" + # is used when none of rules match + rollup-default-precision = 0 + # is used when none of rules match + rollup-default-function = "" + # should be set to true if you don't have reverted regexps in rollup-conf for reversed tables + rollup-use-reverted = false + # valid values are 'graphite' of 'prometheus' + context = [] + +# is not recommended to use, https://github.com/lomik/graphite-clickhouse/wiki/TagsRU +# [tags] + # rules = "" + # date = "" + # extra-where = "" + # input-file = "" + # output-file = "" + +[carbonlink] + server = "" + threads-per-request = 10 + connect-timeout = "50ms" + query-timeout = "50ms" + # timeout for querying and parsing response + total-timeout = "500ms" + +[prometheus] + # listen addr for prometheus ui and api + listen = ":9092" + # allows to set URL for redirect manually + external-url = "" + page-title = "Prometheus Time Series Collection and Processing Server" + lookback-delta = "5m0s" + +# see doc/debugging.md +[debug] + # the directory for additional debug output + directory = "" + # permissions for directory, octal value is set as 0o755 + directory-perm = 493 + # permissions for directory, octal value is set as 0o640 + external-data-perm = 0 + +[[logging]] + # handler name, default empty + logger = "" + # '/path/to/filename', 'stderr', 'stdout', 'empty' (=='stderr'), 'none' + file = "/var/log/graphite-clickhouse/graphite-clickhouse.log" + # 'debug', 'info', 'warn', 'error', 'dpanic', 'panic', and 'fatal' + level = "info" + # 'json' or 'console' + encoding = "mixed" + # 'millis', 'nanos', 'epoch', 'iso8601' + encoding-time = "iso8601" + # 'seconds', 'nanos', 'string' + encoding-duration = "seconds" + # passed to time.ParseDuration + sample-tick = "" + # first n messages logged per tick + sample-initial = 0 + # every m-th message logged thereafter per tick + sample-thereafter = 0 +``` diff --git a/go.mod b/go.mod index 88fdd2a43..a38f2f317 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/go-graphite/protocol v1.0.0 github.com/gogo/protobuf v1.3.2 github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 + github.com/json-iterator/go v1.1.12 github.com/lomik/carbon-clickhouse v0.11.7 github.com/lomik/graphite-pickle v0.0.0-20171221213606-614e8df42119 github.com/lomik/og-rek v0.0.0-20170411191824-628eefeb8d80 @@ -61,7 +62,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/lomik/stop v0.0.0-20161127103810-188e98d969bd // indirect github.com/mailru/easyjson v0.7.7 // indirect diff --git a/go.sum b/go.sum index acaea36ef..c650fb765 100644 --- a/go.sum +++ b/go.sum @@ -587,8 +587,6 @@ github.com/msaf1980/go-metrics v0.0.14/go.mod h1:8VcR8MdyvIJpcVLOVFKbhb27+60tXy0 github.com/msaf1980/go-stringutils v0.1.2/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= github.com/msaf1980/go-stringutils v0.1.4 h1:UwsIT0hplHVucqbknk3CoNqKkmIuSHhsbBldXxyld5U= github.com/msaf1980/go-stringutils v0.1.4/go.mod h1:AxmV/6JuQUAtZJg5XmYATB5ZwCWgtpruVHY03dswRf8= -github.com/msaf1980/go-syncutils v0.0.2 h1:F7lTtojuZUHFH9Cs6yRz4SRnvmttSV2qD6nEvseCFVg= -github.com/msaf1980/go-syncutils v0.0.2/go.mod h1:zoZwQNkDATcfKq5lQPK6dmJT7Z01COxw/vd8bcJyC9w= github.com/msaf1980/go-syncutils v0.0.3 h1:bd6+yTSB8/CmpG7M6j1gq5sJMyPqecjJcBf19s2Y6u4= github.com/msaf1980/go-syncutils v0.0.3/go.mod 
h1:zoZwQNkDATcfKq5lQPK6dmJT7Z01COxw/vd8bcJyC9w=
 github.com/msaf1980/go-timeutils v0.0.3 h1:c0NIpJBcU6KoMeMCPdnbGFcaP4sm7VCwoW1cdgsmUkU=
diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go
index 3a8df876d..3881fdfa8 100644
--- a/graphite-clickhouse.go
+++ b/graphite-clickhouse.go
@@ -26,6 +26,8 @@ import (
 	"github.com/lomik/graphite-clickhouse/pkg/scope"
 	"github.com/lomik/graphite-clickhouse/prometheus"
 	"github.com/lomik/graphite-clickhouse/render"
+	"github.com/lomik/graphite-clickhouse/sd"
+	"github.com/lomik/graphite-clickhouse/sd/nginx"
 	"github.com/lomik/graphite-clickhouse/tagger"
 )
 
@@ -96,6 +98,8 @@ func main() {
 		"Additional pprof listen addr for non-server modes (tagger, etc..), overrides pprof-listen from common ",
 	)
 
+	sdList := flag.Bool("sd-list", false, "List registered nodes in SD")
+
 	printVersion := flag.Bool("version", false, "Print version")
 	verbose := flag.Bool("verbose", false, "Verbose (print config on startup)")
 
@@ -123,15 +127,41 @@ func main() {
 		return
 	}
 
+	if *sdList {
+		if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() {
+			var sd sd.SD
+			logger := zapwriter.Default()
+			switch cfg.Common.SDType {
+			case config.SDNginx:
+				sd = nginx.New(cfg.Common.SD, cfg.Common.SDNamespace, "", logger)
+			default:
+				panic("service discovery type not registered")
+			}
+			if nodes, err := sd.Nodes(); err == nil {
+				for _, node := range nodes {
+					fmt.Printf("%s: %s\n", node.Key, node.Value)
+				}
+			} else {
+				log.Fatal(err)
+			}
+		}
+		return
+	}
+
 	if err = zapwriter.ApplyConfig(cfg.Logging); err != nil {
 		log.Fatal(err)
 	}
 
-	logger := zapwriter.Logger("start")
+	localManager, err := zapwriter.NewManager(cfg.Logging)
+	if err != nil {
+		log.Fatal(err)
+	}
+	logger := localManager.Logger("start")
 	if len(warns) > 0 {
 		zapwriter.Logger("config").Warn("warnings", warns...)
 	}
+
 	if *verbose {
 		logger.Info("starting graphite-clickhouse",
 			zap.String("build_version", BuildVersion),
@@ -216,5 +246,10 @@ func main() {
 		metrics.Graphite.Start(nil)
 	}
 
+	if cfg.NeedLoadAvgColect() {
+		sdLogger := localManager.Logger("service discovery")
+		go sd.Register(cfg, sdLogger)
+	}
+
 	log.Fatal(http.ListenAndServe(cfg.Common.Listen, mux))
 }
diff --git a/limiter/alimiter.go b/limiter/alimiter.go
new file mode 100644
index 000000000..e59bd96b5
--- /dev/null
+++ b/limiter/alimiter.go
@@ -0,0 +1,162 @@
+package limiter
+
+import (
+	"context"
+	"time"
+
+	"github.com/lomik/graphite-clickhouse/load_avg"
+	"github.com/lomik/graphite-clickhouse/metrics"
+)
+
+var (
+	ctxMain, Stop = context.WithCancel(context.Background())
+	checkDelay    = time.Second * 60
+)
+
+func getWeighted(n, max int) int {
+	if n <= 0 {
+		return 0
+	}
+	loadAvg := load_avg.Load()
+	if loadAvg < 0.6 {
+		return 0
+	}
+
+	l := int(float64(n) * loadAvg)
+	if l >= max {
+		if max <= 1 {
+			return 1
+		}
+		return max - 1
+	}
+
+	return l
+}
+
+// ALimiter limits the amount of requests/concurrently executing requests (adaptive, based on load avg)
+type ALimiter struct {
+	l  limiter
+	cL limiter
+	c  int
+	n  int
+
+	m metrics.WaitMetric
+}
+
+// NewALimiter creates an adaptive limiter with the given limits.
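+// Assumed semantics, inferred from getWeighted above and balance below:
+// l caps the total number of admitted queries, c caps concurrently
+// executing ones, and up to n of the c slots are gradually occupied by
+// the balance goroutine as the load average grows, shrinking the
+// effective concurrency under load.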
+func NewALimiter(l, c, n int, enableMetrics bool, scope, sub string) ServerLimiter {
+	if l <= 0 && c <= 0 {
+		return NoopLimiter{}
+	}
+	if n >= c {
+		n = c - 1
+	}
+	if n <= 0 {
+		return NewWLimiter(l, c, enableMetrics, scope, sub)
+	}
+
+	a := &ALimiter{
+		m: metrics.NewWaitMetric(enableMetrics, scope, sub), c: c, n: n,
+	}
+	a.cL.ch = make(chan struct{}, c)
+	a.cL.cap = c
+
+	go a.balance()
+
+	return a
+}
+
+// balance periodically occupies or releases up to n of the c concurrency
+// slots, depending on the current load average (see getWeighted).
+func (sl *ALimiter) balance() {
+	var last int
+	for {
+		start := time.Now()
+		n := getWeighted(sl.n, sl.c)
+		if n > last {
+			for i := 0; i < n-last; i++ {
+				if sl.cL.enter(ctxMain, "balance") != nil {
+					break
+				}
+			}
+			last = n
+		} else if n < last {
+			for i := 0; i < last-n; i++ {
+				sl.cL.leave(ctxMain, "balance")
+			}
+			last = n
+		}
+		delay := time.Since(start)
+		if delay < checkDelay {
+			time.Sleep(checkDelay - delay)
+		}
+	}
+}
+
+func (sl *ALimiter) Capacity() int {
+	return sl.l.capacity()
+}
+
+// Enter claims one of the free slots, waiting until ctx is done if needed.
+func (sl *ALimiter) Enter(ctx context.Context, s string) (err error) {
+	if sl.l.cap > 0 {
+		if err = sl.l.tryEnter(ctx, s); err != nil {
+			sl.m.WaitErrors.Add(1)
+			return
+		}
+	}
+	if sl.cL.cap > 0 {
+		if sl.cL.enter(ctx, s) != nil {
+			if sl.l.cap > 0 {
+				sl.l.leave(ctx, s)
+			}
+			sl.m.WaitErrors.Add(1)
+			err = ErrTimeout
+		}
+	}
+	sl.m.Requests.Add(1)
+	return
+}
+
+// TryEnter claims one of free slots without blocking.
+func (sl *ALimiter) TryEnter(ctx context.Context, s string) (err error) {
+	if sl.l.cap > 0 {
+		if err = sl.l.tryEnter(ctx, s); err != nil {
+			sl.m.WaitErrors.Add(1)
+			return
+		}
+	}
+	if sl.cL.cap > 0 {
+		if sl.cL.tryEnter(ctx, s) != nil {
+			if sl.l.cap > 0 {
+				sl.l.leave(ctx, s)
+			}
+			sl.m.WaitErrors.Add(1)
+			err = ErrTimeout
+		}
+	}
+	sl.m.Requests.Add(1)
+	return
+}
+
+// Leave frees a slot in the limiter
+func (sl *ALimiter) Leave(ctx context.Context, s string) {
+	if sl.l.cap > 0 {
+		sl.l.leave(ctx, s)
+	}
+	sl.cL.leave(ctx, s)
+}
+
+// SendDuration sends a StatsD duration timing
+func (sl *ALimiter) SendDuration(queueMs int64) {
+	if sl.m.WaitTimeName != "" {
+		metrics.Gstatsd.Timing(sl.m.WaitTimeName, queueMs, 1.0)
+	}
+}
+
+// Unregiter unregisters the graphite metric
+func (sl *ALimiter) Unregiter() {
+	sl.m.Unregister()
+}
+
+// Enabled returns the enabled flag; if false it's a noop limiter and can be safely skipped
+func (sl *ALimiter) Enabled() bool {
+	return true
+}
diff --git a/limiter/alimiter_test.go b/limiter/alimiter_test.go
new file mode 100644
index 000000000..e70bab71d
--- /dev/null
+++ b/limiter/alimiter_test.go
@@ -0,0 +1,188 @@
+package limiter
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/lomik/graphite-clickhouse/load_avg"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_getWeighted(t *testing.T) {
+	tests := []struct {
+		loadAvg float64
+		c       int
+		n       int
+		want    int
+	}{
+		{loadAvg: 0, c: 100, n: 100, want: 0},
+		{loadAvg: 0.2, c: 100, n: 100, want: 0},
+		{loadAvg: 0.999, c: 100, n: 1, want: 0},
+		{loadAvg: 1, c: 1, n: 100, want: 1},
+		{loadAvg: 1, c: 100, n: 100, want: 99},
+		{loadAvg: 1, c: 101, n: 100, want: 100},
+		{loadAvg: 1, c: 200, n: 100, want: 100},
+		{loadAvg: 2, c: 100, n: 200, want: 99},
+		{loadAvg: 2, c: 200, n: 200, want: 199},
+		{loadAvg: 2, c: 300, n: 200, want: 299},
+		{loadAvg: 2, c: 400, n: 200, want: 399},
+		{loadAvg: 2, c: 401, n: 200, want: 400},
+		{loadAvg: 2, c: 402, n: 200, want: 400},
+	}
+	for n, tt := range tests {
+		t.Run(strconv.Itoa(n), func(t *testing.T) {
+			load_avg.Store(tt.loadAvg)
+			if got := getWeighted(tt.n, tt.c); got != tt.want {
+				t.Errorf("load avg = %f
getWeighted(%d) = %v, want %v", tt.loadAvg, tt.n, got, tt.want) + } + }) + } +} + +func TestNewALimiter(t *testing.T) { + l := 14 + c := 12 + n := 10 + checkDelay = time.Millisecond * 10 + limiter := NewALimiter(l, c, n, false, "", "") + + // inital - load not collected + load_avg.Store(0) + + var i int + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + + for i = 0; i < c; i++ { + require.NoError(t, limiter.Enter(ctx, "render"), "try to lock with load_avg = 0 [%d]", i) + } + + require.Error(t, limiter.Enter(ctx, "render")) + + for i = 0; i < c; i++ { + limiter.Leave(ctx, "render") + } + + cancel() + + // load_avg 0.5 + load_avg.Store(0.5) + k := getWeighted(n, c) + require.Equal(t, 0, k) + + // load_avg 0.6 + load_avg.Store(0.6) + k = getWeighted(n, c) + require.Equal(t, n*6/10, k) + + time.Sleep(checkDelay * 2) + + ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*100) + for i = 0; i < c-k; i++ { + require.NoError(t, limiter.Enter(ctx, "render"), "try to lock with load_avg = 0.5 [%d]", i) + } + + require.Error(t, limiter.Enter(ctx, "render")) + + for i = 0; i < c-k; i++ { + limiter.Leave(ctx, "render") + } + + cancel() + + // // load_avg 1 + load_avg.Store(1) + k = getWeighted(n, c) + require.Equal(t, n, k) + + time.Sleep(checkDelay * 2) + + ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*10) + for i = 0; i < c-n; i++ { + require.NoError(t, limiter.Enter(ctx, "render"), "try to lock with load_avg = 1 [%d]", i) + } + + require.Error(t, limiter.Enter(ctx, "render")) + + for i = 0; i < c-n; i++ { + limiter.Leave(ctx, "render") + } + + cancel() +} + +type testLimiter struct { + l int + c int + n int + concurrencyLevel int +} + +func Benchmark_Limiter_Parallel(b *testing.B) { + tests := []testLimiter{ + // WLimiter + {l: 2000, c: 10, concurrencyLevel: 1}, + {l: 2000, c: 10, concurrencyLevel: 10}, + {l: 2000, c: 10, concurrencyLevel: 20}, + {l: 2000, c: 10, concurrencyLevel: 50}, + {l: 2000, c: 10, concurrencyLevel: 100}, + {l: 2000, c: 10, concurrencyLevel: 1000}, + // ALimiter + {l: 2000, c: 10, n: 50, concurrencyLevel: 1}, + {l: 2000, c: 10, n: 50, concurrencyLevel: 10}, + {l: 2000, c: 10, n: 50, concurrencyLevel: 20}, + {l: 2000, c: 10, n: 50, concurrencyLevel: 50}, + {l: 2000, c: 10, n: 50, concurrencyLevel: 100}, + {l: 2000, c: 10, n: 50, concurrencyLevel: 1000}, + } + + load_avg.Store(0.5) + for _, tt := range tests { + + b.Run(fmt.Sprintf("L%d_C%d_N%d_CONCURRENCY%d", tt.l, tt.c, tt.n, tt.concurrencyLevel), func(b *testing.B) { + var ( + err error + ) + + limiter := NewALimiter(tt.l, tt.c, tt.n, false, "", "") + + wgStart := sync.WaitGroup{} + wg := sync.WaitGroup{} + wgStart.Add(tt.concurrencyLevel) + + ctx := context.Background() + + b.ResetTimer() + for i := 0; i < tt.concurrencyLevel; i++ { + wg.Add(1) + go func() { + wgStart.Done() + wgStart.Wait() + // Test routine + for n := 0; n < b.N; n++ { + errW := limiter.Enter(ctx, "render") + if errW == nil { + limiter.Leave(ctx, "render") + } else { + err = errW + break + } + } + // End test routine + wg.Done() + }() + + } + + wg.Wait() + b.StopTimer() + + if err != nil { + b.Fatal(b, err) + } + }) + } +} diff --git a/limiter/wlimiter.go b/limiter/wlimiter.go index 98ecd43be..fa49cc9af 100644 --- a/limiter/wlimiter.go +++ b/limiter/wlimiter.go @@ -6,7 +6,7 @@ import ( "github.com/lomik/graphite-clickhouse/metrics" ) -// WLimiter provides interface to limit amount of requests/concurrently executing requests +// WLimiter provide limiter amount of 
requests/concurrently executing requests type WLimiter struct { l limiter cL limiter @@ -18,6 +18,9 @@ func NewWLimiter(l, c int, enableMetrics bool, scope, sub string) ServerLimiter if l <= 0 && c <= 0 { return NoopLimiter{} } + if c <= 0 { + return NewLimiter(l, enableMetrics, scope, sub) + } w := &WLimiter{ m: metrics.NewWaitMetric(enableMetrics, scope, sub), @@ -46,7 +49,9 @@ func (sl *WLimiter) Enter(ctx context.Context, s string) (err error) { } if sl.cL.cap > 0 { if sl.cL.enter(ctx, s) != nil { - sl.l.leave(ctx, s) + if sl.l.cap > 0 { + sl.l.leave(ctx, s) + } sl.m.WaitErrors.Add(1) err = ErrTimeout } @@ -65,7 +70,9 @@ func (sl *WLimiter) TryEnter(ctx context.Context, s string) (err error) { } if sl.cL.cap > 0 { if sl.cL.tryEnter(ctx, s) != nil { - sl.l.leave(ctx, s) + if sl.l.cap > 0 { + sl.l.leave(ctx, s) + } sl.m.WaitErrors.Add(1) err = ErrTimeout } @@ -76,7 +83,9 @@ func (sl *WLimiter) TryEnter(ctx context.Context, s string) (err error) { // Frees a slot in limiter func (sl *WLimiter) Leave(ctx context.Context, s string) { - sl.l.leave(ctx, s) + if sl.l.cap > 0 { + sl.l.leave(ctx, s) + } sl.cL.leave(ctx, s) } diff --git a/load_avg/load_avg.go b/load_avg/load_avg.go new file mode 100644 index 000000000..c3cf8118a --- /dev/null +++ b/load_avg/load_avg.go @@ -0,0 +1,33 @@ +package load_avg + +import ( + "math" + + "github.com/msaf1980/go-syncutils/atomic" +) + +var ( + loadAvgStore atomic.Float64 +) + +func Load() float64 { + return loadAvgStore.Load() +} + +func Store(f float64) { + loadAvgStore.Store(f) +} + +func Weight(n int, l float64) int64 { + // (1 / normalized_load_avg - 1) + l = math.Round(10*l) / 10 + if l == 0 { + return 2 * int64(n) + } + l = math.Log10(l) + w := int64(n) - int64(float64(n)*l) + if w < 0 { + return 0 + } + return w +} diff --git a/load_avg/load_avg_default.go b/load_avg/load_avg_default.go new file mode 100644 index 000000000..9749cf6eb --- /dev/null +++ b/load_avg/load_avg_default.go @@ -0,0 +1,20 @@ +//go:build !linux +// +build !linux + +package load_avg + +import ( + "os" + "strings" + "syscall" + + "github.com/msaf1980/go-stringutils" +) + +func Normalized() (float64, error) { + return 0, nil +} + +func CpuCount() (uint64, error) { + return 0, nil +} diff --git a/load_avg/load_avg_linux.go b/load_avg/load_avg_linux.go new file mode 100644 index 000000000..e6de3f297 --- /dev/null +++ b/load_avg/load_avg_linux.go @@ -0,0 +1,41 @@ +//go:build linux +// +build linux + +package load_avg + +import ( + "os" + "strings" + "syscall" + + "github.com/msaf1980/go-stringutils" +) + +func Normalized() (float64, error) { + var info syscall.Sysinfo_t + err := syscall.Sysinfo(&info) + if err != nil { + return 0, err + } + + cpus, err := CpuCount() + if err != nil { + return 0, err + } + + const si_load_shift = 16 + load := float64(info.Loads[0]) / float64(1<= 300 { + return nil, errs.NewErrorWithCode(string(data), resp.StatusCode) + } + return data, err +} + +func HttpPut(url string, body []byte) error { + req, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 2 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return ErrNotFound + } + if resp.StatusCode >= 300 { + data, _ := io.ReadAll(resp.Body) + return errs.NewErrorWithCode(string(data), resp.StatusCode) + } + return nil +} + +func HttpDelete(url string) error { + req, err 
:= http.NewRequest(http.MethodDelete, url, nil) + if err != nil { + return err + } + client := &http.Client{Timeout: 2 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return ErrNotFound + } + if resp.StatusCode >= 300 { + data, _ := io.ReadAll(resp.Body) + return errs.NewErrorWithCode(string(data), resp.StatusCode) + } + return nil +} + +// GetLocalIP returns the non loopback local IP of the host +func GetLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + for _, address := range addrs { + // check the address type and if it is not a loopback the display it + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/tests/consul.sh b/tests/consul.sh new file mode 100755 index 000000000..8e01f99c6 --- /dev/null +++ b/tests/consul.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +if [ "$1" != "" ]; then + wget -q https://releases.hashicorp.com/consul/${1}/consul_${1}_linux_amd64.zip || exit 1 + unzip consul_${1}_linux_amd64.zip || exit 1 +fi + +./consul agent -server -bootstrap -data-dir=/tmp/consul -bind=127.0.0.1 diff --git a/tests/limitera/carbon-clickhouse.conf.tpl b/tests/limitera/carbon-clickhouse.conf.tpl new file mode 100644 index 000000000..41d7ce56d --- /dev/null +++ b/tests/limitera/carbon-clickhouse.conf.tpl @@ -0,0 +1,45 @@ +[common] + +[data] +path = "/etc/carbon-clickhouse/data" +chunk-interval = "1s" +chunk-auto-interval = "" + +[upload.graphite_index] +type = "index" +table = "graphite_index" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_tags] +type = "tagged" +table = "graphite_tags" +threads = 3 +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_reverse] +type = "points-reverse" +table = "graphite_reverse" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[upload.graphite] +type = "points" +table = "graphite" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[tcp] +listen = ":2003" +enabled = true +drop-future = "0s" +drop-past = "0s" + +[logging] +file = "/etc/carbon-clickhouse/carbon-clickhouse.log" +level = "debug" diff --git a/tests/limitera/graphite-clickhouse-internal-aggr-cached.conf.tpl b/tests/limitera/graphite-clickhouse-internal-aggr-cached.conf.tpl new file mode 100644 index 000000000..3caaba4ff --- /dev/null +++ b/tests/limitera/graphite-clickhouse-internal-aggr-cached.conf.tpl @@ -0,0 +1,50 @@ +# Adaptive limiter with throttle queries and limit max queries + +[common] +listen = "{{ .GCH_ADDR }}" +max-cpu = 0 +max-metrics-in-render-answer = 10000 +max-metrics-per-target = 10000 +headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] + +[common.find-cache] +type = "mem" +size-mb = 1 +default-timeout = 300 +short-timeout = 60 +short-duration = "240s" +find-timeout = 120 + +[clickhouse] +url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" +data-timeout = "30s" + +index-table = "graphite_index" +index-use-daily = true +index-timeout = "1m" +internal-aggregation = true + +tagged-table = "graphite_tags" +tagged-autocomplete-days = 1 + +render-max-concurrent = 6 +render-adaptive-queries = 2 +find-max-concurrent = 4 +find-adaptive-queries = 2 +tags-max-concurrent = 4 +tags-adaptive-queries = 2 + +[[data-table]] +# # clickhouse table name +table 
= "graphite" +# # points in table are stored with reverse path +reverse = false +rollup-conf = "auto" + +[[logging]] +logger = "" +file = "{{ .GCH_DIR }}/graphite-clickhouse.log" +level = "info" +encoding = "json" +encoding-time = "iso8601" +encoding-duration = "seconds" diff --git a/tests/limitera/test.toml b/tests/limitera/test.toml new file mode 100644 index 000000000..3124a4e2f --- /dev/null +++ b/tests/limitera/test.toml @@ -0,0 +1,59 @@ +[test] +precision = "10s" + +[[test.clickhouse]] +version = "latest" +dir = "tests/clickhouse/rollup" +delay = "10s" + +[test.carbon_clickhouse] +template = "carbon-clickhouse.conf.tpl" + +[[test.graphite_clickhouse]] +template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" + +########################################################################## +[[test.input]] +name = "test.cache" +points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] + +[[test.input]] +name = "cache;scope=test" +points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] + +########################################################################## +[[test.find_checks]] +query = "test" +result = [{ path = "test", is_leaf = false }] + +[[test.find_checks]] +query = "test.cache" +result = [{ path = "test.cache", is_leaf = true }] + +########################################################################## + +[[test.tags_checks]] +query = "name;scope=test" +result = [ + "cache", +] + +########################################################################## + +[[test.render_checks]] +from = "rnow" +until = "rnow+10" +targets = [ "test.cache" ] + +[[test.render_checks.result]] +name = "test.cache" +path = "test.cache" +consolidation = "avg" +start = "rnow" +stop = "rnow+20" +step = 10 +req_start = "rnow" +req_stop = "rnow+20" +values = [3.0, nan] + +########################################################################## diff --git a/tests/limitermax/carbon-clickhouse.conf.tpl b/tests/limitermax/carbon-clickhouse.conf.tpl new file mode 100644 index 000000000..41d7ce56d --- /dev/null +++ b/tests/limitermax/carbon-clickhouse.conf.tpl @@ -0,0 +1,45 @@ +[common] + +[data] +path = "/etc/carbon-clickhouse/data" +chunk-interval = "1s" +chunk-auto-interval = "" + +[upload.graphite_index] +type = "index" +table = "graphite_index" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_tags] +type = "tagged" +table = "graphite_tags" +threads = 3 +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_reverse] +type = "points-reverse" +table = "graphite_reverse" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[upload.graphite] +type = "points" +table = "graphite" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[tcp] +listen = ":2003" +enabled = true +drop-future = "0s" +drop-past = "0s" + +[logging] +file = "/etc/carbon-clickhouse/carbon-clickhouse.log" +level = "debug" diff --git a/tests/limitermax/graphite-clickhouse-internal-aggr-cached.conf.tpl b/tests/limitermax/graphite-clickhouse-internal-aggr-cached.conf.tpl new file mode 100644 index 000000000..e41cf4a00 --- /dev/null +++ b/tests/limitermax/graphite-clickhouse-internal-aggr-cached.conf.tpl @@ -0,0 +1,47 @@ +# Limiter with limit max connections + +[common] +listen = "{{ .GCH_ADDR }}" +max-cpu = 0 +max-metrics-in-render-answer = 10000 +max-metrics-per-target = 10000 +headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] + +[common.find-cache] +type = "mem" +size-mb = 1 
+default-timeout = 300 +short-timeout = 60 +short-duration = "240s" +find-timeout = 120 + +[clickhouse] +url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" +data-timeout = "30s" + +index-table = "graphite_index" +index-use-daily = true +index-timeout = "1m" +internal-aggregation = true + +tagged-table = "graphite_tags" +tagged-autocomplete-days = 1 + +render-max-queries = 100 +find-max-queries = 50 +tags-max-queries = 50 + +[[data-table]] +# # clickhouse table name +table = "graphite" +# # points in table are stored with reverse path +reverse = false +rollup-conf = "auto" + +[[logging]] +logger = "" +file = "{{ .GCH_DIR }}/graphite-clickhouse.log" +level = "info" +encoding = "json" +encoding-time = "iso8601" +encoding-duration = "seconds" diff --git a/tests/limitermax/test.toml b/tests/limitermax/test.toml new file mode 100644 index 000000000..3124a4e2f --- /dev/null +++ b/tests/limitermax/test.toml @@ -0,0 +1,59 @@ +[test] +precision = "10s" + +[[test.clickhouse]] +version = "latest" +dir = "tests/clickhouse/rollup" +delay = "10s" + +[test.carbon_clickhouse] +template = "carbon-clickhouse.conf.tpl" + +[[test.graphite_clickhouse]] +template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" + +########################################################################## +[[test.input]] +name = "test.cache" +points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] + +[[test.input]] +name = "cache;scope=test" +points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] + +########################################################################## +[[test.find_checks]] +query = "test" +result = [{ path = "test", is_leaf = false }] + +[[test.find_checks]] +query = "test.cache" +result = [{ path = "test.cache", is_leaf = true }] + +########################################################################## + +[[test.tags_checks]] +query = "name;scope=test" +result = [ + "cache", +] + +########################################################################## + +[[test.render_checks]] +from = "rnow" +until = "rnow+10" +targets = [ "test.cache" ] + +[[test.render_checks.result]] +name = "test.cache" +path = "test.cache" +consolidation = "avg" +start = "rnow" +stop = "rnow+20" +step = 10 +req_start = "rnow" +req_stop = "rnow+20" +values = [3.0, nan] + +########################################################################## diff --git a/tests/limiterw/carbon-clickhouse.conf.tpl b/tests/limiterw/carbon-clickhouse.conf.tpl new file mode 100644 index 000000000..41d7ce56d --- /dev/null +++ b/tests/limiterw/carbon-clickhouse.conf.tpl @@ -0,0 +1,45 @@ +[common] + +[data] +path = "/etc/carbon-clickhouse/data" +chunk-interval = "1s" +chunk-auto-interval = "" + +[upload.graphite_index] +type = "index" +table = "graphite_index" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_tags] +type = "tagged" +table = "graphite_tags" +threads = 3 +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_reverse] +type = "points-reverse" +table = "graphite_reverse" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[upload.graphite] +type = "points" +table = "graphite" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[tcp] +listen = ":2003" +enabled = true +drop-future = "0s" +drop-past = "0s" + +[logging] +file = "/etc/carbon-clickhouse/carbon-clickhouse.log" +level = "debug" diff --git 
a/tests/limiterw/graphite-clickhouse-internal-aggr-cached.conf.tpl b/tests/limiterw/graphite-clickhouse-internal-aggr-cached.conf.tpl new file mode 100644 index 000000000..625363d9a --- /dev/null +++ b/tests/limiterw/graphite-clickhouse-internal-aggr-cached.conf.tpl @@ -0,0 +1,47 @@ +# Limiter with throttle queries + +[common] +listen = "{{ .GCH_ADDR }}" +max-cpu = 0 +max-metrics-in-render-answer = 10000 +max-metrics-per-target = 10000 +headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] + +[common.find-cache] +type = "mem" +size-mb = 1 +default-timeout = 300 +short-timeout = 60 +short-duration = "240s" +find-timeout = 120 + +[clickhouse] +url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" +data-timeout = "30s" + +index-table = "graphite_index" +index-use-daily = true +index-timeout = "1m" +internal-aggregation = true + +tagged-table = "graphite_tags" +tagged-autocomplete-days = 1 + +render-max-concurrent = 6 +find-max-concurrent = 4 +tags-max-concurrent = 4 + +[[data-table]] +# # clickhouse table name +table = "graphite" +# # points in table are stored with reverse path +reverse = false +rollup-conf = "auto" + +[[logging]] +logger = "" +file = "{{ .GCH_DIR }}/graphite-clickhouse.log" +level = "info" +encoding = "json" +encoding-time = "iso8601" +encoding-duration = "seconds" diff --git a/tests/limiterw/test.toml b/tests/limiterw/test.toml new file mode 100644 index 000000000..3124a4e2f --- /dev/null +++ b/tests/limiterw/test.toml @@ -0,0 +1,59 @@ +[test] +precision = "10s" + +[[test.clickhouse]] +version = "latest" +dir = "tests/clickhouse/rollup" +delay = "10s" + +[test.carbon_clickhouse] +template = "carbon-clickhouse.conf.tpl" + +[[test.graphite_clickhouse]] +template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" + +########################################################################## +[[test.input]] +name = "test.cache" +points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] + +[[test.input]] +name = "cache;scope=test" +points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] + +########################################################################## +[[test.find_checks]] +query = "test" +result = [{ path = "test", is_leaf = false }] + +[[test.find_checks]] +query = "test.cache" +result = [{ path = "test.cache", is_leaf = true }] + +########################################################################## + +[[test.tags_checks]] +query = "name;scope=test" +result = [ + "cache", +] + +########################################################################## + +[[test.render_checks]] +from = "rnow" +until = "rnow+10" +targets = [ "test.cache" ] + +[[test.render_checks.result]] +name = "test.cache" +path = "test.cache" +consolidation = "avg" +start = "rnow" +stop = "rnow+20" +step = 10 +req_start = "rnow" +req_stop = "rnow+20" +values = [3.0, nan] + +########################################################################## diff --git a/tests/limiterwn/carbon-clickhouse.conf.tpl b/tests/limiterwn/carbon-clickhouse.conf.tpl new file mode 100644 index 000000000..41d7ce56d --- /dev/null +++ b/tests/limiterwn/carbon-clickhouse.conf.tpl @@ -0,0 +1,45 @@ +[common] + +[data] +path = "/etc/carbon-clickhouse/data" +chunk-interval = "1s" +chunk-auto-interval = "" + +[upload.graphite_index] +type = "index" +table = "graphite_index" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_tags] +type = "tagged" +table = "graphite_tags" +threads = 3 +url = 
"{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +cache-ttl = "1h" + +[upload.graphite_reverse] +type = "points-reverse" +table = "graphite_reverse" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[upload.graphite] +type = "points" +table = "graphite" +url = "{{ .CLICKHOUSE_URL }}/" +timeout = "2m30s" +zero-timestamp = false + +[tcp] +listen = ":2003" +enabled = true +drop-future = "0s" +drop-past = "0s" + +[logging] +file = "/etc/carbon-clickhouse/carbon-clickhouse.log" +level = "debug" diff --git a/tests/limiterwn/graphite-clickhouse-internal-aggr-cached.conf.tpl b/tests/limiterwn/graphite-clickhouse-internal-aggr-cached.conf.tpl new file mode 100644 index 000000000..470300818 --- /dev/null +++ b/tests/limiterwn/graphite-clickhouse-internal-aggr-cached.conf.tpl @@ -0,0 +1,50 @@ +# Limiter with throttle queries and limit max queries + +[common] +listen = "{{ .GCH_ADDR }}" +max-cpu = 0 +max-metrics-in-render-answer = 10000 +max-metrics-per-target = 10000 +headers-to-log = [ "X-Ctx-Carbonapi-Uuid" ] + +[common.find-cache] +type = "mem" +size-mb = 1 +default-timeout = 300 +short-timeout = 60 +short-duration = "240s" +find-timeout = 120 + +[clickhouse] +url = "{{ .CLICKHOUSE_URL }}/?max_rows_to_read=500000000&max_result_bytes=1073741824&readonly=2&log_queries=1" +data-timeout = "30s" + +index-table = "graphite_index" +index-use-daily = true +index-timeout = "1m" +internal-aggregation = true + +tagged-table = "graphite_tags" +tagged-autocomplete-days = 1 + +render-max-queries = 100 +render-max-concurrent = 6 +find-max-queries = 50 +find-max-concurrent = 4 +tags-max-queries = 50 +tags-max-concurrent = 4 + +[[data-table]] +# # clickhouse table name +table = "graphite" +# # points in table are stored with reverse path +reverse = false +rollup-conf = "auto" + +[[logging]] +logger = "" +file = "{{ .GCH_DIR }}/graphite-clickhouse.log" +level = "info" +encoding = "json" +encoding-time = "iso8601" +encoding-duration = "seconds" diff --git a/tests/limiterwn/test.toml b/tests/limiterwn/test.toml new file mode 100644 index 000000000..3124a4e2f --- /dev/null +++ b/tests/limiterwn/test.toml @@ -0,0 +1,59 @@ +[test] +precision = "10s" + +[[test.clickhouse]] +version = "latest" +dir = "tests/clickhouse/rollup" +delay = "10s" + +[test.carbon_clickhouse] +template = "carbon-clickhouse.conf.tpl" + +[[test.graphite_clickhouse]] +template = "graphite-clickhouse-internal-aggr-cached.conf.tpl" + +########################################################################## +[[test.input]] +name = "test.cache" +points = [{value = 1.0, time = "midnight-270s"}, {value = 3.0, time = "now"}] + +[[test.input]] +name = "cache;scope=test" +points = [{value = 2.0, time = "midnight-270s"}, {value = 4.0, time = "now"}] + +########################################################################## +[[test.find_checks]] +query = "test" +result = [{ path = "test", is_leaf = false }] + +[[test.find_checks]] +query = "test.cache" +result = [{ path = "test.cache", is_leaf = true }] + +########################################################################## + +[[test.tags_checks]] +query = "name;scope=test" +result = [ + "cache", +] + +########################################################################## + +[[test.render_checks]] +from = "rnow" +until = "rnow+10" +targets = [ "test.cache" ] + +[[test.render_checks.result]] +name = "test.cache" +path = "test.cache" +consolidation = "avg" +start = "rnow" +stop = "rnow+20" +step = 10 +req_start = "rnow" +req_stop = "rnow+20" +values = [3.0, nan] + 
+########################################################################## diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go deleted file mode 100644 index 70758052f..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/chanmutex.go +++ /dev/null @@ -1,67 +0,0 @@ -package lock - -import ( - "context" - "time" -) - -// ChanMutex is the struct implementing Mutex by channel. -type ChanMutex struct { - lockChan chan struct{} -} - -// NewChanMutex returns ChanMutex. -func NewChanMutex() *ChanMutex { - return &ChanMutex{ - lockChan: make(chan struct{}, 1), - } -} - -// Lock acquires the lock. -// If it is currently held by others, Lock will wait until it has a chance to acquire it. -func (m *ChanMutex) Lock() { - m.lockChan <- struct{}{} -} - -// Unlock releases the lock. -func (m *ChanMutex) Unlock() { - <-m.lockChan -} - -// TryLock attempts to acquire the lock without blocking. -// Return false if someone is holding it now. -func (m *ChanMutex) TryLock() bool { - select { - case m.lockChan <- struct{}{}: - return true - default: - return false - } -} - -// LockWithContext attempts to acquire the lock, blocking until resources -// are available or ctx is done (timeout or cancellation). -func (m *ChanMutex) LockWithContext(ctx context.Context) bool { - select { - case m.lockChan <- struct{}{}: - return true - case <-ctx.Done(): - // timeout or cancellation - return false - } -} - -// LockWithTimeout attempts to acquire the lock within a period of time. -// Return false if spending time is more than duration and no chance to acquire it. -func (m *ChanMutex) LockWithTimeout(duration time.Duration) bool { - - t := time.After(duration) - - select { - case m.lockChan <- struct{}{}: - return true - case <-t: - // timeout - return false - } -} diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go b/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go deleted file mode 100644 index a2d972c2c..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/condchan.go +++ /dev/null @@ -1,165 +0,0 @@ -package lock - -import ( - "context" - "sync" - "time" -) - -// CondChan implements a condition variable, a rendezvous point for goroutines waiting for or announcing the occurrence -// of an event. -// -// A Cond must not be copied after first use. -type CondChan struct { - _ noCopy - - ch chan struct{} - L sync.Mutex -} - -func (cc *CondChan) waitCh() <-chan struct{} { - - if cc.ch == nil { - cc.ch = make(chan struct{}) - } - ch := cc.ch - - return ch - -} - -// Wait atomically unlocks cc.Lockand suspends execution of the calling goroutine. -// It is required for the caller to hold cc.Lock during the call. -func (cc *CondChan) Wait() { - - ch := cc.waitCh() - - cc.L.Unlock() - - <-ch - - cc.L.Lock() -} - -// WaitU atomically unlocks cc.Lockand suspends execution of the calling goroutine. -// It is required for the caller to hold cc.Lock during the call. -// After execution, cc.Lock is unlocked -func (cc *CondChan) WaitU() { - - ch := cc.waitCh() - - cc.L.Unlock() - - <-ch - -} - -// WaitWithContext attempts to wait with context. -// It is required for the caller to hold cc.Lock during the call. -func (cc *CondChan) WaitWithContext(ctx context.Context) (ok bool) { - - ch := cc.waitCh() - - cc.L.Unlock() - - select { - case <-ch: - cc.L.Lock() - ok = true - case <-ctx.Done(): - // timeout or cancellation - } - - return - -} - -// WaitUWithContext attempts to wait with context. 
-// It is required for the caller to hold cc.Lock during the call. -// After execution, cc.Lock is unlocked -func (cc *CondChan) WaitUWithContext(ctx context.Context) (ok bool) { - - ch := cc.waitCh() - - cc.L.Unlock() - - select { - case <-ch: - ok = true - case <-ctx.Done(): - // timeout or cancellation - } - - return - -} - -// WaitWithTimeout attempts to wait with timeout. -// After later resuming execution, Wait locks cc.Lock before returning. -func (cc *CondChan) WaitWithTimeout(duration time.Duration) (ok bool) { - - t := time.After(duration) - - ch := cc.waitCh() - - cc.L.Unlock() - - select { - case <-ch: - cc.L.Lock() - ok = true - case <-t: - // timeout - } - - return - -} - -// WaitUWithTimeout attempts to wait with timeout. -// After later resuming execution, Wait locks cc.Lock before returning. -// After execution, cc.Lock is unlocked -func (cc *CondChan) WaitUWithTimeout(duration time.Duration) (ok bool) { - - t := time.After(duration) - - ch := cc.waitCh() - - cc.L.Unlock() - - select { - case <-ch: - ok = true - case <-t: - // timeout - } - - return - -} - -// Signal wakes one goroutine waiting on cc, if there is any. -// It is required for the caller to hold cc.Lock during the call. -func (cc *CondChan) Signal() { - - if cc.ch == nil { - return - } - select { - case cc.ch <- struct{}{}: - default: - } - -} - -// Broadcast wakes all goroutines waiting on cc. -// It is required for the caller to hold cc.Lock during the call. -func (cc *CondChan) Broadcast() { - - if cc.ch == nil { - return - } - close(cc.ch) - cc.ch = make(chan struct{}) - -} diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go deleted file mode 100644 index 0e257f758..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/mutex.go +++ /dev/null @@ -1,165 +0,0 @@ -package lock - -import ( - "context" - "sync" - "sync/atomic" - "time" -) - -const tmLocked int32 = 1 // lock - -// Mutex - Try Mutex -type Mutex struct { - state int32 - mx sync.Mutex - ch chan struct{} -} - -func (m *Mutex) chGet() chan struct{} { - - m.mx.Lock() - if m.ch == nil { - m.ch = make(chan struct{}, 1) - } - r := m.ch - m.mx.Unlock() - return r - -} - -func (m *Mutex) tryChGet() (chan struct{}, bool) { - - if !m.mx.TryLock() { - return nil, false - } - if m.ch == nil { - m.ch = make(chan struct{}, 1) - } - r := m.ch - m.mx.Unlock() - return r, true - -} - -func (m *Mutex) chClose() { - // it's need only when exists parallel - // to make faster need add counter to add drop listners of chan - - var o chan struct{} - m.mx.Lock() - if m.ch != nil { - o = m.ch - m.ch = nil - } - m.mx.Unlock() - - if o != nil { - close(o) - } - -} - -// Lock - locks mutex -func (m *Mutex) Lock() { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return - } - - // Slow way - m.lockS() -} - -// TryLock - try locks mutex -func (m *Mutex) TryLock() bool { - return atomic.CompareAndSwapInt32(&m.state, 0, -1) -} - -// Unlock - unlocks mutex -func (m *Mutex) Unlock() { - if atomic.CompareAndSwapInt32(&m.state, -1, 0) { - m.chClose() - return - } - - panic("Mutex: Unlock fail") -} - -// LockWithContext - try locks mutex with context -func (m *Mutex) LockWithContext(ctx context.Context) bool { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - return true - } - - // Slow way - return m.lockST(ctx) -} - -// LockD - try locks mutex with time duration -func (m *Mutex) LockWithTimeout(d time.Duration) bool { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - return true - } - - // Slow 
way - return m.lockSD(d) -} - -func (m *Mutex) lockS() { - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return - } - - select { - case <-ch: - ch = m.chGet() - } - } - -} - -func (m *Mutex) lockST(ctx context.Context) bool { - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return true - } - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - return false - } - - } -} - -func (m *Mutex) lockSD(d time.Duration) bool { - // may be use context.WithTimeout(context.Background(), d) however NO it's not fun - t := time.After(d) - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return true - } - - select { - case <-ch: - ch = m.chGet() - case <-t: - return false - } - - } -} diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go b/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go deleted file mode 100644 index 88f6fc3f2..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/nocopy.go +++ /dev/null @@ -1,14 +0,0 @@ -package lock - -// noCopy may be added to structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -// -// Note that it must not be embedded, due to the Lock and Unlock methods. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. -func (*noCopy) Lock() {} -func (*noCopy) Unlock() {} diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go deleted file mode 100644 index 5e424b12f..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/pmutex.go +++ /dev/null @@ -1,491 +0,0 @@ -package lock - -import ( - "context" - "fmt" - "sync" - "time" -) - -// PMutex - Read Write Try Mutex with change priority (Promote and Reduce) -// F methods (like LockF and TryLockF) Locks mutex if mutex already locked then this methods will be first in lock queue -// Promote - lock mutex from RLock to Lock -// Reduce - lock mutex from Lock to RLock -type PMutex struct { - state int32 - mx sync.Mutex - ch chan struct{} -} - -func (m *PMutex) chGet() chan struct{} { - - m.mx.Lock() - if m.ch == nil { - m.ch = make(chan struct{}, 1) - } - r := m.ch - m.mx.Unlock() - return r - -} - -// chClose - unlocks other routines needs mx.Lock -func (m *PMutex) chClose() { - // it's need only when exists parallel - // to make faster need add counter to add drop listners of chan - - var o chan struct{} - - if m.ch != nil { - o = m.ch - m.ch = nil - } - if o != nil { - close(o) - } - -} - -// Lock - locks mutex -func (m *PMutex) Lock() { - - m.mx.Lock() - - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return - } - m.mx.Unlock() - // Slow way - m.lockS() -} - -// TryLock - try locks mutex -func (m *PMutex) TryLock() (ok bool) { - - m.mx.Lock() - - if m.state == 0 { - m.state = -1 - ok = true - } - - m.mx.Unlock() - - return -} - -// Unlock - unlocks mutex -func (m *PMutex) Unlock() { - - m.mx.Lock() - - if m.state == -1 { - m.state = 0 - m.chClose() - } else { - panic(fmt.Sprintf("PMutex: Unlock fail (%v)", m.state)) - } - m.mx.Unlock() -} - -// Reduce - lock mutex from Lock to RLock -func (m *PMutex) Reduce() { - - m.mx.Lock() - - if m.state == -1 { - m.state = 1 - m.chClose() - } else { - panic(fmt.Sprintf("PMutex: Reduce fail (%v)", m.state)) - } - m.mx.Unlock() -} - -// LockWithContext - try locks mutex with context -func (m *PMutex) LockWithContext(ctx 
context.Context) bool { - - m.mx.Lock() - - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.lockST(ctx) -} - -// LockWithTimeout - try locks mutex with time duration -func (m *PMutex) LockWithTimeout(d time.Duration) bool { - m.mx.Lock() - - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.lockSD(d) -} - -// RLock - read locks mutex -func (m *PMutex) RLock() { - m.mx.Lock() - - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return - } - m.mx.Unlock() - - // Slow way - m.rlockS() -} - -// TryRLock - read locks mutex -func (m *PMutex) TryRLock() (ok bool) { - m.mx.Lock() - - if m.state >= 0 { - m.state++ - ok = true - } - m.mx.Unlock() - - return -} - -// RUnlock - unlocks mutex -func (m *PMutex) RUnlock() { - - m.mx.Lock() - - if m.state > 0 { - m.state-- - if m.state <= 1 { - m.chClose() - } - } else { - panic(fmt.Sprintf("PMutex: RUnlock fail (%v)", m.state)) - } - - m.mx.Unlock() -} - -// RLockWithContext - try read locks mutex with context -func (m *PMutex) RLockWithContext(ctx context.Context) bool { - m.mx.Lock() - - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.rlockST(ctx) -} - -// RLockWithTimeout - try read locks mutex with time duration -func (m *PMutex) RLockWithTimeout(d time.Duration) bool { - m.mx.Lock() - - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.rlockSD(d) -} - -func (m *PMutex) lockS() { - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - } - } -} - -func (m *PMutex) lockST(ctx context.Context) bool { - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - return false - } - } -} - -func (m *PMutex) lockSD(d time.Duration) bool { - // may be use context.WithTimeout(context.Background(), d) however NO it's not fun - t := time.After(d) - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state == 0 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - case <-t: - return false - } - - } -} - -func (m *PMutex) rlockS() { - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - } - - } -} - -func (m *PMutex) rlockST(ctx context.Context) bool { - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return true - } - m.mx.Unlock() - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - return false - } - - } -} - -func (m *PMutex) rlockSD(d time.Duration) bool { - - t := time.After(d) - - ch := m.chGet() - for { - m.mx.Lock() - if m.state >= 0 { - m.state++ - m.mx.Unlock() - return true - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - case <-t: - return false - } - - } -} - -// Promote - lock mutex from RLock to Lock -// !!! 
use carefully - can produce deadlock, if promote from two grouroutines -func (m *PMutex) Promote() { - m.mx.Lock() - - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return - } - m.mx.Unlock() - - // Slow way - m.promoteS() -} - -// TryPromote - lock mutex from RLock to Lock -func (m *PMutex) TryPromote() (ok bool) { - m.mx.Lock() - - if m.state == 1 { - m.state = -1 - ok = true - } - m.mx.Unlock() - - return -} - -// PromoteWithContext - try locks mutex from RLock to Lock with context -// !!! If returns false then mutex is UNLOCKED if true mutex is locked as Lock -func (m *PMutex) PromoteWithContext(ctx context.Context) bool { - m.mx.Lock() - - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.promoteST(ctx) -} - -// PromoteWithTimeout - try locks mutex from RLock to Lock with time duration -// !!! If returns false then mutex is UNLOCKED if true mutex is locked as Lock -func (m *PMutex) PromoteWithTimeout(d time.Duration) bool { - m.mx.Lock() - - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - // Slow way - return m.promoteSD(d) -} - -func (m *PMutex) promoteS() { - - ch := m.chGet() - for { - m.mx.Lock() - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - } - } - -} - -func (m *PMutex) promoteST(ctx context.Context) bool { - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return true - } - m.mx.Unlock() - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - m.RUnlock() - return false - } - - } - -} - -func (m *PMutex) promoteSD(d time.Duration) bool { - - t := time.After(d) - - ch := m.chGet() - for { - - m.mx.Lock() - if m.state == 1 { - m.state = -1 - m.mx.Unlock() - return true - - } - m.mx.Unlock() - - select { - case <-ch: - ch = m.chGet() - case <-t: - m.RUnlock() - return false - } - - } -} diff --git a/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go b/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go deleted file mode 100644 index 7f0c89ed1..000000000 --- a/vendor/github.com/msaf1980/go-syncutils/lock/rwmutex.go +++ /dev/null @@ -1,293 +0,0 @@ -package lock - -import ( - "context" - "sync" - "sync/atomic" - "time" -) - -// RWMutex - Read Write and Try Mutex with change priority (Promote and Reduce) -type RWMutex struct { - state int32 - mx sync.Mutex - ch chan struct{} -} - -func (m *RWMutex) chGet() chan struct{} { - m.mx.Lock() - if m.ch == nil { - m.ch = make(chan struct{}, 1) - } - r := m.ch - m.mx.Unlock() - return r -} - -func (m *RWMutex) tryChGet() (chan struct{}, bool) { - - if !m.mx.TryLock() { - return nil, false - } - if m.ch == nil { - m.ch = make(chan struct{}, 1) - } - r := m.ch - m.mx.Unlock() - - return r, true - -} - -func (m *RWMutex) chClose() { - // it's need only when exists parallel - // to make faster need add counter to add drop listners of chan - - var o chan struct{} - m.mx.Lock() - if m.ch != nil { - o = m.ch - m.ch = nil - } - m.mx.Unlock() - - if o != nil { - close(o) - } - -} - -// Lock - locks mutex -func (m *RWMutex) Lock() { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return - } - - // Slow way - m.lockS() -} - -// TryLock - try locks mutex -func (m *RWMutex) TryLock() bool { - return atomic.CompareAndSwapInt32(&m.state, 0, -1) -} - -// Unlock - unlocks mutex -func (m *RWMutex) Unlock() { - if atomic.CompareAndSwapInt32(&m.state, -1, 0) { - m.chClose() - 
return - } - - panic("RWMutex: Unlock fail") -} - -// LockWithContext - try locks mutex with context -func (m *RWMutex) LockWithContext(ctx context.Context) bool { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - return true - } - - // Slow way - return m.lockST(ctx) -} - -// LockD - try locks mutex with time duration -func (m *RWMutex) LockWithTimeout(d time.Duration) bool { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - return true - } - - // Slow way - return m.lockSD(d) -} - -// RLock - read locks mutex -func (m *RWMutex) RLock() { - k := atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return - } - - // Slow way - m.rlockS() -} - -// TryRLock - try read locks mutex -func (m *RWMutex) TryRLock() bool { - k := atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return true - } else if k == -1 { - return false - } - - // Slow way - if m.mx.TryLock() { - k := atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - m.mx.Unlock() - return true - } else if k == -1 { - m.mx.Unlock() - return false - } - } - - return false -} - -// RUnlock - unlocks mutex -func (m *RWMutex) RUnlock() { - i := atomic.AddInt32(&m.state, -1) - if i > 0 { - return - } else if i == 0 { - m.chClose() - return - } - - panic("RWMutex: RUnlock fail") -} - -// RLockWithContext - try read locks mutex with context -func (m *RWMutex) RLockWithContext(ctx context.Context) bool { - k := atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return true - } - - // Slow way - return m.rlockST(ctx) -} - -// RLockWithDuration - try read locks mutex with time duration -func (m *RWMutex) RLockWithTimeout(d time.Duration) bool { - k := atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return true - } - - // Slow way - return m.rlockSD(d) -} - -func (m *RWMutex) lockS() { - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return - } - - select { - case <-ch: - ch = m.chGet() - } - } - -} - -func (m *RWMutex) lockST(ctx context.Context) bool { - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return true - } - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - return false - } - - } -} - -func (m *RWMutex) lockSD(d time.Duration) bool { - // may be use context.WithTimeout(context.Background(), d) however NO it's not fun - t := time.After(d) - ch := m.chGet() - for { - if atomic.CompareAndSwapInt32(&m.state, 0, -1) { - - return true - } - - select { - case <-ch: - ch = m.chGet() - case <-t: - return false - } - - } -} - -func (m *RWMutex) rlockS() { - - ch := m.chGet() - var k int32 - for { - k = atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return - } - - select { - case <-ch: - ch = m.chGet() - } - - } - -} - -func (m *RWMutex) rlockST(ctx context.Context) bool { - ch := m.chGet() - var k int32 - for { - k = atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return true - } - - if ctx == nil { - return false - } - - select { - case <-ch: - ch = m.chGet() - case <-ctx.Done(): - return false - } - - } -} - -func (m *RWMutex) rlockSD(d time.Duration) bool { - ch := m.chGet() - t := time.After(d) - var k int32 - for { - k = atomic.LoadInt32(&m.state) - if k >= 0 && atomic.CompareAndSwapInt32(&m.state, k, k+1) { - return true - } - - select { - 
case <-ch: - ch = m.chGet() - case <-t: - return false - } - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index c1f7e632c..7b98bcfdc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -221,7 +221,6 @@ github.com/msaf1980/go-stringutils # github.com/msaf1980/go-syncutils v0.0.3 ## explicit; go 1.18 github.com/msaf1980/go-syncutils/atomic -github.com/msaf1980/go-syncutils/lock # github.com/msaf1980/go-timeutils v0.0.3 ## explicit; go 1.19 github.com/msaf1980/go-timeutils/duration @@ -234,8 +233,6 @@ github.com/oklog/ulid # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml -# github.com/pierrec/lz4 v2.6.0+incompatible -## explicit # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -340,8 +337,6 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/zentures/cityhash v0.0.0-20131128155616-cdd6a94144ab -## explicit # go.mongodb.org/mongo-driver v1.10.2 ## explicit; go 1.10 go.mongodb.org/mongo-driver/bson @@ -450,10 +445,6 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c -## explicit -# google.golang.org/grpc v1.50.1 -## explicit # google.golang.org/protobuf v1.28.1 ## explicit; go 1.11 google.golang.org/protobuf/encoding/prototext From 6d1bb4554113652d2021fc4fbc14d25e8ee48d67 Mon Sep 17 00:00:00 2001 From: msaf1980 Date: Tue, 25 Apr 2023 11:43:37 +0500 Subject: [PATCH 3/5] alive: revert to non-checked version and add /health endpoint --- graphite-clickhouse.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go index 3881fdfa8..db0184f3e 100644 --- a/graphite-clickhouse.go +++ b/graphite-clickhouse.go @@ -4,6 +4,7 @@ import ( "encoding/json" "flag" "fmt" + "io" "log" "math/rand" "net/http" @@ -215,7 +216,11 @@ func main() { mux.Handle("/render/", app.Handler(render.NewHandler(cfg))) mux.Handle("/tags/autoComplete/tags", app.Handler(autocomplete.NewTags(cfg))) mux.Handle("/tags/autoComplete/values", app.Handler(autocomplete.NewValues(cfg))) - mux.Handle("/alive", app.Handler(healthcheck.NewHandler(cfg))) + mux.HandleFunc("/alive", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + io.WriteString(w, "Graphite-clickhouse is alive.\n") + }) + mux.Handle("/health", app.Handler(healthcheck.NewHandler(cfg))) mux.HandleFunc("/debug/config", func(w http.ResponseWriter, r *http.Request) { status := http.StatusOK start := time.Now() From f127d52d9a15ac2b1de1aa8f559ff524a32ad5eb Mon Sep 17 00:00:00 2001 From: msaf1980 Date: Tue, 25 Apr 2023 13:32:47 +0500 Subject: [PATCH 4/5] sd: add cleanup for dead hosts --- graphite-clickhouse.go | 16 +++++++++-- sd/nginx/nginx.go | 35 +++++++++++++++++++---- sd/nginx/nginx_test.go | 65 +++++++++++++++++++++++++++++++++--------- sd/utils/utils.go | 1 + 4 files changed, 94 insertions(+), 23 deletions(-) diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go index db0184f3e..cba394342 100644 --- a/graphite-clickhouse.go +++ b/graphite-clickhouse.go @@ -100,6 +100,7 @@ func main() { ) sdList := flag.Bool("sd-list", false, "List registered nodes in SD") + sdClean := flag.Bool("sd-clean", false, "Cleanup registered nodes in SD") printVersion := flag.Bool("version", false, "Print version") verbose := flag.Bool("verbose", 
false, "Verbose (print config on startup)") @@ -128,7 +129,7 @@ func main() { return } - if *sdList { + if *sdList || *sdClean { if cfg.Common.SD != "" && cfg.NeedLoadAvgColect() { var sd sd.SD logger := zapwriter.Default() @@ -136,11 +137,20 @@ func main() { case config.SDNginx: sd = nginx.New(cfg.Common.SD, cfg.Common.SDNamespace, "", logger) default: - panic("serive discovery type not registered") + panic("service discovery type not registered") } + ts := time.Now().Unix() - 3600 if nodes, err := sd.Nodes(); err == nil { for _, node := range nodes { - fmt.Printf("%s: %s\n", node.Key, node.Value) + if *sdClean && node.Flags > 0 { + if ts > node.Flags { + fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } else { + fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } + } else { + fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + } } } else { log.Fatal(err) diff --git a/sd/nginx/nginx.go b/sd/nginx/nginx.go index 39ae4810d..aa841fe8a 100644 --- a/sd/nginx/nginx.go +++ b/sd/nginx/nginx.go @@ -5,6 +5,7 @@ import ( "errors" "strconv" "strings" + "time" "github.com/lomik/graphite-clickhouse/sd/utils" @@ -17,6 +18,8 @@ var ( json = jsoniter.ConfigCompatibleWithStandardLibrary ErrNoKey = errors.New("list key no found") ErrInvalidKey = errors.New("list key is invalid") + + timeNow = time.Now ) func splitNode(node string) (dc, host, listen string, ok bool) { @@ -51,7 +54,7 @@ func New(url, namespace, hostname string, logger *zap.Logger) *Nginx { sd := &Nginx{ logger: logger, body: make([]byte, 128), - backupBody: []byte(`{"backup":1, "max_fails":0}`), + backupBody: []byte(`{"backup":1,"max_fails":0}`), nsEnd: "upstreams/" + namespace + "/", hostname: hostname, } @@ -188,19 +191,27 @@ func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { if s, ok := i.(string); ok { if strings.HasPrefix(s, sd.nsEnd) { s = s[len(sd.nsEnd):] + kv := utils.KV{Key: s} if i, ok := jNode["Value"]; ok { if v, ok := i.(string); ok { d, err := base64.StdEncoding.DecodeString(v) if err != nil { return nil, err } - nodes = append(nodes, utils.KV{Key: s, Value: stringutils.UnsafeString(d)}) - } else { - nodes = append(nodes, utils.KV{Key: s, Value: ""}) + kv.Value = stringutils.UnsafeString(d) + } + } + if i, ok := jNode["Flags"]; ok { + switch v := i.(type) { + case float64: + kv.Flags = int64(v) + case int: + kv.Flags = int64(v) + case int64: + kv.Flags = v } - } else { - nodes = append(nodes, utils.KV{Key: s, Value: ""}) } + nodes = append(nodes, kv) } else { return nil, ErrInvalidKey } @@ -227,11 +238,20 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { } sd.url.WriteString(port) + // add custom query flags + sd.url.WriteByte('?') + sd.url.WriteString("flags=") + sd.url.WriteInt(timeNow().Unix(), 10) + if err = utils.HttpPut(sd.url.String(), sd.body); err != nil { sd.logger.Error("put", zap.String("address", sd.url.String()[sd.pos:]), zap.Error(err)) return } } else { + flags := make([]byte, 0, 32) + flags = append(flags, "?flags="...) 
+ flags = strconv.AppendInt(flags, timeNow().Unix(), 10) + for i := 0; i < len(dc); i++ { // cfg.Common.SDDc sd.url.Truncate(sd.pos) @@ -245,6 +265,9 @@ func (sd *Nginx) update(ip, port string, dc []string) (err error) { } sd.url.WriteString(port) + // add custom query flags + sd.url.Write(flags) + if i == 0 { if nErr := utils.HttpPut(sd.url.String(), sd.body); nErr != nil { sd.logger.Error( diff --git a/sd/nginx/nginx_test.go b/sd/nginx/nginx_test.go index d63e51f2b..076c4736e 100644 --- a/sd/nginx/nginx_test.go +++ b/sd/nginx/nginx_test.go @@ -6,6 +6,7 @@ package nginx import ( "sort" "testing" + "time" "github.com/lomik/graphite-clickhouse/sd/utils" "github.com/lomik/zapwriter" @@ -27,6 +28,10 @@ var ( ) func TestNginx(t *testing.T) { + timeNow = func() time.Time { + return time.Unix(1682408721, 0) + } + logger := zapwriter.Default() sd1 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname1, logger) @@ -126,6 +131,17 @@ func TestNginx(t *testing.T) { "_/test_host2/192.168.0.1:9090": `{"weight":25,"max_fails":0}`, }, nodesMap, ) + + nodesV, err := sd2.Nodes() + require.NoError(t, err) + assert.Equal( + t, []utils.KV{ + {Key: "_/test_host1/192.168.0.1:9090", Value: `{"weight":10,"max_fails":0}`, Flags: 1682408721}, + {Key: "_/test_host2/192.168.0.1:9090", Value: `{"weight":25,"max_fails":0}`, Flags: 1682408721}, + {Key: "_/test_host2/192.168.1.25:9090", Value: `{"weight":25,"max_fails":0}`, Flags: 1682408721}, + }, nodesV, + ) + require.NoError(t, sd2.Clear(ip2, port)) nodesMap, err = sd2.ListMap() require.NoError(t, err) @@ -148,6 +164,10 @@ func TestNginx(t *testing.T) { } func TestNginxDC(t *testing.T) { + timeNow = func() time.Time { + return time.Unix(1682408721, 0) + } + logger := zapwriter.Default() sd1 := New("http://127.0.0.1:8500/v1/kv/upstreams", "graphite", hostname1, logger) @@ -182,8 +202,8 @@ func TestNginxDC(t *testing.T) { assert.Equal( t, map[string]string{ "dc1/test_host1/192.168.0.1:9090": `{"weight":10,"max_fails":0}`, - "dc2/test_host1/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host1/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, + "dc2/test_host1/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host1/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) @@ -205,8 +225,8 @@ func TestNginxDC(t *testing.T) { assert.Equal( t, map[string]string{ "dc2/test_host2/192.168.1.25:9090": `{"weight":21,"max_fails":0}`, - "dc1/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, + "dc1/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) @@ -228,8 +248,8 @@ func TestNginxDC(t *testing.T) { assert.Equal( t, map[string]string{ "dc2/test_host2/192.168.1.25:9090": `{"weight":25,"max_fails":0}`, - "dc1/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, + "dc1/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) @@ -245,8 +265,8 @@ func TestNginxDC(t *testing.T) { assert.Equal( t, map[string]string{ "dc1/test_host1/192.168.0.1:9090": `{"weight":10,"max_fails":0}`, - "dc2/test_host1/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host1/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, + "dc2/test_host1/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, + 
"dc3/test_host1/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) @@ -258,21 +278,38 @@ func TestNginxDC(t *testing.T) { assert.Equal( t, map[string]string{ "dc2/test_host2/192.168.1.25:9090": `{"weight":25,"max_fails":0}`, - "dc1/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, + "dc1/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, "dc2/test_host2/192.168.0.1:9090": `{"weight":25,"max_fails":0}`, - "dc1/test_host2/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host2/192.168.0.1:9090": `{"backup":1, "max_fails":0}`, + "dc1/test_host2/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host2/192.168.0.1:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) + + nodesV, err := sd2.Nodes() + require.NoError(t, err) + assert.Equal( + t, []utils.KV{ + {Key: "dc1/test_host1/192.168.0.1:9090", Value: `{"weight":10,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc1/test_host2/192.168.0.1:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc1/test_host2/192.168.1.25:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc2/test_host1/192.168.0.1:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc2/test_host2/192.168.0.1:9090", Value: `{"weight":25,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc2/test_host2/192.168.1.25:9090", Value: `{"weight":25,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc3/test_host1/192.168.0.1:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc3/test_host2/192.168.0.1:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + {Key: "dc3/test_host2/192.168.1.25:9090", Value: `{"backup":1,"max_fails":0}`, Flags: 1682408721}, + }, nodesV, + ) + require.NoError(t, sd2.Clear(ip2, port)) nodesMap, err = sd2.ListMap() require.NoError(t, err) assert.Equal( t, map[string]string{ "dc2/test_host2/192.168.1.25:9090": `{"weight":25,"max_fails":0}`, - "dc1/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, - "dc3/test_host2/192.168.1.25:9090": `{"backup":1, "max_fails":0}`, + "dc1/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, + "dc3/test_host2/192.168.1.25:9090": `{"backup":1,"max_fails":0}`, }, nodesMap, ) diff --git a/sd/utils/utils.go b/sd/utils/utils.go index c68776cd3..defe3d907 100644 --- a/sd/utils/utils.go +++ b/sd/utils/utils.go @@ -18,6 +18,7 @@ var ( type KV struct { Key string Value string + Flags int64 } func HttpGet(url string) ([]byte, error) { From 6d7fd4666ee1070ab351ffc17ff3b11e4c985577 Mon Sep 17 00:00:00 2001 From: Michail Safronov Date: Tue, 8 Aug 2023 17:19:48 +0500 Subject: [PATCH 5/5] sd: cleanup on shutdown --- graphite-clickhouse.go | 59 +++++++++++++++++++++++++++++++++++++----- sd/nginx/nginx.go | 25 ++++++++++++------ sd/register.go | 34 +++++++++++++++++++----- 3 files changed, 97 insertions(+), 21 deletions(-) diff --git a/graphite-clickhouse.go b/graphite-clickhouse.go index cba394342..3f89efb2f 100644 --- a/graphite-clickhouse.go +++ b/graphite-clickhouse.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "flag" "fmt" @@ -9,8 +10,12 @@ import ( "math/rand" "net/http" _ "net/http/pprof" + "os" + "os/signal" "runtime" "runtime/debug" + "sync" + "syscall" "time" "github.com/lomik/zapwriter" @@ -80,7 +85,10 @@ func (app *App) Handler(handler http.Handler) http.Handler { }) } -var BuildVersion = "(development build)" +var ( + 
BuildVersion = "(development build)" + srv *http.Server +) func main() { rand.Seed(time.Now().UnixNano()) @@ -137,7 +145,7 @@ func main() { case config.SDNginx: sd = nginx.New(cfg.Common.SD, cfg.Common.SDNamespace, "", logger) default: - panic("service discovery type not registered") + panic(fmt.Errorf("service discovery type %q is not registered", cfg.Common.SDType.String())) } ts := time.Now().Unix() - 3600 if nodes, err := sd.Nodes(); err == nil { @@ -145,6 +153,7 @@ func main() { if *sdClean && node.Flags > 0 { if ts > node.Flags { fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) + // sd.Delete(node.Key, node.Value) } else { fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)) } @@ -155,6 +164,8 @@ func main() { } else { log.Fatal(err) } + } else { + fmt.Fprintln(os.Stderr, "SD not enabled") } return } @@ -261,10 +272,46 @@ func main() { metrics.Graphite.Start(nil) } - if cfg.NeedLoadAvgColect() { - sdLogger := localManager.Logger("service discovery") - go sd.Register(cfg, sdLogger) + var exitWait sync.WaitGroup + srv = &http.Server{ + Addr: cfg.Common.Listen, + Handler: mux, + } + + exitWait.Add(1) + + go func() { + defer exitWait.Done() + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + // unexpected error. port in use? + log.Fatalf("ListenAndServe(): %v", err) + } + }() + + go func() { + stop := make(chan os.Signal, 1) + signal.Notify(stop, syscall.SIGTERM, syscall.SIGINT) + <-stop + logger.Info("stopping graphite-clickhouse") + if cfg.Common.SDType != config.SDNone { + // unregister SD + sd.Stop() + time.Sleep(10 * time.Second) + } + // initiating the shutdown + ctx, _ := context.WithTimeout(context.Background(), time.Second*10) + srv.Shutdown(ctx) + }() + + if cfg.Common.SD != "" { + go func() { + time.Sleep(time.Millisecond * 100) + sdLogger := localManager.Logger("service discovery") + sd.Register(cfg, sdLogger) + }() } - log.Fatal(http.ListenAndServe(cfg.Common.Listen, mux)) + exitWait.Wait() + + logger.Info("stopped graphite-clickhouse") } diff --git a/sd/nginx/nginx.go b/sd/nginx/nginx.go index aa841fe8a..4729d2298 100644 --- a/sd/nginx/nginx.go +++ b/sd/nginx/nginx.go @@ -14,12 +14,21 @@ import ( "go.uber.org/zap" ) -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - ErrNoKey = errors.New("list key no found") - ErrInvalidKey = errors.New("list key is invalid") +type ErrInvalidKey struct { + val string +} + +func (e ErrInvalidKey) Error() string { + if e.val == "" { + return "list key is invalid" + } + return "list key is invalid: '" + e.val + "'" +} - timeNow = time.Now +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + ErrNoKey = errors.New("list key not found") + timeNow = time.Now ) func splitNode(node string) (dc, host, listen string, ok bool) { @@ -106,7 +115,7 @@ func (sd *Nginx) List() (nodes []string, err error) { nodes = append(nodes, s) } } else { - return nil, ErrInvalidKey + return nil, ErrInvalidKey{s} } } else { return nil, ErrNoKey @@ -157,7 +166,7 @@ func (sd *Nginx) ListMap() (nodes map[string]string, err error) { } } } else { - return nil, ErrInvalidKey + return nil, ErrInvalidKey{s} } } else { return nil, ErrNoKey @@ -213,7 +222,7 @@ func (sd *Nginx) Nodes() (nodes []utils.KV, err error) { } nodes = append(nodes, kv) } else { - return nil, ErrInvalidKey + return nil, ErrInvalidKey{s} } } else { return nil, ErrNoKey diff --git a/sd/register.go b/sd/register.go index 0744c51e3..fea1cab85 100644 --- 
a/sd/register.go +++ b/sd/register.go @@ -1,7 +1,6 @@ package sd import ( - "context" "os" "strings" "time" @@ -14,8 +13,10 @@ import ( ) var ( - ctxMain, Stop = context.WithCancel(context.Background()) - delay = time.Second * 10 + // ctxMain, Stop = context.WithCancel(context.Background()) + stop chan struct{} = make(chan struct{}, 1) + delay = time.Second * 10 + hostname string ) type SD interface { @@ -34,7 +35,6 @@ func Register(cfg *config.Config, logger *zap.Logger) { listenIP string prevIP string registerFirst bool - hostname string sd SD err error load float64 @@ -59,11 +59,14 @@ func Register(cfg *config.Config, logger *zap.Logger) { if err == nil { load_avg.Store(load) } + + logger.Info("init sd", + zap.String("hostname", hostname), + ) + w = load_avg.Weight(cfg.Common.BaseWeight, load) sd.Update(listenIP, cfg.Common.Listen, cfg.Common.SDDc, w) sd.Clear(listenIP, cfg.Common.Listen) - - defer sd.Clear("", "") } LOOP: for { @@ -90,8 +93,25 @@ LOOP: select { case <-t: continue - case <-ctxMain.Done(): + case <-stop: break LOOP } } + + if sd != nil { + if err := sd.Clear("", ""); err == nil { + logger.Info("cleanup sd", + zap.String("hostname", hostname), + ) + } else { + logger.Warn("cleanup sd", + zap.String("hostname", hostname), + zap.Error(err), + ) + } + } +} + +func Stop() { + stop <- struct{}{} }
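
A note on [PATCH 4/5]: the `-sd-clean` pass works because registration now appends `?flags=<unix-timestamp>` to every KV write, so each node record carries the time it was last refreshed in its `Flags` field. Below is a minimal, self-contained sketch of the cleanup decision in Go, assuming the `KV` shape from `sd/utils` and the one-hour threshold from `graphite-clickhouse.go`; `cleanDead` is a hypothetical helper, since the patch itself only prints candidates (the `sd.Delete` call is still commented out).

```go
package main

import (
	"fmt"
	"time"
)

// KV mirrors sd/utils.KV after this patch: Flags carries the Unix
// timestamp written via the ?flags= query parameter on registration.
type KV struct {
	Key   string
	Value string
	Flags int64
}

// cleanDead (hypothetical helper) reports every node and returns the stale
// ones: nodes whose last registration is older than maxAge. Nodes with
// Flags == 0 predate the flags mechanism and are never culled.
func cleanDead(nodes []KV, maxAge time.Duration, now time.Time) (stale []KV) {
	cutoff := now.Add(-maxAge).Unix()
	for _, node := range nodes {
		seen := time.Unix(node.Flags, 0).UTC().Format(time.RFC3339Nano)
		if node.Flags > 0 && node.Flags < cutoff { // same test as ts > node.Flags
			fmt.Printf("%s: %s (%s), deleted\n", node.Key, node.Value, seen)
			stale = append(stale, node)
		} else {
			fmt.Printf("%s: %s (%s)\n", node.Key, node.Value, seen)
		}
	}
	return stale
}

func main() {
	now := time.Unix(1682408721, 0) // the fixed clock used in nginx_test.go
	nodes := []KV{
		{Key: "_/host1/192.168.0.1:9090", Value: `{"weight":10,"max_fails":0}`, Flags: now.Unix() - 60},
		{Key: "_/host2/192.168.0.2:9090", Value: `{"weight":10,"max_fails":0}`, Flags: now.Unix() - 7200},
	}
	cleanDead(nodes, time.Hour, now) // only host2 falls outside the window
}
```

Since `Register` rewrites the flag on every update cycle, a live host always stays well inside the one-hour window; only hosts that stopped refreshing (crashed or partitioned) cross the threshold.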
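A note on [PATCH 5/5]: shutdown is now signal-driven and ordered — deregister from service discovery first, give balancers ten seconds to drop the node, then drain HTTP with a deadline. The sketch below reproduces that flow with only the standard library; `sdStop` stands in for `sd.Stop` from `sd/register.go`, and the 10-second pauses mirror the values hardcoded in `main`.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"
)

func main() {
	sdStop := func() { log.Println("sd: deregistered") } // stand-in for sd.Stop

	srv := &http.Server{Addr: ":9090", Handler: http.DefaultServeMux}

	var exitWait sync.WaitGroup
	exitWait.Add(1)
	go func() {
		defer exitWait.Done()
		// ErrServerClosed is the expected result of a clean Shutdown.
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatalf("ListenAndServe(): %v", err)
		}
	}()

	go func() {
		stop := make(chan os.Signal, 1)
		signal.Notify(stop, syscall.SIGTERM, syscall.SIGINT)
		<-stop
		sdStop()                     // 1. stop advertising this node
		time.Sleep(10 * time.Second) // 2. let balancers notice and drain
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		if err := srv.Shutdown(ctx); err != nil { // 3. stop accepting, wait for active requests
			log.Printf("Shutdown(): %v", err)
		}
	}()

	exitWait.Wait() // returns once ListenAndServe observes the shutdown
}
```

The ordering matters: shutting the listener down while still registered would keep the balancer routing fresh requests at a dying backend, which is exactly the window the `sd.Stop()`-then-sleep sequence closes.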