Remove usages of global logger (#7914)
* Remove usages of global logger

Removes uses of the global logger, util/log/Logger, from library
code. Non-test uses are replaced with an injected version of the logger,
and test uses are replaced with a test implementation of log.Logger that
acts as an adapter between go-kit logging and test output.

Signed-off-by: Nick Pillitteri <[email protected]>

* Code review

Signed-off-by: Nick Pillitteri <[email protected]>

---------

Signed-off-by: Nick Pillitteri <[email protected]>
56quarters authored Apr 17, 2024
1 parent 91c0c36 commit a851e6c
Showing 29 changed files with 97 additions and 171 deletions.
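To make the test-side change concrete, here is a minimal sketch of a go-kit `log.Logger` that forwards to `testing.T` output — an illustrative assumption of what a helper like `test.NewTestingLogger` can look like, not necessarily the implementation in `pkg/util/test`:

```go
package test

import (
	"fmt"
	"testing"

	"github.com/go-kit/log"
)

// testingLogger implements go-kit's log.Logger by writing each log line to
// the test's output via t.Log, so log output is attached to the test that
// produced it and is only printed on failure or with -v.
type testingLogger struct {
	t *testing.T
}

// NewTestingLogger returns a log.Logger backed by the given test.
// (Illustrative sketch; the real helper in pkg/util/test may differ.)
func NewTestingLogger(t *testing.T) log.Logger {
	return &testingLogger{t: t}
}

func (l *testingLogger) Log(keyvals ...interface{}) error {
	l.t.Helper()
	l.t.Log(fmt.Sprintln(keyvals...))
	return nil
}
```

With an adapter like this, test log output stays scoped to the test that produced it instead of flowing through a process-global logger.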
12 changes: 9 additions & 3 deletions Makefile
@@ -383,11 +383,17 @@ lint: check-makefiles

# Ensure packages that no longer use a global logger don't reintroduce it
faillint -paths "github.com/grafana/mimir/pkg/util/log.{Logger}" \
-./pkg/alertmanager/alertstore/... \
-./pkg/ingester/... \
+./pkg/alertmanager/... \
+./pkg/compactor/... \
+./pkg/distributor/... \
+./pkg/flusher/... \
+./pkg/frontend/... \
+./pkg/ingester/... \
./pkg/querier/... \
-./pkg/ruler/...
+./pkg/ruler/... \
+./pkg/scheduler/... \
+./pkg/storage/... \
+./pkg/storegateway/...

# We've copied github.com/NYTimes/gziphandler to pkg/util/gziphandler
# at least until https://github.com/nytimes/gziphandler/pull/112 is merged
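The extended faillint rule fails the lint target whenever one of the listed packages references the global `util_log.Logger` symbol. As a rough illustration of the pattern being enforced — the package and type names below are hypothetical, not taken from this diff — components accept a logger through their constructor instead of reaching for the global:

```go
package flusher // hypothetical package, shown only to illustrate the lint rule

import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// Flusher takes its logger as a dependency, so nothing in this package
// needs to import the banned util/log.Logger symbol.
type Flusher struct {
	logger log.Logger
}

func New(logger log.Logger) *Flusher {
	return &Flusher{logger: logger}
}

func (f *Flusher) Flush() {
	level.Info(f.logger).Log("msg", "flushing")
}
```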
2 changes: 1 addition & 1 deletion cmd/mimir-continuous-test/main.go
@@ -55,7 +55,7 @@ func main() {
registry.MustRegister(version.NewCollector("mimir_continuous_test"))
registry.MustRegister(collectors.NewGoCollector())

-i := instrumentation.NewMetricsServer(serverMetricsPort, registry)
+i := instrumentation.NewMetricsServer(serverMetricsPort, registry, util_log.Logger)
if err := i.Start(); err != nil {
level.Error(logger).Log("msg", "Unable to start instrumentation server", "err", err.Error())
util_log.Flush()
2 changes: 1 addition & 1 deletion cmd/query-tee/main.go
@@ -48,7 +48,7 @@ func main() {
registry := prometheus.NewRegistry()
registry.MustRegister(collectors.NewGoCollector())

-i := instrumentation.NewMetricsServer(cfg.ServerMetricsPort, registry)
+i := instrumentation.NewMetricsServer(cfg.ServerMetricsPort, registry, util_log.Logger)
if err := i.Start(); err != nil {
level.Error(util_log.Logger).Log("msg", "Unable to start instrumentation server", "err", err.Error())
util_log.Flush()
30 changes: 8 additions & 22 deletions pkg/alertmanager/alertmanager_http.go
@@ -10,10 +10,9 @@ import (
"net/http"
"text/template"

+"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
-
-util_log "github.com/grafana/mimir/pkg/util/log"
)

var (
@@ -34,45 +33,32 @@ type statusPageContents struct {
State string
}

-func writeRingStatusMessage(w http.ResponseWriter, message string) {
+func writeRingStatusMessage(w http.ResponseWriter, message string, logger log.Logger) {
w.WriteHeader(http.StatusOK)
err := ringStatusPageTemplate.Execute(w, ringStatusPageContents{Message: message})
if err != nil {
-level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err)
+level.Error(logger).Log("msg", "unable to serve alertmanager ring page", "err", err)
}
}

func (am *MultitenantAlertmanager) RingHandler(w http.ResponseWriter, req *http.Request) {
if am.State() != services.Running {
// we cannot read the ring before the alertmanager is in Running state,
// because that would lead to race condition.
-writeRingStatusMessage(w, "Alertmanager is not running yet.")
+writeRingStatusMessage(w, "Alertmanager is not running yet.", am.logger)
return
}

am.ring.ServeHTTP(w, req)
}

-// GetStatusHandler returns the status handler for this multi-tenant
-// alertmanager.
-func (am *MultitenantAlertmanager) GetStatusHandler() StatusHandler {
-return StatusHandler{
-am: am,
-}
-}
-
-// StatusHandler shows the status of the alertmanager.
-type StatusHandler struct {
-am *MultitenantAlertmanager
-}
-
-// ServeHTTP serves the status of the alertmanager.
-func (s StatusHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
+// StatusHandler serves the status of the alertmanager.
+func (am *MultitenantAlertmanager) StatusHandler(w http.ResponseWriter, _ *http.Request) {
err := statusPageTemplate.Execute(w, statusPageContents{
-State: s.am.State().String(),
+State: am.State().String(),
})
if err != nil {
-level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager status page", "err", err)
+level.Error(am.logger).Log("msg", "unable to serve alertmanager status page", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
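The dedicated StatusHandler struct, whose only job was to implement `http.Handler`, is replaced by a plain method with the `func(http.ResponseWriter, *http.Request)` signature; callers adapt it with `http.HandlerFunc`, as the `pkg/api/api.go` hunk further down shows. A small self-contained sketch of that idiom (the `server` type here is illustrative, not from this diff):

```go
package main

import "net/http"

type server struct{}

// StatusHandler has the handler-function signature but does not implement
// http.Handler on its own.
func (s *server) StatusHandler(w http.ResponseWriter, _ *http.Request) {
	w.Write([]byte("ok"))
}

func main() {
	s := &server{}
	mux := http.NewServeMux()
	// http.HandlerFunc adapts the method value to the http.Handler interface,
	// removing the need for a separate wrapper struct.
	mux.Handle("/status", http.HandlerFunc(s.StatusHandler))
	_ = http.ListenAndServe(":8080", mux)
}
```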
7 changes: 4 additions & 3 deletions pkg/alertmanager/alertmanager_http_test.go
@@ -10,21 +10,22 @@ import (
"net/http/httptest"
"testing"

-"github.com/go-kit/log"
"github.com/prometheus/alertmanager/featurecontrol"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+
+"github.com/grafana/mimir/pkg/util/test"
)

func TestMultitenantAlertmanager_GetStatusHandler(t *testing.T) {
store := prepareInMemoryAlertStore()
reg := prometheus.NewPedanticRegistry()
cfg := mockAlertmanagerConfig(t)
-am := setupSingleMultitenantAlertmanager(t, cfg, store, nil, featurecontrol.NoopFlags{}, log.NewNopLogger(), reg)
+am := setupSingleMultitenantAlertmanager(t, cfg, store, nil, featurecontrol.NoopFlags{}, test.NewTestingLogger(t), reg)

req := httptest.NewRequest("GET", "http://alertmanager.cortex/status", nil)
w := httptest.NewRecorder()
-am.GetStatusHandler().ServeHTTP(w, req)
+am.StatusHandler(w, req)

resp := w.Result()
require.Equal(t, 200, w.Code)
7 changes: 3 additions & 4 deletions pkg/alertmanager/alertmanager_metrics.go
@@ -6,10 +6,9 @@
package alertmanager

import (
+"github.com/go-kit/log"
dskit_metrics "github.com/grafana/dskit/metrics"
"github.com/prometheus/client_golang/prometheus"
-
-util_log "github.com/grafana/mimir/pkg/util/log"
)

// This struct aggregates metrics exported by Alertmanager
@@ -81,9 +80,9 @@ type alertmanagerMetrics struct {
alertsLimiterAlertsSize *prometheus.Desc
}

-func newAlertmanagerMetrics() *alertmanagerMetrics {
+func newAlertmanagerMetrics(logger log.Logger) *alertmanagerMetrics {
return &alertmanagerMetrics{
-regs: dskit_metrics.NewTenantRegistries(util_log.Logger),
+regs: dskit_metrics.NewTenantRegistries(logger),
alertsReceived: prometheus.NewDesc(
"cortex_alertmanager_alerts_received_total",
"The total number of received alerts.",
6 changes: 4 additions & 2 deletions pkg/alertmanager/alertmanager_metrics_test.go
@@ -15,6 +15,8 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
+
+"github.com/grafana/mimir/pkg/util/test"
)

var integrations = []string{
@@ -33,7 +35,7 @@ var integrations = []string{
func TestAlertmanagerMetricsStore(t *testing.T) {
mainReg := prometheus.NewPedanticRegistry()

-alertmanangerMetrics := newAlertmanagerMetrics()
+alertmanangerMetrics := newAlertmanagerMetrics(test.NewTestingLogger(t))
mainReg.MustRegister(alertmanangerMetrics)
alertmanangerMetrics.addUserRegistry("user1", populateAlertmanager(1))
alertmanangerMetrics.addUserRegistry("user2", populateAlertmanager(10))
@@ -353,7 +355,7 @@ func TestAlertmanagerMetricsStore(t *testing.T) {
func TestAlertmanagerMetricsRemoval(t *testing.T) {
mainReg := prometheus.NewPedanticRegistry()

-alertmanagerMetrics := newAlertmanagerMetrics()
+alertmanagerMetrics := newAlertmanagerMetrics(test.NewTestingLogger(t))
mainReg.MustRegister(alertmanagerMetrics)
alertmanagerMetrics.addUserRegistry("user1", populateAlertmanager(1))
alertmanagerMetrics.addUserRegistry("user2", populateAlertmanager(10))
14 changes: 7 additions & 7 deletions pkg/alertmanager/api_grafana_test.go
@@ -20,7 +20,7 @@ import (

"github.com/grafana/mimir/pkg/alertmanager/alertspb"
"github.com/grafana/mimir/pkg/alertmanager/alertstore/bucketclient"
-util_log "github.com/grafana/mimir/pkg/util/log"
+"github.com/grafana/mimir/pkg/util/test"
)

const (
@@ -57,7 +57,7 @@ func TestMultitenantAlertmanager_DeleteUserGrafanaConfig(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.NoError(t, alertstore.SetGrafanaAlertConfig(context.Background(), alertspb.GrafanaAlertConfigDesc{
@@ -115,7 +115,7 @@ func TestMultitenantAlertmanager_DeleteUserGrafanaState(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.NoError(t, alertstore.SetFullGrafanaState(context.Background(), "test_user", alertspb.FullStateDesc{
@@ -177,7 +177,7 @@ func TestMultitenantAlertmanager_GetUserGrafanaConfig(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.NoError(t, alertstore.SetGrafanaAlertConfig(context.Background(), alertspb.GrafanaAlertConfigDesc{
@@ -229,7 +229,7 @@ func TestMultitenantAlertmanager_GetUserGrafanaState(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.NoError(t, alertstore.SetFullGrafanaState(context.Background(), "test_user", alertspb.FullStateDesc{
@@ -281,7 +281,7 @@ func TestMultitenantAlertmanager_SetUserGrafanaConfig(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.Len(t, storage.Objects(), 0)
@@ -350,7 +350,7 @@ func TestMultitenantAlertmanager_SetUserGrafanaState(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertstore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.Len(t, storage.Objects(), 0)
6 changes: 3 additions & 3 deletions pkg/alertmanager/api_test.go
@@ -29,7 +29,7 @@ import (

"github.com/grafana/mimir/pkg/alertmanager/alertspb"
"github.com/grafana/mimir/pkg/alertmanager/alertstore/bucketclient"
-util_log "github.com/grafana/mimir/pkg/util/log"
+"github.com/grafana/mimir/pkg/util/test"
)

func TestAMConfigValidationAPI(t *testing.T) {
@@ -882,7 +882,7 @@ alertmanager_config: |
limits := &mockAlertManagerLimits{}
am := &MultitenantAlertmanager{
store: prepareInMemoryAlertStore(),
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
limits: limits,
}
for _, tc := range testCases {
@@ -917,7 +917,7 @@ func TestMultitenantAlertmanager_DeleteUserConfig(t *testing.T) {

am := &MultitenantAlertmanager{
store: alertStore,
-logger: util_log.Logger,
+logger: test.NewTestingLogger(t),
}

require.NoError(t, alertStore.SetAlertConfig(context.Background(), alertspb.AlertConfigDesc{
4 changes: 2 additions & 2 deletions pkg/alertmanager/distributor_test.go
@@ -34,7 +34,7 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"

"github.com/grafana/mimir/pkg/alertmanager/alertmanagerpb"
-util_log "github.com/grafana/mimir/pkg/util/log"
+utiltest "github.com/grafana/mimir/pkg/util/test"
)

func TestDistributor_DistributeRequest(t *testing.T) {
@@ -314,7 +314,7 @@ func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBod
cfg := &MultitenantAlertmanagerConfig{}
flagext.DefaultValues(cfg)

-d, err := NewDistributor(cfg.AlertmanagerClient, cfg.MaxRecvMsgSize, amRing, newMockAlertmanagerClientFactory(amByAddr), util_log.Logger, prometheus.NewRegistry())
+d, err := NewDistributor(cfg.AlertmanagerClient, cfg.MaxRecvMsgSize, amRing, newMockAlertmanagerClientFactory(amByAddr), utiltest.NewTestingLogger(t), prometheus.NewRegistry())
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), d))

2 changes: 1 addition & 1 deletion pkg/alertmanager/multitenant.go
@@ -356,7 +356,7 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC
fallbackConfig: string(fallbackConfig),
cfgs: map[string]alertspb.AlertConfigDesc{},
alertmanagers: map[string]*Alertmanager{},
-alertmanagerMetrics: newAlertmanagerMetrics(),
+alertmanagerMetrics: newAlertmanagerMetrics(logger),
multitenantMetrics: newMultitenantAlertmanagerMetrics(registerer),
store: store,
logger: log.With(logger, "component", "MultiTenantAlertmanager"),
2 changes: 1 addition & 1 deletion pkg/api/api.go
@@ -196,7 +196,7 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, api
})

// Ensure this route is registered before the prefixed AM route
-a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false, true, "GET")
+a.RegisterRoute("/multitenant_alertmanager/status", http.HandlerFunc(am.StatusHandler), false, true, "GET")
a.RegisterRoute("/multitenant_alertmanager/configs", http.HandlerFunc(am.ListAllConfigs), false, true, "GET")
a.RegisterRoute("/multitenant_alertmanager/ring", http.HandlerFunc(am.RingHandler), false, true, "GET", "POST")
a.RegisterRoute("/multitenant_alertmanager/delete_tenant_config", http.HandlerFunc(am.DeleteUserConfig), true, true, "POST")
2 changes: 1 addition & 1 deletion pkg/compactor/blocks_cleaner.go
@@ -357,7 +357,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userID
level.Info(userLogger).Log("msg", "deleted blocks for tenant marked for deletion", "deletedBlocks", deletedBlocks)
}

-mark, err := mimir_tsdb.ReadTenantDeletionMark(ctx, c.bucketClient, userID)
+mark, err := mimir_tsdb.ReadTenantDeletionMark(ctx, c.bucketClient, userID, c.logger)
if err != nil {
return errors.Wrap(err, "failed to read tenant deletion mark")
}
6 changes: 3 additions & 3 deletions pkg/compactor/compactor.go
@@ -730,11 +730,11 @@ func (c *MultitenantCompactor) compactUserWithRetries(ctx context.Context, userI

func (c *MultitenantCompactor) compactUser(ctx context.Context, userID string) error {
userBucket := bucket.NewUserBucketClient(userID, c.bucketClient, c.cfgProvider)
-reg := prometheus.NewRegistry()
-defer c.syncerMetrics.gatherThanosSyncerMetrics(reg)
-
userLogger := util_log.WithUserID(userID, c.logger)

+reg := prometheus.NewRegistry()
+defer c.syncerMetrics.gatherThanosSyncerMetrics(reg, userLogger)
+
// Filters out duplicate blocks that can be formed from two or more overlapping
// blocks that fully submatch the source blocks of the older blocks.
deduplicateBlocksFilter := NewShardAwareDeduplicateFilter()
9 changes: 4 additions & 5 deletions pkg/compactor/compactor_http.go
@@ -10,10 +10,9 @@ import (
"html/template"
"net/http"

+"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
-
-util_log "github.com/grafana/mimir/pkg/util/log"
)

var (
@@ -26,20 +25,20 @@ type statusPageContents struct {
Message string
}

-func writeMessage(w http.ResponseWriter, message string) {
+func writeMessage(w http.ResponseWriter, message string, logger log.Logger) {
w.WriteHeader(http.StatusOK)
err := statusPageTemplate.Execute(w, statusPageContents{Message: message})

if err != nil {
-level.Error(util_log.Logger).Log("msg", "unable to serve compactor ring page", "err", err)
+level.Error(logger).Log("msg", "unable to serve compactor ring page", "err", err)
}
}

func (c *MultitenantCompactor) RingHandler(w http.ResponseWriter, req *http.Request) {
if c.State() != services.Running {
// we cannot read the ring before MultitenantCompactor is in Running state,
// because that would lead to race condition.
-writeMessage(w, "Compactor is not running yet.")
+writeMessage(w, "Compactor is not running yet.", c.logger)
return
}
